refactor: points rework (v1) (#1663)

* refactor: add new method of storing timeseries data

* mostly finish adding data

* tmp

* migrate over to separate lib

* prepare to migrate over to new timeseries storage

* prepare to migrate frozen state

* migrate frozen state

* name

* migrate data collection

* migrate network

* fix some stuff

* fix a panic from bad pruning

* Fix pruning issues

* migrate RAM

* migrate swap

* migrate cache label

* refactor out to function

* migrate ram points

* migrate swap points

* migrate cache points

* migrate arc

* migrate gpu, remove a bunch of state code around force update

* rename cache, also some comments

* some temp cleanup

* migrate disk

* comments to remind me about fixmes, fix bug around time graph spans

* migrate load avg

* port temps

* style

* fix bug with left edge gap

* partial migration of cpu, reorganize data file structure

* migrate cpu

* some cleanup

* fix bug with cpu widget + clippy

* start some small optimization work

* fix some things for some platforms

* refactor: rename data_collection to collection

* refactor: only process temp type in data eat step

* flatten components folder a bit

* partially migrate to new graph system and fix cpu bug

* drive-by migration of process list to reduce allocs + more migration of points drawing

* revert the collection change

Forgot that I cut a new `Data` on each collection so that change was
useless.

* port over network stuff...

* fully migrate network, and fix some log bugs while we're at it

This is something I never noticed, but the log of 0 is -inf, so there
were gaps in the lines when using log scaling!
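
For illustration, the fix boils down to clamping the log of zero-valued points instead of letting them fall to -inf. A minimal standalone sketch of the idea (the real helpers in this PR are `saturating_log2`/`saturating_log10` in `utils::general`; their exact signatures may differ):

```rust
/// Log base 10 that maps 0 to 0 instead of -inf, so zero-valued points still
/// produce a drawable y-value under log scaling.
fn saturating_log10(value: f64) -> f64 {
    if value > 0.0 { value.log10() } else { 0.0 }
}

fn main() {
    assert_eq!(saturating_log10(0.0), 0.0); // previously -inf, which left gaps in the line
    assert!((saturating_log10(100.0) - 2.0).abs() < 1e-9);
}
```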

* fix cpu colour in all mode

* clean up some disk table stuff
Clement Tsang, 2025-02-03 01:34:58 -05:00, committed by GitHub
commit 837e23560f (parent 0aae119cfa)
97 changed files with 2014 additions and 2686 deletions


@ -36,13 +36,17 @@ That said, these are more guidelines rather than hardset rules, though the proje
- [#1593](https://github.com/ClementTsang/bottom/pull/1593): Fix using `"none"` for chart legend position in configs.
- [#1594](https://github.com/ClementTsang/bottom/pull/1594): Fix incorrect default config definitions for chart legends.
- [#1596](https://github.com/ClementTsang/bottom/pull/1596): Fix support for nilfs2 file system.
- [#1660](https://github.com/ClementTsang/bottom/pull/1660): Handle terminal cleanup if the program is terminated due to an `Err` bubbling to the top.
- [#1660](https://github.com/ClementTsang/bottom/pull/1660): Fix properly cleaning up the terminal if the program is terminated due to an `Err` bubbling to the top.
- [#1663](https://github.com/ClementTsang/bottom/pull/1663): Fix network graphs using log scaling having broken lines when a point was 0.
### Changes
- [#1559](https://github.com/ClementTsang/bottom/pull/1559): Rename `--enable_gpu` to `--disable_gpu`, and make GPU features enabled by default.
- [#1570](https://github.com/ClementTsang/bottom/pull/1570): Consider `$XDG_CONFIG_HOME` on macOS when looking for a default config path in a
backwards-compatible fashion.
- [#1570](https://github.com/ClementTsang/bottom/pull/1570): Consider `$XDG_CONFIG_HOME` on macOS when looking for a default config path in a backwards-compatible fashion.
### Other
- [#1663](https://github.com/ClementTsang/bottom/pull/1663): Rework how data is stored internally, reducing memory usage a bit.
## [0.10.2] - 2024-08-05

Cargo.lock (generated)

@ -187,6 +187,7 @@ dependencies = [
"sysinfo",
"tempfile",
"time",
"timeless",
"toml_edit",
"unicode-ellipsis",
"unicode-segmentation",
@ -1581,6 +1582,12 @@ dependencies = [
"time-core",
]
[[package]]
name = "timeless"
version = "0.0.14-alpha"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04121e3f47427f2604066a4c4af25102e6c5794b167f6dee85958898ebf7f131"
[[package]]
name = "toml_datetime"
version = "0.6.8"


@ -91,6 +91,7 @@ regex = "1.11.1"
serde = { version = "1.0.217", features = ["derive"] }
starship-battery = { version = "0.10.0", optional = true }
sysinfo = "=0.30.13"
timeless = "0.0.14-alpha"
toml_edit = { version = "0.22.22", features = ["serde"] }
tui = { version = "0.29.0", package = "ratatui" }
unicode-ellipsis = "0.3.0"


@ -1,6 +1,5 @@
pub mod data_farmer;
pub mod data;
pub mod filter;
pub mod frozen_state;
pub mod layout_manager;
mod process_killer;
pub mod states;
@ -12,25 +11,22 @@ use std::{
use anyhow::bail;
use concat_string::concat_string;
use data_farmer::*;
use data::*;
use filter::*;
use frozen_state::FrozenState;
use hashbrown::HashMap;
use layout_manager::*;
pub use states::*;
use unicode_segmentation::{GraphemeCursor, UnicodeSegmentation};
use crate::{
canvas::components::time_chart::LegendPosition,
constants, convert_mem_data_points, convert_swap_data_points,
data_collection::{processes::Pid, temperature},
data_conversion::ConvertedData,
get_network_points,
canvas::components::time_graph::LegendPosition,
collection::processes::Pid,
constants,
utils::data_units::DataUnit,
widgets::{ProcWidgetColumn, ProcWidgetMode},
};
#[derive(Debug, Clone, Eq, PartialEq, Default)]
#[derive(Debug, Clone, Eq, PartialEq, Default, Copy)]
pub enum AxisScaling {
#[default]
Log,
@ -42,7 +38,7 @@ pub enum AxisScaling {
#[derive(Debug, Default, Eq, PartialEq)]
pub struct AppConfigFields {
pub update_rate: u64,
pub temperature_type: temperature::TemperatureType,
pub temperature_type: TemperatureType,
pub use_dot: bool,
pub cpu_left_legend: bool,
pub show_average_cpu: bool, // TODO: Unify this in CPU options
@ -105,10 +101,8 @@ pub struct App {
second_char: Option<char>,
pub dd_err: Option<String>, // FIXME: The way we do deletes is really gross.
to_delete_process_list: Option<(String, Vec<Pid>)>,
pub frozen_state: FrozenState,
pub data_store: DataStore,
last_key_press: Instant,
pub converted_data: ConvertedData,
pub data_collection: DataCollection,
pub delete_dialog_state: AppDeleteDialogState,
pub help_dialog_state: AppHelpDialogState,
pub is_expanded: bool,
@ -135,10 +129,8 @@ impl App {
second_char: None,
dd_err: None,
to_delete_process_list: None,
frozen_state: FrozenState::default(),
data_store: DataStore::default(),
last_key_press: Instant::now(),
converted_data: ConvertedData::default(),
data_collection: DataCollection::default(),
delete_dialog_state: AppDeleteDialogState::default(),
help_dialog_state: AppHelpDialogState::default(),
is_expanded,
@ -156,82 +148,33 @@ impl App {
/// Update the data in the [`App`].
pub fn update_data(&mut self) {
let data_source = match &self.frozen_state {
FrozenState::NotFrozen => &self.data_collection,
FrozenState::Frozen(data) => data,
};
let data_source = self.data_store.get_data();
// FIXME: (points_rework_v1) maybe separate PR but would it make more sense to store references of data?
// Would it also make more sense to move the "data set" step to the draw step, and make it only set if force
// update is set here?
for proc in self.states.proc_state.widget_states.values_mut() {
if proc.force_update_data {
proc.set_table_data(data_source);
proc.force_update_data = false;
}
}
// FIXME: Make this CPU force update less terrible.
if self.states.cpu_state.force_update.is_some() {
self.converted_data.convert_cpu_data(data_source);
self.converted_data.load_avg_data = data_source.load_avg_harvest;
self.states.cpu_state.force_update = None;
}
// FIXME: This is a bit of a temp hack to move data over.
{
let data = &self.converted_data.cpu_data;
for cpu in self.states.cpu_state.widget_states.values_mut() {
cpu.update_table(data);
}
}
{
let data = &self.converted_data.temp_data;
for temp in self.states.temp_state.widget_states.values_mut() {
if temp.force_update_data {
temp.set_table_data(data);
temp.force_update_data = false;
}
}
}
{
let data = &self.converted_data.disk_data;
for disk in self.states.disk_state.widget_states.values_mut() {
if disk.force_update_data {
disk.set_table_data(data);
disk.force_update_data = false;
}
for temp in self.states.temp_state.widget_states.values_mut() {
if temp.force_update_data {
temp.set_table_data(&data_source.temp_data);
}
}
// TODO: [OPT] Prefer reassignment over new vectors?
if self.states.mem_state.force_update.is_some() {
self.converted_data.mem_data = convert_mem_data_points(data_source);
#[cfg(not(target_os = "windows"))]
{
self.converted_data.cache_data = crate::convert_cache_data_points(data_source);
for cpu in self.states.cpu_state.widget_states.values_mut() {
if cpu.force_update_data {
cpu.set_legend_data(&data_source.cpu_harvest);
}
self.converted_data.swap_data = convert_swap_data_points(data_source);
#[cfg(feature = "zfs")]
{
self.converted_data.arc_data = crate::convert_arc_data_points(data_source);
}
#[cfg(feature = "gpu")]
{
self.converted_data.gpu_data = crate::convert_gpu_data(data_source);
}
self.states.mem_state.force_update = None;
}
if self.states.net_state.force_update.is_some() {
let (rx, tx) = get_network_points(
data_source,
&self.app_config_fields.network_scale_type,
&self.app_config_fields.network_unit_type,
self.app_config_fields.network_use_binary_prefix,
);
self.converted_data.network_data_rx = rx;
self.converted_data.network_data_tx = tx;
self.states.net_state.force_update = None;
for disk in self.states.disk_state.widget_states.values_mut() {
if disk.force_update_data {
disk.set_table_data(data_source);
}
}
}
@ -256,16 +199,12 @@ impl App {
self.to_delete_process_list = None;
self.dd_err = None;
// Unfreeze.
self.frozen_state.thaw();
self.data_store.reset();
// Reset zoom
self.reset_cpu_zoom();
self.reset_mem_zoom();
self.reset_net_zoom();
// Reset data
self.data_collection.reset();
}
pub fn should_get_widget_bounds(&self) -> bool {
@ -762,10 +701,9 @@ impl App {
}
}
}
BottomWidgetType::Battery =>
{
BottomWidgetType::Battery => {
#[cfg(feature = "battery")]
if self.data_collection.battery_harvest.len() > 1 {
if self.data_store.get_data().battery_harvest.len() > 1 {
if let Some(battery_widget_state) = self
.states
.battery_state
@ -825,20 +763,21 @@ impl App {
}
}
}
BottomWidgetType::Battery =>
{
BottomWidgetType::Battery => {
#[cfg(feature = "battery")]
if self.data_collection.battery_harvest.len() > 1 {
let battery_count = self.data_collection.battery_harvest.len();
if let Some(battery_widget_state) = self
.states
.battery_state
.get_mut_widget_state(self.current_widget.widget_id)
{
if battery_widget_state.currently_selected_battery_index
< battery_count - 1
{
let battery_count = self.data_store.get_data().battery_harvest.len();
if battery_count > 1 {
if let Some(battery_widget_state) = self
.states
.battery_state
.get_mut_widget_state(self.current_widget.widget_id)
{
battery_widget_state.currently_selected_battery_index += 1;
if battery_widget_state.currently_selected_battery_index
< battery_count - 1
{
battery_widget_state.currently_selected_battery_index += 1;
}
}
}
}
@ -1277,9 +1216,7 @@ impl App {
'G' => self.skip_to_last(),
'k' => self.on_up_key(),
'j' => self.on_down_key(),
'f' => {
self.frozen_state.toggle(&self.data_collection); // TODO: Thawing should force a full data refresh and redraw immediately.
}
'f' => self.data_store.toggle_frozen(),
'c' => {
if let BottomWidgetType::Proc = self.current_widget.widget_type {
if let Some(proc_widget_state) = self
@ -2068,7 +2005,7 @@ impl App {
.disk_state
.get_mut_widget_state(self.current_widget.widget_id)
{
if !self.converted_data.disk_data.is_empty() {
if !self.data_store.get_data().disk_harvest.is_empty() {
disk_widget_state.table.scroll_to_last();
}
}
@ -2275,7 +2212,6 @@ impl App {
if new_time <= self.app_config_fields.retention_ms {
cpu_widget_state.current_display_time = new_time;
self.states.cpu_state.force_update = Some(self.current_widget.widget_id);
if self.app_config_fields.autohide_time {
cpu_widget_state.autohide_timer = Some(Instant::now());
}
@ -2283,7 +2219,6 @@ impl App {
!= self.app_config_fields.retention_ms
{
cpu_widget_state.current_display_time = self.app_config_fields.retention_ms;
self.states.cpu_state.force_update = Some(self.current_widget.widget_id);
if self.app_config_fields.autohide_time {
cpu_widget_state.autohide_timer = Some(Instant::now());
}
@ -2303,7 +2238,6 @@ impl App {
if new_time <= self.app_config_fields.retention_ms {
mem_widget_state.current_display_time = new_time;
self.states.mem_state.force_update = Some(self.current_widget.widget_id);
if self.app_config_fields.autohide_time {
mem_widget_state.autohide_timer = Some(Instant::now());
}
@ -2311,7 +2245,6 @@ impl App {
!= self.app_config_fields.retention_ms
{
mem_widget_state.current_display_time = self.app_config_fields.retention_ms;
self.states.mem_state.force_update = Some(self.current_widget.widget_id);
if self.app_config_fields.autohide_time {
mem_widget_state.autohide_timer = Some(Instant::now());
}
@ -2331,7 +2264,6 @@ impl App {
if new_time <= self.app_config_fields.retention_ms {
net_widget_state.current_display_time = new_time;
self.states.net_state.force_update = Some(self.current_widget.widget_id);
if self.app_config_fields.autohide_time {
net_widget_state.autohide_timer = Some(Instant::now());
}
@ -2339,7 +2271,6 @@ impl App {
!= self.app_config_fields.retention_ms
{
net_widget_state.current_display_time = self.app_config_fields.retention_ms;
self.states.net_state.force_update = Some(self.current_widget.widget_id);
if self.app_config_fields.autohide_time {
net_widget_state.autohide_timer = Some(Instant::now());
}
@ -2365,7 +2296,6 @@ impl App {
if new_time >= constants::STALE_MIN_MILLISECONDS {
cpu_widget_state.current_display_time = new_time;
self.states.cpu_state.force_update = Some(self.current_widget.widget_id);
if self.app_config_fields.autohide_time {
cpu_widget_state.autohide_timer = Some(Instant::now());
}
@ -2373,7 +2303,6 @@ impl App {
!= constants::STALE_MIN_MILLISECONDS
{
cpu_widget_state.current_display_time = constants::STALE_MIN_MILLISECONDS;
self.states.cpu_state.force_update = Some(self.current_widget.widget_id);
if self.app_config_fields.autohide_time {
cpu_widget_state.autohide_timer = Some(Instant::now());
}
@ -2393,7 +2322,6 @@ impl App {
if new_time >= constants::STALE_MIN_MILLISECONDS {
mem_widget_state.current_display_time = new_time;
self.states.mem_state.force_update = Some(self.current_widget.widget_id);
if self.app_config_fields.autohide_time {
mem_widget_state.autohide_timer = Some(Instant::now());
}
@ -2401,7 +2329,6 @@ impl App {
!= constants::STALE_MIN_MILLISECONDS
{
mem_widget_state.current_display_time = constants::STALE_MIN_MILLISECONDS;
self.states.mem_state.force_update = Some(self.current_widget.widget_id);
if self.app_config_fields.autohide_time {
mem_widget_state.autohide_timer = Some(Instant::now());
}
@ -2421,7 +2348,6 @@ impl App {
if new_time >= constants::STALE_MIN_MILLISECONDS {
net_widget_state.current_display_time = new_time;
self.states.net_state.force_update = Some(self.current_widget.widget_id);
if self.app_config_fields.autohide_time {
net_widget_state.autohide_timer = Some(Instant::now());
}
@ -2429,7 +2355,6 @@ impl App {
!= constants::STALE_MIN_MILLISECONDS
{
net_widget_state.current_display_time = constants::STALE_MIN_MILLISECONDS;
self.states.net_state.force_update = Some(self.current_widget.widget_id);
if self.app_config_fields.autohide_time {
net_widget_state.autohide_timer = Some(Instant::now());
}
@ -2448,7 +2373,6 @@ impl App {
.get_mut(&self.current_widget.widget_id)
{
cpu_widget_state.current_display_time = self.app_config_fields.default_time_value;
self.states.cpu_state.force_update = Some(self.current_widget.widget_id);
if self.app_config_fields.autohide_time {
cpu_widget_state.autohide_timer = Some(Instant::now());
}
@ -2463,7 +2387,6 @@ impl App {
.get_mut(&self.current_widget.widget_id)
{
mem_widget_state.current_display_time = self.app_config_fields.default_time_value;
self.states.mem_state.force_update = Some(self.current_widget.widget_id);
if self.app_config_fields.autohide_time {
mem_widget_state.autohide_timer = Some(Instant::now());
}
@ -2478,7 +2401,6 @@ impl App {
.get_mut(&self.current_widget.widget_id)
{
net_widget_state.current_display_time = self.app_config_fields.default_time_value;
self.states.net_state.force_update = Some(self.current_widget.widget_id);
if self.app_config_fields.autohide_time {
net_widget_state.autohide_timer = Some(Instant::now());
}
@ -2805,10 +2727,12 @@ impl App {
{
if (x >= *tlc_x && y >= *tlc_y) && (x <= *brc_x && y <= *brc_y)
{
if itx >= self.data_collection.battery_harvest.len() {
let num_batteries =
self.data_store.get_data().battery_harvest.len();
if itx >= num_batteries {
// range check to keep within current data
battery_widget_state.currently_selected_battery_index =
self.data_collection.battery_harvest.len() - 1;
num_batteries - 1;
} else {
battery_widget_state.currently_selected_battery_index =
itx;

src/app/data/mod.rs (new file)

@ -0,0 +1,13 @@
//! How we manage data internally.
mod time_series;
pub use time_series::{TimeSeriesData, Values};
mod process;
pub use process::ProcessData;
mod store;
pub use store::*;
mod temperature;
pub use temperature::*;

src/app/data/process.rs (new file)

@ -0,0 +1,54 @@
use std::{collections::BTreeMap, vec::Vec};
use crate::collection::processes::{Pid, ProcessHarvest};
use hashbrown::HashMap;
#[derive(Clone, Debug, Default)]
pub struct ProcessData {
/// A PID to process data map.
pub process_harvest: BTreeMap<Pid, ProcessHarvest>,
/// A mapping between a process PID to any children process PIDs.
pub process_parent_mapping: HashMap<Pid, Vec<Pid>>,
/// PIDs corresponding to processes that have no parents.
pub orphan_pids: Vec<Pid>,
}
impl ProcessData {
pub(super) fn ingest(&mut self, list_of_processes: Vec<ProcessHarvest>) {
self.process_parent_mapping.clear();
// Reverse as otherwise the pid mappings are in the wrong order.
list_of_processes.iter().rev().for_each(|process_harvest| {
if let Some(parent_pid) = process_harvest.parent_pid {
if let Some(entry) = self.process_parent_mapping.get_mut(&parent_pid) {
entry.push(process_harvest.pid);
} else {
self.process_parent_mapping
.insert(parent_pid, vec![process_harvest.pid]);
}
}
});
self.process_parent_mapping.shrink_to_fit();
let process_pid_map = list_of_processes
.into_iter()
.map(|process| (process.pid, process))
.collect();
self.process_harvest = process_pid_map;
// We collect all processes that either:
// - Do not have a parent PID (that is, they are orphan processes)
// - Have a parent PID but we don't have the parent (we promote them as orphans)
self.orphan_pids = self
.process_harvest
.iter()
.filter_map(|(pid, process_harvest)| match process_harvest.parent_pid {
Some(parent_pid) if self.process_harvest.contains_key(&parent_pid) => None,
_ => Some(*pid),
})
.collect();
}
}
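
// Illustrative sketch (not part of this diff): the orphan-promotion rule in
// `ProcessData::ingest` above, reduced to a standalone function over a
// simplified `pid -> parent_pid` map. A PID counts as an orphan if it has no
// parent, or if its parent was not harvested.
use std::collections::BTreeMap;

type Pid = i32; // simplified stand-in for the platform-specific `Pid` type

fn orphan_pids(processes: &BTreeMap<Pid, Option<Pid>>) -> Vec<Pid> {
    processes
        .iter()
        .filter_map(|(pid, parent_pid)| match parent_pid {
            Some(parent) if processes.contains_key(parent) => None,
            _ => Some(*pid),
        })
        .collect()
}

fn main() {
    let mut processes = BTreeMap::new();
    processes.insert(1, None); // no parent: a true orphan
    processes.insert(2, Some(1)); // parent 1 was harvested: not an orphan
    processes.insert(3, Some(999)); // parent 999 was not harvested: promoted to orphan
    assert_eq!(orphan_pids(&processes), vec![1, 3]);
}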

src/app/data/store.rs (new file)

@ -0,0 +1,326 @@
use std::{
time::{Duration, Instant},
vec::Vec,
};
#[cfg(feature = "battery")]
use crate::collection::batteries;
use crate::{
app::AppConfigFields,
collection::{cpu, disks, memory::MemHarvest, network, Data},
dec_bytes_per_second_string,
utils::data_units::DataUnit,
widgets::{DiskWidgetData, TempWidgetData},
};
use super::{ProcessData, TimeSeriesData};
/// A collection of data. This is where we dump data into.
///
/// TODO: Maybe reduce visibility of internal data, make it only accessible through DataStore?
#[derive(Debug, Clone)]
pub struct StoredData {
pub last_update_time: Instant, // FIXME: (points_rework_v1) remove this?
pub timeseries_data: TimeSeriesData, // FIXME: (points_rework_v1) Skip in basic?
pub network_harvest: network::NetworkHarvest,
pub ram_harvest: MemHarvest,
pub swap_harvest: Option<MemHarvest>,
#[cfg(not(target_os = "windows"))]
pub cache_harvest: Option<MemHarvest>,
#[cfg(feature = "zfs")]
pub arc_harvest: Option<MemHarvest>,
#[cfg(feature = "gpu")]
pub gpu_harvest: Vec<(String, MemHarvest)>,
pub cpu_harvest: cpu::CpuHarvest,
pub load_avg_harvest: cpu::LoadAvgHarvest,
pub process_data: ProcessData,
/// TODO: (points_rework_v1) Might be a better way to do this without having to store here?
pub prev_io: Vec<(u64, u64)>,
pub disk_harvest: Vec<DiskWidgetData>,
pub temp_data: Vec<TempWidgetData>,
#[cfg(feature = "battery")]
pub battery_harvest: Vec<batteries::BatteryData>,
}
impl Default for StoredData {
fn default() -> Self {
StoredData {
last_update_time: Instant::now(),
timeseries_data: TimeSeriesData::default(),
network_harvest: network::NetworkHarvest::default(),
ram_harvest: MemHarvest::default(),
#[cfg(not(target_os = "windows"))]
cache_harvest: None,
swap_harvest: None,
cpu_harvest: cpu::CpuHarvest::default(),
load_avg_harvest: cpu::LoadAvgHarvest::default(),
process_data: Default::default(),
prev_io: Vec::default(),
disk_harvest: Vec::default(),
temp_data: Vec::default(),
#[cfg(feature = "battery")]
battery_harvest: Vec::default(),
#[cfg(feature = "zfs")]
arc_harvest: None,
#[cfg(feature = "gpu")]
gpu_harvest: Vec::default(),
}
}
}
impl StoredData {
pub fn reset(&mut self) {
*self = StoredData::default();
}
#[allow(
clippy::boxed_local,
reason = "This avoids warnings on certain platforms (e.g. 32-bit)."
)]
fn eat_data(&mut self, mut data: Box<Data>, settings: &AppConfigFields) {
let harvested_time = data.collection_time;
// We must adjust all the network values to their selected type (defaults to bits).
if matches!(settings.network_unit_type, DataUnit::Byte) {
if let Some(network) = &mut data.network {
network.rx /= 8;
network.tx /= 8;
}
}
self.timeseries_data.add(&data);
if let Some(network) = data.network {
self.network_harvest = network;
}
if let Some(memory) = data.memory {
self.ram_harvest = memory;
}
self.swap_harvest = data.swap;
#[cfg(not(target_os = "windows"))]
{
self.cache_harvest = data.cache;
}
#[cfg(feature = "zfs")]
{
self.arc_harvest = data.arc;
}
#[cfg(feature = "gpu")]
if let Some(gpu) = data.gpu {
self.gpu_harvest = gpu;
}
if let Some(cpu) = data.cpu {
self.cpu_harvest = cpu;
}
if let Some(load_avg) = data.load_avg {
self.load_avg_harvest = load_avg;
}
self.temp_data = data
.temperature_sensors
.map(|sensors| {
sensors
.into_iter()
.map(|temp| TempWidgetData {
sensor: temp.name,
temperature: temp
.temperature
.map(|c| settings.temperature_type.convert_temp_unit(c)),
})
.collect()
})
.unwrap_or_default();
if let Some(disks) = data.disks {
if let Some(io) = data.io {
self.eat_disks(disks, io, harvested_time);
}
}
if let Some(list_of_processes) = data.list_of_processes {
self.process_data.ingest(list_of_processes);
}
#[cfg(feature = "battery")]
{
if let Some(list_of_batteries) = data.list_of_batteries {
self.battery_harvest = list_of_batteries;
}
}
// And we're done eating. Update the last update time.
self.last_update_time = harvested_time;
}
fn eat_disks(
&mut self, disks: Vec<disks::DiskHarvest>, io: disks::IoHarvest, harvested_time: Instant,
) {
let time_since_last_harvest = harvested_time
.duration_since(self.last_update_time)
.as_secs_f64();
self.disk_harvest.clear();
let prev_io_diff = disks.len().saturating_sub(self.prev_io.len());
self.prev_io.reserve(prev_io_diff);
self.prev_io.extend((0..prev_io_diff).map(|_| (0, 0)));
for (itx, device) in disks.into_iter().enumerate() {
let Some(checked_name) = ({
#[cfg(target_os = "windows")]
{
match &device.volume_name {
Some(volume_name) => Some(volume_name.as_str()),
None => device.name.split('/').last(),
}
}
#[cfg(not(target_os = "windows"))]
{
#[cfg(feature = "zfs")]
{
if !device.name.starts_with('/') {
Some(device.name.as_str()) // use the whole zfs
// dataset name
} else {
device.name.split('/').last()
}
}
#[cfg(not(feature = "zfs"))]
{
device.name.split('/').last()
}
}
}) else {
continue;
};
let io_device = {
#[cfg(target_os = "macos")]
{
use std::sync::OnceLock;
use regex::Regex;
// Must trim one level further for macOS!
static DISK_REGEX: OnceLock<Regex> = OnceLock::new();
#[expect(
clippy::regex_creation_in_loops,
reason = "this is fine since it's done via a static OnceLock. In the future though, separate it out."
)]
if let Some(new_name) = DISK_REGEX
.get_or_init(|| Regex::new(r"disk\d+").unwrap())
.find(checked_name)
{
io.get(new_name.as_str())
} else {
None
}
}
#[cfg(not(target_os = "macos"))]
{
io.get(checked_name)
}
};
let (mut io_read, mut io_write) = ("N/A".to_string(), "N/A".to_string());
if let Some(Some(io_device)) = io_device {
if let Some(prev_io) = self.prev_io.get_mut(itx) {
let r_rate = ((io_device.read_bytes.saturating_sub(prev_io.0)) as f64
/ time_since_last_harvest)
.round() as u64;
let w_rate = ((io_device.write_bytes.saturating_sub(prev_io.1)) as f64
/ time_since_last_harvest)
.round() as u64;
*prev_io = (io_device.read_bytes, io_device.write_bytes);
io_read = dec_bytes_per_second_string(r_rate);
io_write = dec_bytes_per_second_string(w_rate);
}
}
let summed_total_bytes = match (device.used_space, device.free_space) {
(Some(used), Some(free)) => Some(used + free),
_ => None,
};
self.disk_harvest.push(DiskWidgetData {
name: device.name,
mount_point: device.mount_point,
free_bytes: device.free_space,
used_bytes: device.used_space,
total_bytes: device.total_space,
summed_total_bytes,
io_read,
io_write,
});
}
}
}
/// If we freeze data collection updates, we want to return a "frozen" copy
/// of the data at the time, while still updating things in the background.
#[derive(Default)]
pub enum FrozenState {
#[default]
NotFrozen,
Frozen(Box<StoredData>),
}
/// What data to share to other parts of the application.
#[derive(Default)]
pub struct DataStore {
frozen_state: FrozenState,
main: StoredData,
}
impl DataStore {
/// Toggle whether the [`DataStore`] is frozen or not.
pub fn toggle_frozen(&mut self) {
match &self.frozen_state {
FrozenState::NotFrozen => {
self.frozen_state = FrozenState::Frozen(Box::new(self.main.clone()));
}
FrozenState::Frozen(_) => self.frozen_state = FrozenState::NotFrozen,
}
}
/// Return whether the [`DataStore`] is frozen or not.
pub fn is_frozen(&self) -> bool {
matches!(self.frozen_state, FrozenState::Frozen(_))
}
/// Return a reference to the currently available data. Note that if the data is
/// in a frozen state, it will return the snapshot of data from when it was frozen.
pub fn get_data(&self) -> &StoredData {
match &self.frozen_state {
FrozenState::NotFrozen => &self.main,
FrozenState::Frozen(collected_data) => collected_data,
}
}
/// Eat data.
pub fn eat_data(&mut self, data: Box<Data>, settings: &AppConfigFields) {
self.main.eat_data(data, settings);
}
/// Clean data.
pub fn clean_data(&mut self, max_duration: Duration) {
self.main.timeseries_data.prune(max_duration);
}
/// Reset data state.
pub fn reset(&mut self) {
self.frozen_state = FrozenState::NotFrozen;
self.main = StoredData::default();
}
}
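
// Illustrative sketch (not part of this diff): how the freeze/thaw snapshot
// above is meant to behave, mirrored with simplified stand-in types. While
// frozen, readers keep seeing the cloned snapshot even though `main` continues
// to be updated in the background.
#[derive(Clone, Default)]
struct FakeStoredData {
    ram_percent: f64,
}

#[derive(Default)]
enum FakeFrozenState {
    #[default]
    NotFrozen,
    Frozen(Box<FakeStoredData>),
}

#[derive(Default)]
struct FakeDataStore {
    frozen_state: FakeFrozenState,
    main: FakeStoredData,
}

impl FakeDataStore {
    fn toggle_frozen(&mut self) {
        match &self.frozen_state {
            FakeFrozenState::NotFrozen => {
                self.frozen_state = FakeFrozenState::Frozen(Box::new(self.main.clone()));
            }
            FakeFrozenState::Frozen(_) => self.frozen_state = FakeFrozenState::NotFrozen,
        }
    }

    fn get_data(&self) -> &FakeStoredData {
        match &self.frozen_state {
            FakeFrozenState::NotFrozen => &self.main,
            FakeFrozenState::Frozen(snapshot) => snapshot,
        }
    }
}

fn main() {
    let mut store = FakeDataStore::default();
    store.main.ram_percent = 10.0;
    store.toggle_frozen(); // snapshot taken here
    store.main.ram_percent = 50.0; // collection keeps writing in the background
    assert_eq!(store.get_data().ram_percent, 10.0); // readers still see the snapshot
    store.toggle_frozen(); // thaw
    assert_eq!(store.get_data().ram_percent, 50.0); // latest data visible again
}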


@ -0,0 +1,83 @@
//! Code around temperature data.
use std::{fmt::Display, str::FromStr};
#[derive(Clone, Debug, Copy, PartialEq, Eq, Default)]
pub enum TemperatureType {
#[default]
Celsius,
Kelvin,
Fahrenheit,
}
impl FromStr for TemperatureType {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"fahrenheit" | "f" => Ok(TemperatureType::Fahrenheit),
"kelvin" | "k" => Ok(TemperatureType::Kelvin),
"celsius" | "c" => Ok(TemperatureType::Celsius),
_ => Err(format!(
"'{s}' is an invalid temperature type, use one of: [kelvin, k, celsius, c, fahrenheit, f]."
)),
}
}
}
impl TemperatureType {
/// Given a temperature in Celsius, convert it if necessary for a different
/// unit.
pub fn convert_temp_unit(&self, celsius: f32) -> TypedTemperature {
match self {
TemperatureType::Celsius => TypedTemperature::Celsius(celsius.ceil() as u32),
TemperatureType::Kelvin => TypedTemperature::Kelvin((celsius + 273.15).ceil() as u32),
TemperatureType::Fahrenheit => {
TypedTemperature::Fahrenheit(((celsius * (9.0 / 5.0)) + 32.0).ceil() as u32)
}
}
}
}
/// A temperature and its type.
#[derive(Debug, PartialEq, Clone, Eq, PartialOrd, Ord)]
pub enum TypedTemperature {
Celsius(u32),
Kelvin(u32),
Fahrenheit(u32),
}
impl Display for TypedTemperature {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
TypedTemperature::Celsius(val) => write!(f, "{val}°C"),
TypedTemperature::Kelvin(val) => write!(f, "{val}K"),
TypedTemperature::Fahrenheit(val) => write!(f, "{val}°F"),
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn temp_conversions() {
const TEMP: f32 = 100.0;
assert_eq!(
TemperatureType::Celsius.convert_temp_unit(TEMP),
TypedTemperature::Celsius(TEMP as u32),
);
assert_eq!(
TemperatureType::Kelvin.convert_temp_unit(TEMP),
TypedTemperature::Kelvin(373.15_f32.ceil() as u32)
);
assert_eq!(
TemperatureType::Fahrenheit.convert_temp_unit(TEMP),
TypedTemperature::Fahrenheit(212)
);
}
}

src/app/data/time_series.rs (new file)

@ -0,0 +1,227 @@
//! Time series data.
use std::{
cmp::Ordering,
time::{Duration, Instant},
vec::Vec,
};
#[cfg(feature = "gpu")]
use hashbrown::{HashMap, HashSet}; // TODO: Try fxhash again.
use timeless::data::ChunkedData;
use crate::collection::Data;
/// Values corresponding to a time slice.
pub type Values = ChunkedData<f64>;
/// Represents time series data in a chunked, deduped manner.
///
/// Properties:
/// - Time in this manner is represented in a reverse-offset fashion from the current time.
/// - All data is stored in SoA fashion.
/// - Values are stored in a chunked format, which facilitates gaps in data collection if needed.
/// - Additional metadata is stored to make data pruning over time easy.
#[derive(Clone, Debug, Default)]
pub struct TimeSeriesData {
/// Time values.
pub time: Vec<Instant>,
/// Network RX data.
pub rx: Values,
/// Network TX data.
pub tx: Values,
/// CPU data.
pub cpu: Vec<Values>,
/// RAM memory data.
pub ram: Values,
/// Swap data.
pub swap: Values,
#[cfg(not(target_os = "windows"))]
/// Cache data.
pub cache_mem: Values,
#[cfg(feature = "zfs")]
/// Arc data.
pub arc_mem: Values,
#[cfg(feature = "gpu")]
/// GPU memory data.
pub gpu_mem: HashMap<String, Values>,
}
impl TimeSeriesData {
/// Add a new data point.
pub fn add(&mut self, data: &Data) {
self.time.push(data.collection_time);
if let Some(network) = &data.network {
self.rx.push(network.rx as f64);
self.tx.push(network.tx as f64);
} else {
self.rx.insert_break();
self.tx.insert_break();
}
if let Some(cpu) = &data.cpu {
match self.cpu.len().cmp(&cpu.len()) {
Ordering::Less => {
let diff = cpu.len() - self.cpu.len();
self.cpu.reserve_exact(diff);
for _ in 0..diff {
self.cpu.push(Default::default());
}
}
Ordering::Greater => {
let diff = self.cpu.len() - cpu.len();
let offset = self.cpu.len() - diff;
for curr in &mut self.cpu[offset..] {
curr.insert_break();
}
}
Ordering::Equal => {}
}
for (curr, new_data) in self.cpu.iter_mut().zip(cpu.iter()) {
curr.push(new_data.cpu_usage);
}
} else {
for c in &mut self.cpu {
c.insert_break();
}
}
if let Some(memory) = &data.memory {
self.ram.try_push(memory.checked_percent());
} else {
self.ram.insert_break();
}
if let Some(swap) = &data.swap {
self.swap.try_push(swap.checked_percent());
} else {
self.swap.insert_break();
}
#[cfg(not(target_os = "windows"))]
{
if let Some(cache) = &data.cache {
self.cache_mem.try_push(cache.checked_percent());
} else {
self.cache_mem.insert_break();
}
}
#[cfg(feature = "zfs")]
{
if let Some(arc) = &data.arc {
self.arc_mem.try_push(arc.checked_percent());
} else {
self.arc_mem.insert_break();
}
}
#[cfg(feature = "gpu")]
{
if let Some(gpu) = &data.gpu {
let mut not_visited = self
.gpu_mem
.keys()
.map(String::to_owned)
.collect::<HashSet<_>>();
for (name, new_data) in gpu {
not_visited.remove(name);
if !self.gpu_mem.contains_key(name) {
self.gpu_mem
.insert(name.to_string(), ChunkedData::default());
}
let curr = self
.gpu_mem
.get_mut(name)
.expect("entry must exist as it was created above");
curr.try_push(new_data.checked_percent());
}
for nv in not_visited {
if let Some(entry) = self.gpu_mem.get_mut(&nv) {
entry.insert_break();
}
}
} else {
for g in self.gpu_mem.values_mut() {
g.insert_break();
}
}
}
}
/// Prune any data older than the given duration.
pub fn prune(&mut self, max_age: Duration) {
if self.time.is_empty() {
return;
}
let now = Instant::now();
let end = {
let partition_point = self
.time
.partition_point(|then| now.duration_since(*then) > max_age);
// Partition point returns the first index that does not match the predicate, so minus one.
if partition_point > 0 {
partition_point - 1
} else {
// If the partition point was 0, then it means all values are too new to be pruned.
crate::info!("Skipping prune.");
return;
}
};
crate::info!("Pruning up to index {end}.");
// Note that end here is _inclusive_.
self.time.drain(0..=end);
self.time.shrink_to_fit();
let _ = self.rx.prune_and_shrink_to_fit(end);
let _ = self.tx.prune_and_shrink_to_fit(end);
for cpu in &mut self.cpu {
let _ = cpu.prune_and_shrink_to_fit(end);
}
let _ = self.ram.prune_and_shrink_to_fit(end);
let _ = self.swap.prune_and_shrink_to_fit(end);
#[cfg(not(target_os = "windows"))]
let _ = self.cache_mem.prune_and_shrink_to_fit(end);
#[cfg(feature = "zfs")]
let _ = self.arc_mem.prune_and_shrink_to_fit(end);
#[cfg(feature = "gpu")]
{
self.gpu_mem.retain(|_, gpu| {
let _ = gpu.prune(end);
// Remove the entry if it is empty. We can always add it again later.
if gpu.no_elements() {
false
} else {
gpu.shrink_to_fit();
true
}
});
}
}
}
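
// Illustrative sketch (not part of this diff): the prune cut-off computed
// above, reduced to a standalone function. `partition_point` returns the index
// of the first timestamp that is *not* older than `max_age`; everything before
// it can be dropped, so the inclusive end index is `partition_point - 1`.
use std::time::{Duration, Instant};

fn prune_end_index(time: &[Instant], now: Instant, max_age: Duration) -> Option<usize> {
    let partition_point = time.partition_point(|then| now.duration_since(*then) > max_age);
    // `None` means every sample is still new enough, so nothing gets pruned.
    partition_point.checked_sub(1)
}

fn main() {
    let now = Instant::now();
    // Samples that are 10s, 9s, 8s, 7s, and 6s old, oldest first.
    let time: Vec<Instant> = (0..5u64).map(|i| now - Duration::from_secs(10 - i)).collect();
    // With an 8s retention window, only the 10s- and 9s-old samples are pruned.
    assert_eq!(prune_end_index(&time, now, Duration::from_secs(8)), Some(1));
    // With a 60s window, nothing is old enough to prune.
    assert_eq!(prune_end_index(&time, now, Duration::from_secs(60)), None);
}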


@ -1,468 +0,0 @@
//! In charge of cleaning, processing, and managing data. I couldn't think of
//! a better name for the file. Since I called data collection "harvesting",
//! then this is the farmer I guess.
//!
//! Essentially the main goal is to shift the initial calculation and
//! distribution of joiner points and data to one central location that will
//! only do it *once* upon receiving the data --- as opposed to doing it on
//! canvas draw, which will be a costly process.
//!
//! This will also handle the *cleaning* of stale data. That should be done
//! in some manner (timer on another thread, some loop) that will occasionally
//! call the purging function. Failure to do so *will* result in a growing
//! memory usage and higher CPU usage - you will be trying to process more and
//! more points as this is used!
use std::{collections::BTreeMap, time::Instant, vec::Vec};
use hashbrown::HashMap;
#[cfg(feature = "battery")]
use crate::data_collection::batteries;
use crate::{
data_collection::{
cpu, disks, memory, network,
processes::{Pid, ProcessHarvest},
temperature, Data,
},
dec_bytes_per_second_string,
};
#[derive(Debug, Default, Clone)]
pub struct TimedData {
pub rx_data: f64,
pub tx_data: f64,
pub cpu_data: Vec<f64>,
pub mem_data: Option<f64>,
#[cfg(not(target_os = "windows"))]
pub cache_data: Option<f64>,
pub swap_data: Option<f64>,
#[cfg(feature = "zfs")]
pub arc_data: Option<f64>,
#[cfg(feature = "gpu")]
pub gpu_data: Vec<Option<f64>>,
}
#[derive(Clone, Debug, Default)]
pub struct ProcessData {
/// A PID to process data map.
pub process_harvest: BTreeMap<Pid, ProcessHarvest>,
/// A mapping between a process PID to any children process PIDs.
pub process_parent_mapping: HashMap<Pid, Vec<Pid>>,
/// PIDs corresponding to processes that have no parents.
pub orphan_pids: Vec<Pid>,
}
impl ProcessData {
fn ingest(&mut self, list_of_processes: Vec<ProcessHarvest>) {
self.process_parent_mapping.clear();
// Reverse as otherwise the pid mappings are in the wrong order.
list_of_processes.iter().rev().for_each(|process_harvest| {
if let Some(parent_pid) = process_harvest.parent_pid {
if let Some(entry) = self.process_parent_mapping.get_mut(&parent_pid) {
entry.push(process_harvest.pid);
} else {
self.process_parent_mapping
.insert(parent_pid, vec![process_harvest.pid]);
}
}
});
self.process_parent_mapping.shrink_to_fit();
let process_pid_map = list_of_processes
.into_iter()
.map(|process| (process.pid, process))
.collect();
self.process_harvest = process_pid_map;
// We collect all processes that either:
// - Do not have a parent PID (that is, they are orphan processes)
// - Have a parent PID but we don't have the parent (we promote them as orphans)
self.orphan_pids = self
.process_harvest
.iter()
.filter_map(|(pid, process_harvest)| match process_harvest.parent_pid {
Some(parent_pid) if self.process_harvest.contains_key(&parent_pid) => None,
_ => Some(*pid),
})
.collect();
}
}
/// AppCollection represents the pooled data stored within the main app
/// thread. Basically stores a (occasionally cleaned) record of the data
/// collected, and what is needed to convert into a displayable form.
///
/// If the app is *frozen* - that is, we do not want to *display* any changing
/// data, keep updating this. As of 2021-09-08, we just clone the current
/// collection when it freezes to have a snapshot floating around.
///
/// Note that with this method, the *app* thread is responsible for cleaning -
/// not the data collector.
#[derive(Debug, Clone)]
pub struct DataCollection {
pub current_instant: Instant,
pub timed_data_vec: Vec<(Instant, TimedData)>,
pub network_harvest: network::NetworkHarvest,
pub memory_harvest: memory::MemHarvest,
#[cfg(not(target_os = "windows"))]
pub cache_harvest: memory::MemHarvest,
pub swap_harvest: memory::MemHarvest,
pub cpu_harvest: cpu::CpuHarvest,
pub load_avg_harvest: cpu::LoadAvgHarvest,
pub process_data: ProcessData,
pub disk_harvest: Vec<disks::DiskHarvest>,
pub io_harvest: disks::IoHarvest,
pub io_labels_and_prev: Vec<((u64, u64), (u64, u64))>,
pub io_labels: Vec<(String, String)>,
pub temp_harvest: Vec<temperature::TempHarvest>,
#[cfg(feature = "battery")]
pub battery_harvest: Vec<batteries::BatteryData>,
#[cfg(feature = "zfs")]
pub arc_harvest: memory::MemHarvest,
#[cfg(feature = "gpu")]
pub gpu_harvest: Vec<(String, memory::MemHarvest)>,
}
impl Default for DataCollection {
fn default() -> Self {
DataCollection {
current_instant: Instant::now(),
timed_data_vec: Vec::default(),
network_harvest: network::NetworkHarvest::default(),
memory_harvest: memory::MemHarvest::default(),
#[cfg(not(target_os = "windows"))]
cache_harvest: memory::MemHarvest::default(),
swap_harvest: memory::MemHarvest::default(),
cpu_harvest: cpu::CpuHarvest::default(),
load_avg_harvest: cpu::LoadAvgHarvest::default(),
process_data: Default::default(),
disk_harvest: Vec::default(),
io_harvest: disks::IoHarvest::default(),
io_labels_and_prev: Vec::default(),
io_labels: Vec::default(),
temp_harvest: Vec::default(),
#[cfg(feature = "battery")]
battery_harvest: Vec::default(),
#[cfg(feature = "zfs")]
arc_harvest: memory::MemHarvest::default(),
#[cfg(feature = "gpu")]
gpu_harvest: Vec::default(),
}
}
}
impl DataCollection {
pub fn reset(&mut self) {
self.timed_data_vec = Vec::default();
self.network_harvest = network::NetworkHarvest::default();
self.memory_harvest = memory::MemHarvest::default();
self.swap_harvest = memory::MemHarvest::default();
self.cpu_harvest = cpu::CpuHarvest::default();
self.process_data = Default::default();
self.disk_harvest = Vec::default();
self.io_harvest = disks::IoHarvest::default();
self.io_labels_and_prev = Vec::default();
self.temp_harvest = Vec::default();
#[cfg(feature = "battery")]
{
self.battery_harvest = Vec::default();
}
#[cfg(feature = "zfs")]
{
self.arc_harvest = memory::MemHarvest::default();
}
#[cfg(feature = "gpu")]
{
self.gpu_harvest = Vec::default();
}
}
pub fn clean_data(&mut self, max_time_millis: u64) {
let current_time = Instant::now();
let remove_index = match self
.timed_data_vec
.binary_search_by(|(instant, _timed_data)| {
current_time
.duration_since(*instant)
.as_millis()
.cmp(&(max_time_millis.into()))
.reverse()
}) {
Ok(index) => index,
Err(index) => index,
};
self.timed_data_vec.drain(0..remove_index);
self.timed_data_vec.shrink_to_fit();
}
#[allow(
clippy::boxed_local,
reason = "Clippy allow to avoid warning on certain platforms (e.g. 32-bit)."
)]
pub fn eat_data(&mut self, harvested_data: Box<Data>) {
let harvested_time = harvested_data.collection_time;
let mut new_entry = TimedData::default();
// Network
if let Some(network) = harvested_data.network {
self.eat_network(network, &mut new_entry);
}
// Memory, Swap
if let (Some(memory), Some(swap)) = (harvested_data.memory, harvested_data.swap) {
self.eat_memory_and_swap(memory, swap, &mut new_entry);
}
// Cache memory
#[cfg(not(target_os = "windows"))]
if let Some(cache) = harvested_data.cache {
self.eat_cache(cache, &mut new_entry);
}
#[cfg(feature = "zfs")]
if let Some(arc) = harvested_data.arc {
self.eat_arc(arc, &mut new_entry);
}
#[cfg(feature = "gpu")]
if let Some(gpu) = harvested_data.gpu {
self.eat_gpu(gpu, &mut new_entry);
}
// CPU
if let Some(cpu) = harvested_data.cpu {
self.eat_cpu(cpu, &mut new_entry);
}
// Load average
if let Some(load_avg) = harvested_data.load_avg {
self.eat_load_avg(load_avg);
}
// Temp
if let Some(temperature_sensors) = harvested_data.temperature_sensors {
self.eat_temp(temperature_sensors);
}
// Disks
if let Some(disks) = harvested_data.disks {
if let Some(io) = harvested_data.io {
self.eat_disks(disks, io, harvested_time);
}
}
// Processes
if let Some(list_of_processes) = harvested_data.list_of_processes {
self.eat_proc(list_of_processes);
}
#[cfg(feature = "battery")]
{
// Battery
if let Some(list_of_batteries) = harvested_data.list_of_batteries {
self.eat_battery(list_of_batteries);
}
}
// And we're done eating. Update time and push the new entry!
self.current_instant = harvested_time;
self.timed_data_vec.push((harvested_time, new_entry));
}
fn eat_memory_and_swap(
&mut self, memory: memory::MemHarvest, swap: memory::MemHarvest, new_entry: &mut TimedData,
) {
new_entry.mem_data = memory.checked_percent();
new_entry.swap_data = swap.checked_percent();
// In addition copy over latest data for easy reference
self.memory_harvest = memory;
self.swap_harvest = swap;
}
#[cfg(not(target_os = "windows"))]
fn eat_cache(&mut self, cache: memory::MemHarvest, new_entry: &mut TimedData) {
new_entry.cache_data = cache.checked_percent();
self.cache_harvest = cache;
}
fn eat_network(&mut self, network: network::NetworkHarvest, new_entry: &mut TimedData) {
// RX
if network.rx > 0 {
new_entry.rx_data = network.rx as f64;
}
// TX
if network.tx > 0 {
new_entry.tx_data = network.tx as f64;
}
// In addition copy over latest data for easy reference
self.network_harvest = network;
}
fn eat_cpu(&mut self, cpu: Vec<cpu::CpuData>, new_entry: &mut TimedData) {
// Note this only pre-calculates the data points - the names will be
// within the local copy of cpu_harvest. Since it's all sequential
// it probably doesn't matter anyways.
cpu.iter()
.for_each(|cpu| new_entry.cpu_data.push(cpu.cpu_usage));
self.cpu_harvest = cpu;
}
fn eat_load_avg(&mut self, load_avg: cpu::LoadAvgHarvest) {
self.load_avg_harvest = load_avg;
}
fn eat_temp(&mut self, temperature_sensors: Vec<temperature::TempHarvest>) {
self.temp_harvest = temperature_sensors;
}
fn eat_disks(
&mut self, disks: Vec<disks::DiskHarvest>, io: disks::IoHarvest, harvested_time: Instant,
) {
let time_since_last_harvest = harvested_time
.duration_since(self.current_instant)
.as_secs_f64();
for (itx, device) in disks.iter().enumerate() {
let checked_name = {
#[cfg(target_os = "windows")]
{
match &device.volume_name {
Some(volume_name) => Some(volume_name.as_str()),
None => device.name.split('/').last(),
}
}
#[cfg(not(target_os = "windows"))]
{
#[cfg(feature = "zfs")]
{
if !device.name.starts_with('/') {
Some(device.name.as_str()) // use the whole zfs
// dataset name
} else {
device.name.split('/').last()
}
}
#[cfg(not(feature = "zfs"))]
{
device.name.split('/').last()
}
}
};
if let Some(checked_name) = checked_name {
let io_device = {
#[cfg(target_os = "macos")]
{
use std::sync::OnceLock;
use regex::Regex;
// Must trim one level further for macOS!
static DISK_REGEX: OnceLock<Regex> = OnceLock::new();
#[expect(
clippy::regex_creation_in_loops,
reason = "this is fine since it's done via a static OnceLock. In the future though, separate it out."
)]
if let Some(new_name) = DISK_REGEX
.get_or_init(|| Regex::new(r"disk\d+").unwrap())
.find(checked_name)
{
io.get(new_name.as_str())
} else {
None
}
}
#[cfg(not(target_os = "macos"))]
{
io.get(checked_name)
}
};
if let Some(io_device) = io_device {
let (io_r_pt, io_w_pt) = if let Some(io) = io_device {
(io.read_bytes, io.write_bytes)
} else {
(0, 0)
};
if self.io_labels.len() <= itx {
self.io_labels.push((String::default(), String::default()));
}
if self.io_labels_and_prev.len() <= itx {
self.io_labels_and_prev.push(((0, 0), (io_r_pt, io_w_pt)));
}
if let Some((io_curr, io_prev)) = self.io_labels_and_prev.get_mut(itx) {
let r_rate = ((io_r_pt.saturating_sub(io_prev.0)) as f64
/ time_since_last_harvest)
.round() as u64;
let w_rate = ((io_w_pt.saturating_sub(io_prev.1)) as f64
/ time_since_last_harvest)
.round() as u64;
*io_curr = (r_rate, w_rate);
*io_prev = (io_r_pt, io_w_pt);
// TODO: idk why I'm generating this here tbh
if let Some(io_labels) = self.io_labels.get_mut(itx) {
*io_labels = (
dec_bytes_per_second_string(r_rate),
dec_bytes_per_second_string(w_rate),
);
}
}
} else {
if self.io_labels.len() <= itx {
self.io_labels.push((String::default(), String::default()));
}
if let Some(io_labels) = self.io_labels.get_mut(itx) {
*io_labels = ("N/A".to_string(), "N/A".to_string());
}
}
}
}
self.disk_harvest = disks;
self.io_harvest = io;
}
fn eat_proc(&mut self, list_of_processes: Vec<ProcessHarvest>) {
self.process_data.ingest(list_of_processes);
}
#[cfg(feature = "battery")]
fn eat_battery(&mut self, list_of_batteries: Vec<batteries::BatteryData>) {
self.battery_harvest = list_of_batteries;
}
#[cfg(feature = "zfs")]
fn eat_arc(&mut self, arc: memory::MemHarvest, new_entry: &mut TimedData) {
new_entry.arc_data = arc.checked_percent();
self.arc_harvest = arc;
}
#[cfg(feature = "gpu")]
fn eat_gpu(&mut self, gpu: Vec<(String, memory::MemHarvest)>, new_entry: &mut TimedData) {
// Note this only pre-calculates the data points - the names will be
// within the local copy of gpu_harvest. Since it's all sequential
// it probably doesn't matter anyways.
gpu.iter().for_each(|data| {
new_entry.gpu_data.push(data.1.checked_percent());
});
self.gpu_harvest = gpu;
}
}


@ -1,46 +0,0 @@
use super::DataCollection;
/// The [`FrozenState`] indicates whether the application state should be
/// frozen. It is either not frozen or frozen and containing a copy of the state
/// at the time.
pub enum FrozenState {
NotFrozen,
Frozen(Box<DataCollection>),
}
impl Default for FrozenState {
fn default() -> Self {
Self::NotFrozen
}
}
pub type IsFrozen = bool;
impl FrozenState {
/// Checks whether the [`FrozenState`] is currently frozen.
pub fn is_frozen(&self) -> IsFrozen {
matches!(self, FrozenState::Frozen(_))
}
/// Freezes the [`FrozenState`].
pub fn freeze(&mut self, data: Box<DataCollection>) {
*self = FrozenState::Frozen(data);
}
/// Unfreezes the [`FrozenState`].
pub fn thaw(&mut self) {
*self = FrozenState::NotFrozen;
}
/// Toggles the [`FrozenState`] and returns whether it is now frozen.
pub fn toggle(&mut self, data: &DataCollection) -> IsFrozen {
if self.is_frozen() {
self.thaw();
false
} else {
// Could we use an Arc instead? Is it worth it?
self.freeze(Box::new(data.clone()));
true
}
}
}


@ -10,7 +10,7 @@ use windows::Win32::{
},
};
use crate::data_collection::processes::Pid;
use crate::collection::processes::Pid;
/// Based from [this SO answer](https://stackoverflow.com/a/55231715).
#[cfg(target_os = "windows")]


@ -285,30 +285,22 @@ impl ProcState {
}
pub struct NetState {
pub force_update: Option<u64>,
pub widget_states: HashMap<u64, NetWidgetState>,
}
impl NetState {
pub fn init(widget_states: HashMap<u64, NetWidgetState>) -> Self {
NetState {
force_update: None,
widget_states,
}
NetState { widget_states }
}
}
pub struct CpuState {
pub force_update: Option<u64>,
pub widget_states: HashMap<u64, CpuWidgetState>,
}
impl CpuState {
pub fn init(widget_states: HashMap<u64, CpuWidgetState>) -> Self {
CpuState {
force_update: None,
widget_states,
}
CpuState { widget_states }
}
pub fn get_mut_widget_state(&mut self, widget_id: u64) -> Option<&mut CpuWidgetState> {
@ -321,16 +313,12 @@ impl CpuState {
}
pub struct MemState {
pub force_update: Option<u64>,
pub widget_states: HashMap<u64, MemWidgetState>,
}
impl MemState {
pub fn init(widget_states: HashMap<u64, MemWidgetState>) -> Self {
MemState {
force_update: None,
widget_states,
}
MemState { widget_states }
}
}


@ -174,7 +174,7 @@ impl Painter {
use BottomWidgetType::*;
terminal.draw(|f| {
let (terminal_size, frozen_draw_loc) = if app_state.frozen_state.is_frozen() {
let (terminal_size, frozen_draw_loc) = if app_state.data_store.is_frozen() {
// TODO: Remove built-in cache?
let split_loc = Layout::default()
.constraints([Constraint::Min(0), Constraint::Length(1)])
@ -343,13 +343,14 @@ impl Painter {
_ => {}
}
} else if app_state.app_config_fields.use_basic_mode {
// Basic mode. This basically removes all graphs but otherwise
// the same info.
if let Some(frozen_draw_loc) = frozen_draw_loc {
self.draw_frozen_indicator(f, frozen_draw_loc);
}
let actual_cpu_data_len = app_state.converted_data.cpu_data.len().saturating_sub(1);
let data = app_state.data_store.get_data();
let actual_cpu_data_len = data.cpu_harvest.len();
// This fixes #397, apparently if the height is 1, it can't render the CPU
// bars...
@ -370,29 +371,27 @@ impl Painter {
let mut mem_rows = 1;
if app_state.converted_data.swap_labels.is_some() {
if data.swap_harvest.is_some() {
mem_rows += 1; // add row for swap
}
#[cfg(feature = "zfs")]
{
if app_state.converted_data.arc_labels.is_some() {
if data.arc_harvest.is_some() {
mem_rows += 1; // add row for arc
}
}
#[cfg(not(target_os = "windows"))]
{
if app_state.converted_data.cache_labels.is_some() {
if data.cache_harvest.is_some() {
mem_rows += 1;
}
}
#[cfg(feature = "gpu")]
{
if let Some(gpu_data) = &app_state.converted_data.gpu_data {
mem_rows += gpu_data.len() as u16; // add row(s) for gpu
}
mem_rows += data.gpu_harvest.len() as u16; // add row(s) for gpu
}
if mem_rows == 1 {


@ -1,9 +1,6 @@
//! Lower-level components used throughout bottom.
pub mod data_table;
pub mod pipe_gauge;
pub mod time_graph;
mod tui_widget;
pub mod widget_carousel;
pub use tui_widget::*;


@ -29,6 +29,8 @@ use crate::utils::general::ClampExt;
/// - [`Sortable`]: This table expects sorted data, and there are helper
/// functions to facilitate things like sorting based on a selected column,
/// shortcut column selection support, mouse column selection support, etc.
///
/// FIXME: We already do all the text width checks - can we skip the underlying ones?
pub struct DataTable<DataType, Header, S = Unsortable, C = Column<Header>> {
pub columns: Vec<C>,
pub state: DataTableState,


@ -1,4 +1,7 @@
use std::borrow::Cow;
mod time_chart;
pub use time_chart::*;
use std::{borrow::Cow, time::Instant};
use concat_string::concat_string;
use tui::{
@ -10,29 +13,50 @@ use tui::{
Frame,
};
use crate::canvas::drawing_utils::widget_block;
use super::time_chart::{
Axis, Dataset, LegendPosition, Point, TimeChart, DEFAULT_LEGEND_CONSTRAINTS,
};
use crate::{app::data::Values, canvas::drawing_utils::widget_block};
/// Represents the data required by the [`TimeGraph`].
pub struct GraphData<'a> {
pub points: &'a [Point],
pub style: Style,
pub name: Option<Cow<'a, str>>,
///
/// TODO: We may be able to get rid of this intermediary data structure.
#[derive(Default)]
pub(crate) struct GraphData<'a> {
time: &'a [Instant],
values: Option<&'a Values>,
style: Style,
name: Option<Cow<'a, str>>,
}
impl<'a> GraphData<'a> {
pub fn time(mut self, time: &'a [Instant]) -> Self {
self.time = time;
self
}
pub fn values(mut self, values: &'a Values) -> Self {
self.values = Some(values);
self
}
pub fn style(mut self, style: Style) -> Self {
self.style = style;
self
}
pub fn name(mut self, name: Cow<'a, str>) -> Self {
self.name = Some(name);
self
}
}
pub struct TimeGraph<'a> {
/// The min and max x boundaries. Expects a f64 representing the time range
/// in milliseconds.
pub x_bounds: [u64; 2],
/// The min x value.
pub x_min: f64,
/// Whether to hide the time/x-labels.
pub hide_x_labels: bool,
/// The min and max y boundaries.
pub y_bounds: [f64; 2],
pub y_bounds: AxisBound,
/// Any y-labels.
pub y_labels: &'a [Cow<'a, str>],
@ -67,24 +91,26 @@ pub struct TimeGraph<'a> {
/// The marker type. Unlike ratatui's native charts, we assume
/// only a single type of marker.
pub marker: Marker,
/// The chart scaling.
pub scaling: ChartScaling,
}
impl TimeGraph<'_> {
/// Generates the [`Axis`] for the x-axis.
fn generate_x_axis(&self) -> Axis<'_> {
// Due to how we display things, we need to adjust the time bound values.
let time_start = -(self.x_bounds[1] as f64);
let adjusted_x_bounds = [time_start, 0.0];
let adjusted_x_bounds = AxisBound::Min(self.x_min);
if self.hide_x_labels {
Axis::default().bounds(adjusted_x_bounds)
} else {
let xb_one = (self.x_bounds[1] / 1000).to_string();
let xb_zero = (self.x_bounds[0] / 1000).to_string();
let x_bound_left = ((-self.x_min) as u64 / 1000).to_string();
let x_bound_right = "0s";
let x_labels = vec![
Span::styled(concat_string!(xb_one, "s"), self.graph_style),
Span::styled(concat_string!(xb_zero, "s"), self.graph_style),
Span::styled(concat_string!(x_bound_left, "s"), self.graph_style),
Span::styled(x_bound_right, self.graph_style),
];
Axis::default()
@ -116,13 +142,14 @@ impl TimeGraph<'_> {
/// graph.
/// - Expects `graph_data`, which represents *what* data to draw, and
/// various details like style and optional legends.
pub fn draw_time_graph(&self, f: &mut Frame<'_>, draw_loc: Rect, graph_data: &[GraphData<'_>]) {
pub fn draw_time_graph(
&self, f: &mut Frame<'_>, draw_loc: Rect, graph_data: Vec<GraphData<'_>>,
) {
// TODO: (points_rework_v1) can we reduce allocations in the underlying graph by saving some sort of state?
let x_axis = self.generate_x_axis();
let y_axis = self.generate_y_axis();
// This is some ugly manual loop unswitching. Maybe unnecessary.
// TODO: Optimize this step. Cut out unneeded points.
let data = graph_data.iter().map(create_dataset).collect();
let data = graph_data.into_iter().map(create_dataset).collect();
let block = {
let mut b = widget_block(false, self.is_selected, self.border_type)
@ -147,30 +174,38 @@ impl TimeGraph<'_> {
.hidden_legend_constraints(
self.legend_constraints
.unwrap_or(DEFAULT_LEGEND_CONSTRAINTS),
),
)
.scaling(self.scaling),
draw_loc,
)
}
}
/// Creates a new [`Dataset`].
fn create_dataset<'a>(data: &'a GraphData<'a>) -> Dataset<'a> {
fn create_dataset(data: GraphData<'_>) -> Dataset<'_> {
let GraphData {
points,
time,
values,
style,
name,
} = data;
let Some(values) = values else {
return Dataset::default();
};
let dataset = Dataset::default()
.style(*style)
.data(points)
.style(style)
.data(time, values)
.graph_type(GraphType::Line);
if let Some(name) = name {
dataset.name(name.as_ref())
let dataset = if let Some(name) = name {
dataset.name(name)
} else {
dataset
}
};
dataset
}
#[cfg(test)]
@ -184,8 +219,8 @@ mod test {
widgets::BorderType,
};
use super::TimeGraph;
use crate::canvas::components::time_chart::Axis;
use super::{AxisBound, ChartScaling, TimeGraph};
use crate::canvas::components::time_graph::Axis;
const Y_LABELS: [Cow<'static, str>; 3] = [
Cow::Borrowed("0%"),
@ -196,9 +231,9 @@ mod test {
fn create_time_graph() -> TimeGraph<'static> {
TimeGraph {
title: " Network ".into(),
x_bounds: [0, 15000],
x_min: -15000.0,
hide_x_labels: false,
y_bounds: [0.0, 100.5],
y_bounds: AxisBound::Max(100.5),
y_labels: &Y_LABELS,
graph_style: Style::default().fg(Color::Red),
border_style: Style::default().fg(Color::Blue),
@ -209,6 +244,7 @@ mod test {
legend_position: None,
legend_constraints: None,
marker: Marker::Braille,
scaling: ChartScaling::Linear,
}
}
@ -219,7 +255,7 @@ mod test {
let x_axis = tg.generate_x_axis();
let actual = Axis::default()
.bounds([-15000.0, 0.0])
.bounds(AxisBound::Min(-15000.0))
.labels(vec![Span::styled("15s", style), Span::styled("0s", style)])
.style(style);
assert_eq!(x_axis.bounds, actual.bounds);
@ -234,7 +270,7 @@ mod test {
let y_axis = tg.generate_y_axis();
let actual = Axis::default()
.bounds([0.0, 100.5])
.bounds(AxisBound::Max(100.5))
.labels(vec![
Span::styled("0%", style),
Span::styled("50%", style),


@ -7,7 +7,7 @@
mod canvas;
mod points;
use std::{cmp::max, str::FromStr};
use std::{cmp::max, str::FromStr, time::Instant};
use canvas::*;
use tui::{
@ -20,12 +20,40 @@ use tui::{
};
use unicode_width::UnicodeWidthStr;
use crate::{
app::data::Values,
utils::general::{saturating_log10, saturating_log2},
};
pub const DEFAULT_LEGEND_CONSTRAINTS: (Constraint, Constraint) =
(Constraint::Ratio(1, 4), Constraint::Length(4));
/// A single graph point.
pub type Point = (f64, f64);
/// An axis bound type. This lets us avoid storing an extra f64, since the
/// bounds are usually either [0.0, a] or [-b, 0.0].
#[derive(Debug, Default, Clone, Copy, PartialEq)]
pub enum AxisBound {
/// Just 0.
#[default]
Zero,
/// Bound by a minimum value to 0.
Min(f64),
/// Bound by 0 and a max value.
Max(f64),
}
impl AxisBound {
fn get_bounds(&self) -> [f64; 2] {
match self {
AxisBound::Zero => [0.0, 0.0],
AxisBound::Min(min) => [*min, 0.0],
AxisBound::Max(max) => [0.0, *max],
}
}
}
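// A minimal sanity-check sketch of how these compact bounds expand back into
// the [min, max] arrays that the underlying canvas expects:
#[cfg(test)]
mod axis_bound_sketch {
    use super::AxisBound;

    #[test]
    fn expands_to_min_max_pairs() {
        assert_eq!(AxisBound::Zero.get_bounds(), [0.0, 0.0]);
        assert_eq!(AxisBound::Min(-15000.0).get_bounds(), [-15000.0, 0.0]); // x-axis style bound
        assert_eq!(AxisBound::Max(100.5).get_bounds(), [0.0, 100.5]); // y-axis style bound
    }
}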
/// An X or Y axis for the [`TimeChart`] widget
#[derive(Debug, Default, Clone, PartialEq)]
pub struct Axis<'a> {
@ -33,7 +61,7 @@ pub struct Axis<'a> {
pub(crate) title: Option<Line<'a>>,
/// Bounds for the axis (all data points outside these limits will not be
/// represented)
pub(crate) bounds: [f64; 2],
pub(crate) bounds: AxisBound,
/// A list of labels to put to the left or below the axis
pub(crate) labels: Option<Vec<Span<'a>>>,
/// The style used to draw the axis itself
@ -47,9 +75,6 @@ impl<'a> Axis<'a> {
///
/// It will be displayed at the end of the axis. For an X axis this is the
/// right, for a Y axis, this is the top.
///
/// This is a fluent setter method which must be chained or used as it
/// consumes self
#[must_use = "method moves the value of self and returns the modified value"]
#[cfg_attr(not(test), expect(dead_code))]
pub fn title<T>(mut self, title: T) -> Axis<'a>
@ -60,14 +85,9 @@ impl<'a> Axis<'a> {
self
}
/// Sets the bounds of this axis
///
/// In other words, sets the min and max value on this axis.
///
/// This is a fluent setter method which must be chained or used as it
/// consumes self
/// Sets the bounds of this axis.
#[must_use = "method moves the value of self and returns the modified value"]
pub fn bounds(mut self, bounds: [f64; 2]) -> Axis<'a> {
pub fn bounds(mut self, bounds: AxisBound) -> Axis<'a> {
self.bounds = bounds;
self
}
@ -239,23 +259,28 @@ impl FromStr for LegendPosition {
}
}
#[derive(Debug, Default, Clone)]
enum Data<'a> {
Some {
times: &'a [Instant],
values: &'a Values,
},
#[default]
None,
}
/// A group of data points
///
/// This is the main element composing a [`TimeChart`].
///
/// A dataset can be [named](Dataset::name). Only named datasets will be
/// rendered in the legend.
///
/// After that, you can pass it data with [`Dataset::data`]. Data is an array of
/// `f64` tuples (`(f64, f64)`), the first element being X and the second Y.
/// It's also worth noting that, unlike the [`Rect`], here the Y axis is bottom
/// to top, as in math.
#[derive(Debug, Default, Clone, PartialEq)]
#[derive(Debug, Default, Clone)]
pub struct Dataset<'a> {
/// Name of the dataset (used in the legend if shown)
name: Option<Line<'a>>,
/// A reference to the actual data
data: &'a [(f64, f64)],
/// A reference to data.
data: Data<'a>,
/// Symbol used for each point of this dataset
marker: symbols::Marker,
/// Determines graph type used for drawing points
@ -284,8 +309,8 @@ impl<'a> Dataset<'a> {
/// element being X and the second Y. It's also worth noting that,
/// unlike the [`Rect`], here the Y axis is bottom to top, as in math.
#[must_use = "method moves the value of self and returns the modified value"]
pub fn data(mut self, data: &'a [(f64, f64)]) -> Dataset<'a> {
self.data = data;
pub fn data(mut self, times: &'a [Instant], values: &'a Values) -> Dataset<'a> {
self.data = Data::Some { times, values };
self
}
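// A rough sketch of the new call shape (mirroring the test further below,
// which builds `Values` by pushing entries; the names here are illustrative):
//
//     let now = Instant::now();
//     let times = [now, now + Duration::from_secs(1)];
//     let mut values = Values::default();
//     values.push(5.0);
//     values.push(6.0);
//     let dataset = Dataset::default().name("RX").data(&times, &values);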
@ -297,9 +322,6 @@ impl<'a> Dataset<'a> {
///
/// Note [`Marker::Braille`] requires a font that supports Unicode Braille
/// Patterns.
///
/// This is a fluent setter method which must be chained or used as it
/// consumes self
#[must_use = "method moves the value of self and returns the modified value"]
#[expect(dead_code)]
pub fn marker(mut self, marker: symbols::Marker) -> Dataset<'a> {
@ -357,6 +379,28 @@ struct ChartLayout {
graph_area: Rect,
}
/// Whether to additionally scale all values before displaying them. Defaults to linear (no additional scaling).
#[derive(Default, Debug, Clone, Copy)]
pub(crate) enum ChartScaling {
#[default]
Linear,
Log10,
Log2,
}
impl ChartScaling {
/// Scale a value.
pub(super) fn scale(&self, value: f64) -> f64 {
// Remember to use the saturating log helpers, as otherwise the log of 0.0 is
// -inf and you get gaps in the drawn lines!
match self {
ChartScaling::Linear => value,
ChartScaling::Log10 => saturating_log10(value),
ChartScaling::Log2 => saturating_log2(value),
}
}
}
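// The saturating log helpers live in `crate::utils::general`; a rough sketch
// of the behaviour `scale` relies on (the exact clamping rule for non-positive
// inputs is an assumption) is:
//
//     fn saturating_log10(value: f64) -> f64 {
//         if value > 0.0 { value.log10() } else { 0.0 }
//     }
//
// so `ChartScaling::Log10.scale(0.0)` stays finite rather than becoming -inf.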
/// A "custom" chart, just a slightly tweaked [`tui::widgets::Chart`] from
/// ratatui, but with greater control over the legend, and built with the idea
/// of drawing data points relative to a time-based x-axis.
@ -367,7 +411,7 @@ struct ChartLayout {
/// - Automatic interpolation to points that fall *just* outside of the screen.
///
/// TODO: Support for putting the legend on the left side.
#[derive(Debug, Default, Clone, PartialEq)]
#[derive(Debug, Default, Clone)]
pub struct TimeChart<'a> {
/// A block to display around the widget eventually
block: Option<Block<'a>>,
@ -383,17 +427,17 @@ pub struct TimeChart<'a> {
legend_style: Style,
/// Constraints used to determine whether the legend should be shown or not
hidden_legend_constraints: (Constraint, Constraint),
/// The position detnermine where the legenth is shown or hide regaurdless
/// The position determining whether the legend is shown or hidden, regardless
/// of `hidden_legend_constraints`
legend_position: Option<LegendPosition>,
/// The marker type.
marker: Marker,
/// Whether to scale the values differently.
scaling: ChartScaling,
}
impl<'a> TimeChart<'a> {
/// Creates a chart with the given [datasets](Dataset)
///
/// A chart can render multiple datasets.
/// Creates a chart with the given [datasets](Dataset).
pub fn new(datasets: Vec<Dataset<'a>>) -> TimeChart<'a> {
TimeChart {
block: None,
@ -405,6 +449,7 @@ impl<'a> TimeChart<'a> {
hidden_legend_constraints: (Constraint::Ratio(1, 4), Constraint::Ratio(1, 4)),
legend_position: Some(LegendPosition::default()),
marker: Marker::Braille,
scaling: ChartScaling::default(),
}
}
@ -478,7 +523,7 @@ impl<'a> TimeChart<'a> {
self
}
/// Sets the position of a legend or hide it
/// Sets the position of the legend, or hides it.
///
/// The default is [`LegendPosition::TopRight`].
///
@ -496,6 +541,13 @@ impl<'a> TimeChart<'a> {
self
}
/// Set chart scaling.
#[must_use = "method moves the value of self and returns the modified value"]
pub fn scaling(mut self, scaling: ChartScaling) -> TimeChart<'a> {
self.scaling = scaling;
self
}
/// Compute the internal layout of the chart given the area. If the area is
/// too small some elements may be automatically hidden
fn layout(&self, area: Rect) -> ChartLayout {
@ -695,6 +747,8 @@ impl<'a> TimeChart<'a> {
fn render_y_labels(
&self, buf: &mut Buffer, layout: &ChartLayout, chart_area: Rect, graph_area: Rect,
) {
// FIXME: Control how many y-axis labels are rendered based on height.
let Some(x) = layout.label_y else { return };
let labels = self.y_axis.labels.as_ref().unwrap();
let labels_len = labels.len() as u16;
@ -725,7 +779,7 @@ impl Widget for TimeChart<'_> {
// Sample the style of the entire widget. This sample will be used to reset the
// style of the cells that are part of the components put on top of the
// grah area (i.e legend and axis names).
// graph area (i.e., the legend and axis names).
let Some(original_style) = buf.cell((area.left(), area.top())).map(|cell| cell.style())
else {
return;
@ -767,10 +821,13 @@ impl Widget for TimeChart<'_> {
}
}
let x_bounds = self.x_axis.bounds.get_bounds();
let y_bounds = self.y_axis.bounds.get_bounds();
Canvas::default()
.background_color(self.style.bg.unwrap_or(Color::Reset))
.x_bounds(self.x_axis.bounds)
.y_bounds(self.y_axis.bounds)
.x_bounds(x_bounds)
.y_bounds(y_bounds)
.marker(self.marker)
.paint(|ctx| {
self.draw_points(ctx);
@ -930,6 +987,8 @@ mod tests {
};
}
use std::time::Duration;
use tui::style::{Modifier, Stylize};
use super::*;
@ -942,7 +1001,17 @@ mod tests {
#[test]
fn it_should_hide_the_legend() {
let data = [(0.0, 5.0), (1.0, 6.0), (3.0, 7.0)];
let now = Instant::now();
let times = [
now,
now.checked_add(Duration::from_secs(1)).unwrap(),
now.checked_add(Duration::from_secs(2)).unwrap(),
];
let mut values = Values::default();
values.push(5.0);
values.push(6.0);
values.push(7.0);
let cases = [
LegendTestCase {
chart_area: Rect::new(0, 0, 100, 100),
@ -959,7 +1028,7 @@ mod tests {
let datasets = (0..10)
.map(|i| {
let name = format!("Dataset #{i}");
Dataset::default().name(name).data(&data)
Dataset::default().name(name).data(&times, &values)
})
.collect::<Vec<_>>();
let chart = TimeChart::new(datasets)

View File

@ -154,7 +154,7 @@ trait Grid: Debug {
struct BrailleGrid {
width: u16,
height: u16,
cells: Vec<u16>,
cells: Vec<u16>, // FIXME: (points_rework_v1) isn't this really inefficient to go u16 -> String from utf16?
colors: Vec<Color>,
}
@ -171,14 +171,6 @@ impl BrailleGrid {
}
impl Grid for BrailleGrid {
// fn width(&self) -> u16 {
// self.width
// }
// fn height(&self) -> u16 {
// self.height
// }
fn resolution(&self) -> (f64, f64) {
(
f64::from(self.width) * 2.0 - 1.0,
@ -242,14 +234,6 @@ impl CharGrid {
}
impl Grid for CharGrid {
// fn width(&self) -> u16 {
// self.width
// }
// fn height(&self) -> u16 {
// self.height
// }
fn resolution(&self) -> (f64, f64) {
(f64::from(self.width) - 1.0, f64::from(self.height) - 1.0)
}
@ -325,14 +309,6 @@ impl HalfBlockGrid {
}
impl Grid for HalfBlockGrid {
// fn width(&self) -> u16 {
// self.width
// }
// fn height(&self) -> u16 {
// self.height
// }
fn resolution(&self) -> (f64, f64) {
(f64::from(self.width), f64::from(self.height) * 2.0)
}
@ -362,8 +338,9 @@ impl Grid for HalfBlockGrid {
// Note we implement this slightly differently to what is done in ratatui's
// repo, since their version doesn't seem to compile for me...
//
// TODO: Whenever I add this as a valid marker, make sure this works fine with
// the overriden time_chart drawing-layer-thing.
// the overridden time_chart drawing-layer-thing.
// Join the upper and lower rows, and emit a tuple vector of strings to print,
// and their colours.
@ -401,28 +378,7 @@ impl Grid for HalfBlockGrid {
}
impl Painter<'_, '_> {
/// Convert the (x, y) coordinates to location of a point on the grid
///
/// # Examples:
/// ```
/// use tui::{
/// symbols,
/// widgets::canvas::{Context, Painter},
/// };
///
/// let mut ctx = Context::new(2, 2, [1.0, 2.0], [0.0, 2.0], symbols::Marker::Braille);
/// let mut painter = Painter::from(&mut ctx);
/// let point = painter.get_point(1.0, 0.0);
/// assert_eq!(point, Some((0, 7)));
/// let point = painter.get_point(1.5, 1.0);
/// assert_eq!(point, Some((1, 3)));
/// let point = painter.get_point(0.0, 0.0);
/// assert_eq!(point, None);
/// let point = painter.get_point(2.0, 2.0);
/// assert_eq!(point, Some((3, 0)));
/// let point = painter.get_point(1.0, 2.0);
/// assert_eq!(point, Some((0, 0)));
/// ```
/// Convert the (x, y) coordinates to location of a point on the grid.
pub fn get_point(&self, x: f64, y: f64) -> Option<(usize, usize)> {
let left = self.context.x_bounds[0];
let right = self.context.x_bounds[1];
@ -441,20 +397,7 @@ impl Painter<'_, '_> {
Some((x, y))
}
/// Paint a point of the grid
///
/// # Examples:
/// ```
/// use tui::{
/// style::Color,
/// symbols,
/// widgets::canvas::{Context, Painter},
/// };
///
/// let mut ctx = Context::new(1, 1, [0.0, 2.0], [0.0, 2.0], symbols::Marker::Braille);
/// let mut painter = Painter::from(&mut ctx);
/// let cell = painter.paint(1, 3, Color::Red);
/// ```
/// Paint a point of the grid.
pub fn paint(&mut self, x: usize, y: usize, color: Color) {
self.context.grid.paint(x, y, color);
}
@ -570,24 +513,6 @@ where
/// braille patterns are used as they provide a more fine grained result
/// but you might want to use the simple dot or block instead if the
/// targeted terminal does not support those symbols.
///
/// # Examples
///
/// ```
/// # use tui::widgets::canvas::Canvas;
/// # use tui::symbols;
/// Canvas::default()
/// .marker(symbols::Marker::Braille)
/// .paint(|ctx| {});
///
/// Canvas::default()
/// .marker(symbols::Marker::Dot)
/// .paint(|ctx| {});
///
/// Canvas::default()
/// .marker(symbols::Marker::Block)
/// .paint(|ctx| {});
/// ```
pub fn marker(mut self, marker: symbols::Marker) -> Canvas<'a, F> {
self.marker = marker;
self

View File

@ -0,0 +1,128 @@
use itertools::Itertools;
use tui::{
style::Color,
widgets::{
canvas::{Line as CanvasLine, Points},
GraphType,
},
};
use super::{Context, Data, Point, TimeChart};
impl TimeChart<'_> {
pub(crate) fn draw_points(&self, ctx: &mut Context<'_>) {
// Idea is to:
// - Go over all datasets, determine *where* a point will be drawn.
// - Last point wins for what gets drawn.
// - We set _all_ points for all datasets before actually rendering.
//
// By doing this, it's a bit more efficient from my experience than looping
// over each dataset and rendering a new layer each time.
//
// See https://github.com/ClementTsang/bottom/pull/918 and
// https://github.com/ClementTsang/bottom/pull/937 for the original motivation.
//
// We also do some interpolation logic, since we may otherwise miss some
// points when drawing, and we want to avoid jarring gaps at the edges when
// a point is off screen and so a line isn't drawn (the right edge generally
// won't have this issue, but it can happen in some cases).
for dataset in &self.datasets {
let Data::Some { times, values } = dataset.data else {
continue;
};
let Some(current_time) = times.last() else {
continue;
};
let color = dataset.style.fg.unwrap_or(Color::Reset);
let left_edge = self.x_axis.bounds.get_bounds()[0];
// TODO: (points_rework_v1) Can we instead modify the range so it's based on the epoch rather than having to convert?
// TODO: (points_rework_v1) Is this efficient? Or should I prune using take_while first?
for (curr, next) in values
.iter_along_base(times)
.rev()
.map(|(&time, &val)| {
let from_start: f64 =
(current_time.duration_since(time).as_millis() as f64).floor();
// XXX: Should this be generic over dataset.graph_type instead? That would allow us to move
// transformations behind a type; however, that would also add some complexity.
(-from_start, self.scaling.scale(val))
})
.tuple_windows()
{
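// `iter_along_base` effectively zips each value with its timestamp from
// `times`, so after `.rev()` and `.tuple_windows()`, `curr` and `next` are
// adjacent (negative ms offset from now, scaled value) points, with `curr`
// being the newer of the two.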
if curr.0 == left_edge {
// The current point hits the left edge. Draw just the current point and halt.
ctx.draw(&Points {
coords: &[curr],
color,
});
break;
} else if next.0 < left_edge {
// The next point goes past the left edge. Interpolate a point + the line and halt.
let interpolated = interpolate_point(&next, &curr, left_edge);
ctx.draw(&CanvasLine {
x1: curr.0,
y1: curr.1,
x2: left_edge,
y2: interpolated,
color,
});
break;
} else {
// Draw the current point and the line to the next point.
if let GraphType::Line = dataset.graph_type {
ctx.draw(&CanvasLine {
x1: curr.0,
y1: curr.1,
x2: next.0,
y2: next.1,
color,
});
} else {
ctx.draw(&Points {
coords: &[curr],
color,
});
}
}
}
}
}
}
/// Returns the y-axis value for a given `x`, given two points to draw a line
/// between.
fn interpolate_point(older_point: &Point, newer_point: &Point, x: f64) -> f64 {
let delta_x = newer_point.0 - older_point.0;
let delta_y = newer_point.1 - older_point.1;
let slope = delta_y / delta_x;
(older_point.1 + (x - older_point.0) * slope).max(0.0)
}
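// A hand-worked example of the left-edge clipping above (numbers chosen for
// illustration): with the left edge at x = -15000.0 and adjacent points at
// (-16000.0, 6.0) (older, off screen) and (-14000.0, 2.0) (newer), the slope
// is -0.002, so interpolate_point(&(-16000.0, 6.0), &(-14000.0, 2.0), -15000.0)
// yields 4.0 and the drawn line stops at (-15000.0, 4.0).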
#[cfg(test)]
mod test {
use super::*;
#[test]
fn time_chart_test_interpolation() {
let data = [(-3.0, 8.0), (-1.0, 6.0), (0.0, 5.0)];
assert_eq!(interpolate_point(&data[1], &data[2], 0.0), 5.0);
assert_eq!(interpolate_point(&data[1], &data[2], -0.25), 5.25);
assert_eq!(interpolate_point(&data[1], &data[2], -0.5), 5.5);
assert_eq!(interpolate_point(&data[0], &data[1], -1.0), 6.0);
assert_eq!(interpolate_point(&data[0], &data[1], -1.5), 6.5);
assert_eq!(interpolate_point(&data[0], &data[1], -2.0), 7.0);
assert_eq!(interpolate_point(&data[0], &data[1], -2.5), 7.5);
assert_eq!(interpolate_point(&data[0], &data[1], -3.0), 8.0);
}
}

View File

@ -1,4 +0,0 @@
//! Components derived from ratatui widgets.
pub mod pipe_gauge;
pub mod time_chart;

View File

@ -1,219 +0,0 @@
use tui::{
style::Color,
widgets::{
canvas::{Line as CanvasLine, Points},
GraphType,
},
};
use super::{Context, Dataset, Point, TimeChart};
use crate::utils::general::partial_ordering;
impl TimeChart<'_> {
pub(crate) fn draw_points(&self, ctx: &mut Context<'_>) {
// Idea is to:
// - Go over all datasets, determine *where* a point will be drawn.
// - Last point wins for what gets drawn.
// - We set _all_ points for all datasets before actually rendering.
//
// By doing this, it's a bit more efficient from my experience than looping
// over each dataset and rendering a new layer each time.
//
// See <https://github.com/ClementTsang/bottom/pull/918> and <https://github.com/ClementTsang/bottom/pull/937>
// for the original motivation.
//
// We also additionally do some interpolation logic because we may get caught
// missing some points when drawing, but we generally want to avoid
// jarring gaps between the edges when there's a point that is off
// screen and so a line isn't drawn (right edge generally won't have this issue
// issue but it can happen in some cases).
for dataset in &self.datasets {
let color = dataset.style.fg.unwrap_or(Color::Reset);
let start_bound = self.x_axis.bounds[0];
let end_bound = self.x_axis.bounds[1];
let (start_index, interpolate_start) = get_start(dataset, start_bound);
let (end_index, interpolate_end) = get_end(dataset, end_bound);
let data_slice = &dataset.data[start_index..end_index];
if let Some(interpolate_start) = interpolate_start {
if let (Some(older_point), Some(newer_point)) = (
dataset.data.get(interpolate_start),
dataset.data.get(interpolate_start + 1),
) {
let interpolated_point = (
self.x_axis.bounds[0],
interpolate_point(older_point, newer_point, self.x_axis.bounds[0]),
);
if let GraphType::Line = dataset.graph_type {
ctx.draw(&CanvasLine {
x1: interpolated_point.0,
y1: interpolated_point.1,
x2: newer_point.0,
y2: newer_point.1,
color,
});
} else {
ctx.draw(&Points {
coords: &[interpolated_point],
color,
});
}
}
}
if let GraphType::Line = dataset.graph_type {
for data in data_slice.windows(2) {
ctx.draw(&CanvasLine {
x1: data[0].0,
y1: data[0].1,
x2: data[1].0,
y2: data[1].1,
color,
});
}
} else {
ctx.draw(&Points {
coords: data_slice,
color,
});
}
if let Some(interpolate_end) = interpolate_end {
if let (Some(older_point), Some(newer_point)) = (
dataset.data.get(interpolate_end - 1),
dataset.data.get(interpolate_end),
) {
let interpolated_point = (
self.x_axis.bounds[1],
interpolate_point(older_point, newer_point, self.x_axis.bounds[1]),
);
if let GraphType::Line = dataset.graph_type {
ctx.draw(&CanvasLine {
x1: older_point.0,
y1: older_point.1,
x2: interpolated_point.0,
y2: interpolated_point.1,
color,
});
} else {
ctx.draw(&Points {
coords: &[interpolated_point],
color,
});
}
}
}
}
}
}
/// Returns the start index and potential interpolation index given the start
/// time and the dataset.
fn get_start(dataset: &Dataset<'_>, start_bound: f64) -> (usize, Option<usize>) {
match dataset
.data
.binary_search_by(|(x, _y)| partial_ordering(x, &start_bound))
{
Ok(index) => (index, None),
Err(index) => (index, index.checked_sub(1)),
}
}
/// Returns the end position and potential interpolation index given the end
/// time and the dataset.
fn get_end(dataset: &Dataset<'_>, end_bound: f64) -> (usize, Option<usize>) {
match dataset
.data
.binary_search_by(|(x, _y)| partial_ordering(x, &end_bound))
{
// In the success case, this means we found an index. Add one since we want to include this
// index and we expect to use the returned index as part of a (m..n) range.
Ok(index) => (index.saturating_add(1), None),
// In the fail case, this means we did not find an index, and the returned index is where
// one would *insert* the location. This index is where one would insert to fit
// inside the dataset - and since this is an end bound, index is, in a sense,
// already "+1" for our range later.
Err(index) => (index, {
let sum = index.checked_add(1);
match sum {
Some(s) if s < dataset.data.len() => sum,
_ => None,
}
}),
}
}
/// Returns the y-axis value for a given `x`, given two points to draw a line
/// between.
fn interpolate_point(older_point: &Point, newer_point: &Point, x: f64) -> f64 {
let delta_x = newer_point.0 - older_point.0;
let delta_y = newer_point.1 - older_point.1;
let slope = delta_y / delta_x;
(older_point.1 + (x - older_point.0) * slope).max(0.0)
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn time_chart_test_interpolation() {
let data = [(-3.0, 8.0), (-1.0, 6.0), (0.0, 5.0)];
assert_eq!(interpolate_point(&data[1], &data[2], 0.0), 5.0);
assert_eq!(interpolate_point(&data[1], &data[2], -0.25), 5.25);
assert_eq!(interpolate_point(&data[1], &data[2], -0.5), 5.5);
assert_eq!(interpolate_point(&data[0], &data[1], -1.0), 6.0);
assert_eq!(interpolate_point(&data[0], &data[1], -1.5), 6.5);
assert_eq!(interpolate_point(&data[0], &data[1], -2.0), 7.0);
assert_eq!(interpolate_point(&data[0], &data[1], -2.5), 7.5);
assert_eq!(interpolate_point(&data[0], &data[1], -3.0), 8.0);
}
#[test]
fn time_chart_empty_dataset() {
let data = [];
let dataset = Dataset::default().data(&data);
assert_eq!(get_start(&dataset, -100.0), (0, None));
assert_eq!(get_start(&dataset, -3.0), (0, None));
assert_eq!(get_end(&dataset, 0.0), (0, None));
assert_eq!(get_end(&dataset, 100.0), (0, None));
}
#[test]
fn time_chart_test_data_trimming() {
let data = [
(-3.0, 8.0),
(-2.5, 15.0),
(-2.0, 9.0),
(-1.0, 6.0),
(0.0, 5.0),
];
let dataset = Dataset::default().data(&data);
// Test start point cases (miss and hit)
assert_eq!(get_start(&dataset, -100.0), (0, None));
assert_eq!(get_start(&dataset, -3.0), (0, None));
assert_eq!(get_start(&dataset, -2.8), (1, Some(0)));
assert_eq!(get_start(&dataset, -2.5), (1, None));
assert_eq!(get_start(&dataset, -2.4), (2, Some(1)));
// Test end point cases (miss and hit)
assert_eq!(get_end(&dataset, -2.5), (2, None));
assert_eq!(get_end(&dataset, -2.4), (2, Some(3)));
assert_eq!(get_end(&dataset, -1.4), (3, Some(4)));
assert_eq!(get_end(&dataset, -1.0), (4, None));
assert_eq!(get_end(&dataset, 0.0), (5, None));
assert_eq!(get_end(&dataset, 1.0), (5, None));
assert_eq!(get_end(&dataset, 100.0), (5, None));
}
}

View File

@ -11,8 +11,8 @@ use unicode_width::UnicodeWidthStr;
use crate::{
app::App,
canvas::{drawing_utils::widget_block, Painter},
collection::batteries::BatteryState,
constants::*,
data_collection::batteries::BatteryState,
};
/// Calculate how many bars are to be drawn within basic mode's components.
@ -65,10 +65,9 @@ impl Painter {
block
};
if app_state.data_collection.battery_harvest.len() > 1 {
let battery_names = app_state
.data_collection
.battery_harvest
let battery_harvest = &(app_state.data_store.get_data().battery_harvest);
if battery_harvest.len() > 1 {
let battery_names = battery_harvest
.iter()
.enumerate()
.map(|(itx, _)| format!("Battery {itx}"))
@ -124,10 +123,8 @@ impl Painter {
.direction(Direction::Horizontal)
.split(draw_loc)[0];
if let Some(battery_details) = app_state
.data_collection
.battery_harvest
.get(battery_widget_state.currently_selected_battery_index)
if let Some(battery_details) =
battery_harvest.get(battery_widget_state.currently_selected_battery_index)
{
let full_width = draw_loc.width.saturating_sub(2);
let bar_length = usize::from(full_width.saturating_sub(6));
@ -202,7 +199,7 @@ impl Painter {
battery_rows.push(Row::new(["Health", &health]).style(self.styles.text_style));
let header = if app_state.data_collection.battery_harvest.len() > 1 {
let header = if battery_harvest.len() > 1 {
Row::new([""]).bottom_margin(table_gap)
} else {
Row::default()

View File

@ -1,5 +1,6 @@
use std::cmp::min;
use itertools::{Either, Itertools};
use tui::{
layout::{Constraint, Direction, Layout, Rect},
Frame,
@ -12,8 +13,7 @@ use crate::{
drawing_utils::widget_block,
Painter,
},
data_collection::cpu::CpuDataType,
data_conversion::CpuWidgetData,
collection::cpu::{CpuData, CpuDataType},
};
impl Painter {
@ -21,33 +21,34 @@ impl Painter {
pub fn draw_basic_cpu(
&self, f: &mut Frame<'_>, app_state: &mut App, mut draw_loc: Rect, widget_id: u64,
) {
// Skip the first element, it's the "all" element
if app_state.converted_data.cpu_data.len() > 1 {
let cpu_data: &[CpuWidgetData] = &app_state.converted_data.cpu_data[1..];
let cpu_data = &app_state.data_store.get_data().cpu_harvest;
// This is a bit complicated, but basically, we want to draw SOME number
// of columns to draw all CPUs. Ideally, as well, we want to not have
// to ever scroll.
// **General logic** - count number of elements in cpu_data. Then see how
// many rows and columns we have in draw_loc (-2 on both sides for border?).
// I think what we can do is try to fit in as many in one column as possible.
// If not, then add a new column.
// Then, from this, split the row space across ALL columns. From there,
// generate the desired lengths.
// This is a bit complicated, but basically, we want to draw SOME number
// of columns to draw all CPUs. Ideally, as well, we want to not have
// to ever scroll.
//
// **General logic** - count number of elements in cpu_data. Then see how
// many rows and columns we have in draw_loc (-2 on both sides for border?).
// I think what we can do is try to fit in as many in one column as possible.
// If not, then add a new column. Then, from this, split the row space across ALL columns.
// From there, generate the desired lengths.
if app_state.current_widget.widget_id == widget_id {
f.render_widget(
widget_block(true, true, self.styles.border_type)
.border_style(self.styles.highlighted_border_style),
draw_loc,
);
}
if app_state.current_widget.widget_id == widget_id {
f.render_widget(
widget_block(true, true, self.styles.border_type)
.border_style(self.styles.highlighted_border_style),
draw_loc,
);
}
let (cpu_data, avg_data) =
maybe_split_avg(cpu_data, app_state.app_config_fields.dedicated_average_row);
if let Some(avg) = avg_data {
let (outer, inner, ratio, style) = self.cpu_info(&avg);
// TODO: This is pretty ugly. Is there a better way of doing it?
let mut cpu_iter = Either::Right(cpu_data.iter());
if app_state.app_config_fields.dedicated_average_row {
if let Some((index, avg)) = cpu_data
.iter()
.find_position(|&datum| matches!(datum.data_type, CpuDataType::Avg))
{
let (outer, inner, ratio, style) = self.cpu_info(avg);
let [cores_loc, mut avg_loc] =
Layout::vertical([Constraint::Min(0), Constraint::Length(1)]).areas(draw_loc);
@ -67,67 +68,66 @@ impl Painter {
);
draw_loc = cores_loc;
cpu_iter = Either::Left(cpu_data.iter().skip(index));
}
}
if draw_loc.height > 0 {
let remaining_height = usize::from(draw_loc.height);
const REQUIRED_COLUMNS: usize = 4;
if draw_loc.height > 0 {
let remaining_height = usize::from(draw_loc.height);
const REQUIRED_COLUMNS: usize = 4;
let col_constraints =
vec![Constraint::Percentage((100 / REQUIRED_COLUMNS) as u16); REQUIRED_COLUMNS];
let columns = Layout::default()
.constraints(col_constraints)
.direction(Direction::Horizontal)
.split(draw_loc);
let col_constraints =
vec![Constraint::Percentage((100 / REQUIRED_COLUMNS) as u16); REQUIRED_COLUMNS];
let columns = Layout::default()
.constraints(col_constraints)
.direction(Direction::Horizontal)
.split(draw_loc);
let mut gauge_info = cpu_data.iter().map(|cpu| self.cpu_info(cpu));
let mut gauge_info = cpu_iter.map(|cpu| self.cpu_info(cpu));
// Very ugly way to sync the gauge limit across all gauges.
let hide_parts = columns
.first()
.map(|col| {
if col.width >= 12 {
LabelLimit::None
} else if col.width >= 10 {
LabelLimit::Bars
} else {
LabelLimit::StartLabel
}
})
.unwrap_or_default();
// Very ugly way to sync the gauge limit across all gauges.
let hide_parts = columns
.first()
.map(|col| {
if col.width >= 12 {
LabelLimit::None
} else if col.width >= 10 {
LabelLimit::Bars
} else {
LabelLimit::StartLabel
}
})
.unwrap_or_default();
let num_entries = cpu_data.len();
let mut row_counter = num_entries;
for (itx, column) in columns.iter().enumerate() {
if REQUIRED_COLUMNS > itx {
let to_divide = REQUIRED_COLUMNS - itx;
let num_taken = min(
remaining_height,
(row_counter / to_divide) + usize::from(row_counter % to_divide != 0),
let num_entries = cpu_data.len();
let mut row_counter = num_entries;
for (itx, column) in columns.iter().enumerate() {
if REQUIRED_COLUMNS > itx {
let to_divide = REQUIRED_COLUMNS - itx;
let num_taken = min(
remaining_height,
(row_counter / to_divide) + usize::from(row_counter % to_divide != 0),
);
row_counter -= num_taken;
let chunk = (&mut gauge_info).take(num_taken);
let rows = Layout::default()
.direction(Direction::Vertical)
.constraints(vec![Constraint::Length(1); remaining_height])
.horizontal_margin(1)
.split(*column);
for ((start_label, inner_label, ratio, style), row) in chunk.zip(rows.iter()) {
f.render_widget(
PipeGauge::default()
.gauge_style(style)
.label_style(style)
.inner_label(inner_label)
.start_label(start_label)
.ratio(ratio)
.hide_parts(hide_parts),
*row,
);
row_counter -= num_taken;
let chunk = (&mut gauge_info).take(num_taken);
let rows = Layout::default()
.direction(Direction::Vertical)
.constraints(vec![Constraint::Length(1); remaining_height])
.horizontal_margin(1)
.split(*column);
for ((start_label, inner_label, ratio, style), row) in
chunk.zip(rows.iter())
{
f.render_widget(
PipeGauge::default()
.gauge_style(style)
.label_style(style)
.inner_label(inner_label)
.start_label(start_label)
.ratio(ratio)
.hide_parts(hide_parts),
*row,
);
}
}
}
}
@ -143,63 +143,19 @@ impl Painter {
}
}
fn cpu_info(&self, cpu: &CpuWidgetData) -> (String, String, f64, tui::style::Style) {
let CpuWidgetData::Entry {
data_type,
last_entry,
..
} = cpu
else {
unreachable!()
};
let (outer, style) = match data_type {
#[inline]
fn cpu_info(&self, data: &CpuData) -> (String, String, f64, tui::style::Style) {
let (outer, style) = match data.data_type {
CpuDataType::Avg => ("AVG".to_string(), self.styles.avg_cpu_colour),
CpuDataType::Cpu(index) => (
format!("{index:<3}",),
self.styles.cpu_colour_styles[index % self.styles.cpu_colour_styles.len()],
),
};
let inner = format!("{:>3.0}%", last_entry.round());
let ratio = last_entry / 100.0;
let inner = format!("{:>3.0}%", data.cpu_usage.round());
let ratio = data.cpu_usage / 100.0;
(outer, inner, ratio, style)
}
}
fn maybe_split_avg(
data: &[CpuWidgetData], separate_avg: bool,
) -> (Vec<CpuWidgetData>, Option<CpuWidgetData>) {
let mut cpu_data = vec![];
let mut avg_data = None;
for cpu in data {
let CpuWidgetData::Entry {
data_type,
data,
last_entry,
} = cpu
else {
unreachable!()
};
match data_type {
CpuDataType::Avg if separate_avg => {
avg_data = Some(CpuWidgetData::Entry {
data_type: *data_type,
data: data.clone(),
last_entry: *last_entry,
});
}
_ => {
cpu_data.push(CpuWidgetData::Entry {
data_type: *data_type,
data: data.clone(),
last_entry: *last_entry,
});
}
}
}
(cpu_data, avg_data)
}

View File

@ -7,16 +7,16 @@ use tui::{
};
use crate::{
app::{layout_manager::WidgetDirection, App},
app::{data::StoredData, layout_manager::WidgetDirection, App},
canvas::{
components::{
data_table::{DrawInfo, SelectionState},
time_graph::{GraphData, TimeGraph},
time_graph::{AxisBound, GraphData, TimeGraph},
},
drawing_utils::should_hide_x_label,
Painter,
},
data_conversion::CpuWidgetData,
collection::cpu::CpuData,
widgets::CpuWidgetState,
};
@ -120,56 +120,48 @@ impl Painter {
}
fn generate_points<'a>(
&self, cpu_widget_state: &CpuWidgetState, cpu_data: &'a [CpuWidgetData], show_avg_cpu: bool,
&self, cpu_widget_state: &'a mut CpuWidgetState, data: &'a StoredData, show_avg_cpu: bool,
) -> Vec<GraphData<'a>> {
let show_avg_offset = if show_avg_cpu { AVG_POSITION } else { 0 };
let current_scroll_position = cpu_widget_state.table.state.current_index;
let cpu_entries = &data.cpu_harvest;
let cpu_points = &data.timeseries_data.cpu;
let time = &data.timeseries_data.time;
if current_scroll_position == ALL_POSITION {
// This case ensures the other cases cannot have the position be equal to 0.
cpu_data
cpu_points
.iter()
.enumerate()
.rev()
.filter_map(|(itx, cpu)| {
match &cpu {
CpuWidgetData::All => None,
CpuWidgetData::Entry { data, .. } => {
let style = if show_avg_cpu && itx == AVG_POSITION {
self.styles.avg_cpu_colour
} else if itx == ALL_POSITION {
self.styles.all_cpu_colour
} else {
let offset_position = itx - 1; // Because of the all position
self.styles.cpu_colour_styles[(offset_position - show_avg_offset)
% self.styles.cpu_colour_styles.len()]
};
.map(|(itx, values)| {
let style = if show_avg_cpu && itx == AVG_POSITION {
self.styles.avg_cpu_colour
} else if itx == ALL_POSITION {
self.styles.all_cpu_colour
} else {
self.styles.cpu_colour_styles
[(itx - show_avg_offset) % self.styles.cpu_colour_styles.len()]
};
Some(GraphData {
points: &data[..],
style,
name: None,
})
}
}
GraphData::default().style(style).time(time).values(values)
})
.collect::<Vec<_>>()
} else if let Some(CpuWidgetData::Entry { data, .. }) =
cpu_data.get(current_scroll_position)
{
.collect()
} else if let Some(CpuData { .. }) = cpu_entries.get(current_scroll_position - 1) {
// We generally subtract one from current scroll position because of the all entry.
let style = if show_avg_cpu && current_scroll_position == AVG_POSITION {
self.styles.avg_cpu_colour
} else {
let offset_position = current_scroll_position - 1; // Because of the all position
let offset_position = current_scroll_position - 1;
self.styles.cpu_colour_styles
[(offset_position - show_avg_offset) % self.styles.cpu_colour_styles.len()]
};
vec![GraphData {
points: &data[..],
style,
name: None,
}]
vec![GraphData::default()
.style(style)
.time(time)
.values(&cpu_points[current_scroll_position - 1])]
} else {
vec![]
}
@ -178,14 +170,15 @@ impl Painter {
fn draw_cpu_graph(
&self, f: &mut Frame<'_>, app_state: &mut App, draw_loc: Rect, widget_id: u64,
) {
const Y_BOUNDS: [f64; 2] = [0.0, 100.5];
const Y_BOUNDS: AxisBound = AxisBound::Max(100.5);
const Y_LABELS: [Cow<'static, str>; 2] = [Cow::Borrowed(" 0%"), Cow::Borrowed("100%")];
if let Some(cpu_widget_state) = app_state.states.cpu_state.widget_states.get_mut(&widget_id)
{
let cpu_data = &app_state.converted_data.cpu_data;
let data = app_state.data_store.get_data();
let border_style = self.get_border_style(widget_id, app_state.current_widget.widget_id);
let x_bounds = [0, cpu_widget_state.current_display_time];
let x_min = -(cpu_widget_state.current_display_time as f64);
let hide_x_labels = should_hide_x_label(
app_state.app_config_fields.hide_time,
app_state.app_config_fields.autohide_time,
@ -193,9 +186,9 @@ impl Painter {
draw_loc,
);
let points = self.generate_points(
let graph_data = self.generate_points(
cpu_widget_state,
cpu_data,
data,
app_state.app_config_fields.show_average_cpu,
);
@ -203,7 +196,7 @@ impl Painter {
let title = {
#[cfg(target_family = "unix")]
{
let load_avg = app_state.converted_data.load_avg_data;
let load_avg = &data.load_avg_harvest;
let load_avg_str = format!(
"─ {:.2} {:.2} {:.2} ",
load_avg[0], load_avg[1], load_avg[2]
@ -224,7 +217,7 @@ impl Painter {
};
TimeGraph {
x_bounds,
x_min,
hide_x_labels,
y_bounds: Y_BOUNDS,
y_labels: &Y_LABELS,
@ -238,8 +231,9 @@ impl Painter {
legend_position: None,
legend_constraints: None,
marker,
scaling: Default::default(),
}
.draw_time_graph(f, draw_loc, &points);
.draw_time_graph(f, draw_loc, graph_data);
}
}

View File

@ -1,3 +1,5 @@
use std::borrow::Cow;
use tui::{
layout::{Constraint, Direction, Layout, Rect},
Frame,
@ -6,13 +8,48 @@ use tui::{
use crate::{
app::App,
canvas::{components::pipe_gauge::PipeGauge, drawing_utils::widget_block, Painter},
collection::memory::MemHarvest,
get_binary_unit_and_denominator,
};
/// Convert memory info into a string representing a fraction.
#[inline]
fn memory_fraction_label(data: &MemHarvest) -> Cow<'static, str> {
if data.total_bytes > 0 {
let (unit, denominator) = get_binary_unit_and_denominator(data.total_bytes);
let used = data.used_bytes as f64 / denominator;
let total = data.total_bytes as f64 / denominator;
format!("{used:.1}{unit}/{total:.1}{unit}").into()
} else {
"0.0B/0.0B".into()
}
}
/// Convert memory info into a string representing a percentage.
#[inline]
fn memory_percentage_label(data: &MemHarvest) -> Cow<'static, str> {
if data.total_bytes > 0 {
let percentage = data.used_bytes as f64 / data.total_bytes as f64 * 100.0;
format!("{percentage:3.0}%").into()
} else {
" 0%".into()
}
}
#[inline]
fn memory_label(data: &MemHarvest, is_percentage: bool) -> Cow<'static, str> {
if is_percentage {
memory_percentage_label(data)
} else {
memory_fraction_label(data)
}
}
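// As a rough illustration, for a harvest with 2 GiB used out of 8 GiB (the
// exact unit text comes from `get_binary_unit_and_denominator`, so "GiB" here
// is an assumption):
//
//     memory_label(&harvest, false) // -> "2.0GiB/8.0GiB"
//     memory_label(&harvest, true)  // -> " 25%"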
impl Painter {
pub fn draw_basic_memory(
&self, f: &mut Frame<'_>, app_state: &mut App, draw_loc: Rect, widget_id: u64,
) {
let mem_data = &app_state.converted_data.mem_data;
let mut draw_widgets: Vec<PipeGauge<'_>> = Vec::new();
if app_state.current_widget.widget_id == widget_id {
@ -23,50 +60,41 @@ impl Painter {
);
}
let ram_percentage = if let Some(mem) = mem_data.last() {
mem.1
} else {
0.0
};
let data = app_state.data_store.get_data();
const EMPTY_MEMORY_FRAC_STRING: &str = "0.0B/0.0B";
let memory_fraction_label =
if let Some((_, label_frac)) = &app_state.converted_data.mem_labels {
if app_state.basic_mode_use_percent {
format!("{:3.0}%", ram_percentage.round())
} else {
label_frac.trim().to_string()
}
} else {
EMPTY_MEMORY_FRAC_STRING.to_string()
};
let ram_percentage = data.ram_harvest.saturating_percentage();
let ram_label = memory_label(&data.ram_harvest, app_state.basic_mode_use_percent);
draw_widgets.push(
PipeGauge::default()
.ratio(ram_percentage / 100.0)
.start_label("RAM")
.inner_label(memory_fraction_label)
.inner_label(ram_label)
.label_style(self.styles.ram_style)
.gauge_style(self.styles.ram_style),
);
if let Some(swap_harvest) = &data.swap_harvest {
let swap_percentage = swap_harvest.saturating_percentage();
let swap_label = memory_label(swap_harvest, app_state.basic_mode_use_percent);
draw_widgets.push(
PipeGauge::default()
.ratio(swap_percentage / 100.0)
.start_label("SWP")
.inner_label(swap_label)
.label_style(self.styles.swap_style)
.gauge_style(self.styles.swap_style),
);
}
#[cfg(not(target_os = "windows"))]
{
if let Some((_, label_frac)) = &app_state.converted_data.cache_labels {
let cache_data = &app_state.converted_data.cache_data;
if let Some(cache_harvest) = &data.cache_harvest {
let cache_percentage = cache_harvest.saturating_percentage();
let cache_fraction_label =
memory_label(cache_harvest, app_state.basic_mode_use_percent);
let cache_percentage = if let Some(cache) = cache_data.last() {
cache.1
} else {
0.0
};
let cache_fraction_label = if app_state.basic_mode_use_percent {
format!("{:3.0}%", cache_percentage.round())
} else {
label_frac.trim().to_string()
};
draw_widgets.push(
PipeGauge::default()
.ratio(cache_percentage / 100.0)
@ -78,44 +106,13 @@ impl Painter {
}
}
let swap_data = &app_state.converted_data.swap_data;
let swap_percentage = if let Some(swap) = swap_data.last() {
swap.1
} else {
0.0
};
if let Some((_, label_frac)) = &app_state.converted_data.swap_labels {
let swap_fraction_label = if app_state.basic_mode_use_percent {
format!("{:3.0}%", swap_percentage.round())
} else {
label_frac.trim().to_string()
};
draw_widgets.push(
PipeGauge::default()
.ratio(swap_percentage / 100.0)
.start_label("SWP")
.inner_label(swap_fraction_label)
.label_style(self.styles.swap_style)
.gauge_style(self.styles.swap_style),
);
}
#[cfg(feature = "zfs")]
{
let arc_data = &app_state.converted_data.arc_data;
let arc_percentage = if let Some(arc) = arc_data.last() {
arc.1
} else {
0.0
};
if let Some((_, label_frac)) = &app_state.converted_data.arc_labels {
let arc_fraction_label = if app_state.basic_mode_use_percent {
format!("{:3.0}%", arc_percentage.round())
} else {
label_frac.trim().to_string()
};
if let Some(arc_harvest) = &data.arc_harvest {
let arc_percentage = arc_harvest.saturating_percentage();
let arc_fraction_label =
memory_label(arc_harvest, app_state.basic_mode_use_percent);
draw_widgets.push(
PipeGauge::default()
.ratio(arc_percentage / 100.0)
@ -129,45 +126,32 @@ impl Painter {
#[cfg(feature = "gpu")]
{
if let Some(gpu_data) = &app_state.converted_data.gpu_data {
let gpu_styles = &self.styles.gpu_colours;
let mut color_index = 0;
let gpu_styles = &self.styles.gpu_colours;
let mut colour_index = 0;
gpu_data.iter().for_each(|gpu_data_vec| {
let gpu_data = gpu_data_vec.points.as_slice();
let gpu_percentage = if let Some(gpu) = gpu_data.last() {
gpu.1
for (_, harvest) in data.gpu_harvest.iter() {
let percentage = harvest.saturating_percentage();
let label = memory_label(harvest, app_state.basic_mode_use_percent);
let style = {
if gpu_styles.is_empty() {
tui::style::Style::default()
} else {
0.0
};
let trimmed_gpu_frac = {
if app_state.basic_mode_use_percent {
format!("{:3.0}%", gpu_percentage.round())
} else {
gpu_data_vec.mem_total.trim().to_string()
}
};
let style = {
if gpu_styles.is_empty() {
tui::style::Style::default()
} else if color_index >= gpu_styles.len() {
// cycle styles
color_index = 1;
gpu_styles[color_index - 1]
} else {
color_index += 1;
gpu_styles[color_index - 1]
}
};
draw_widgets.push(
PipeGauge::default()
.ratio(gpu_percentage / 100.0)
.start_label("GPU")
.inner_label(trimmed_gpu_frac)
.label_style(style)
.gauge_style(style),
);
});
let colour = gpu_styles[colour_index % gpu_styles.len()];
colour_index += 1;
colour
}
};
draw_widgets.push(
PipeGauge::default()
.ratio(percentage / 100.0)
.start_label("GPU")
.inner_label(label)
.label_style(style)
.gauge_style(style),
);
}
}

View File

@ -1,116 +1,171 @@
use std::borrow::Cow;
use std::{borrow::Cow, time::Instant};
use tui::{
layout::{Constraint, Rect},
style::Style,
symbols::Marker,
Frame,
};
use crate::{
app::App,
app::{data::Values, App},
canvas::{
components::time_graph::{GraphData, TimeGraph},
components::time_graph::{AxisBound, GraphData, TimeGraph},
drawing_utils::should_hide_x_label,
Painter,
},
collection::memory::MemHarvest,
get_binary_unit_and_denominator,
};
/// Convert memory info into a combined memory label.
#[inline]
fn memory_legend_label(name: &str, data: Option<&MemHarvest>) -> String {
if let Some(data) = data {
if data.total_bytes > 0 {
let percentage = data.used_bytes as f64 / data.total_bytes as f64 * 100.0;
let (unit, denominator) = get_binary_unit_and_denominator(data.total_bytes);
let used = data.used_bytes as f64 / denominator;
let total = data.total_bytes as f64 / denominator;
format!("{name}:{percentage:3.0}% {used:.1}{unit}/{total:.1}{unit}")
} else {
format!("{name}: 0% 0.0B/0.0B")
}
} else {
format!("{name}: 0% 0.0B/0.0B")
}
}
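// For example, with 2 GiB used out of 8 GiB this yields a legend entry like
// "RAM: 25% 2.0GiB/8.0GiB" (the "GiB" unit text comes from
// `get_binary_unit_and_denominator` and is assumed here), while a missing or
// empty harvest falls back to "RAM: 0% 0.0B/0.0B".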
/// Get graph data.
#[inline]
fn graph_data<'a>(
out: &mut Vec<GraphData<'a>>, name: &str, last_harvest: Option<&'a MemHarvest>,
time: &'a [Instant], values: &'a Values, style: Style,
) {
if !values.no_elements() {
let label = memory_legend_label(name, last_harvest).into();
out.push(
GraphData::default()
.name(label)
.time(time)
.values(values)
.style(style),
);
}
}
impl Painter {
pub fn draw_memory_graph(
&self, f: &mut Frame<'_>, app_state: &mut App, draw_loc: Rect, widget_id: u64,
) {
const Y_BOUNDS: [f64; 2] = [0.0, 100.5];
const Y_BOUNDS: AxisBound = AxisBound::Max(100.5);
const Y_LABELS: [Cow<'static, str>; 2] = [Cow::Borrowed(" 0%"), Cow::Borrowed("100%")];
if let Some(mem_widget_state) = app_state.states.mem_state.widget_states.get_mut(&widget_id)
{
if let Some(mem_state) = app_state.states.mem_state.widget_states.get_mut(&widget_id) {
let border_style = self.get_border_style(widget_id, app_state.current_widget.widget_id);
let x_bounds = [0, mem_widget_state.current_display_time];
let x_min = -(mem_state.current_display_time as f64);
let hide_x_labels = should_hide_x_label(
app_state.app_config_fields.hide_time,
app_state.app_config_fields.autohide_time,
&mut mem_widget_state.autohide_timer,
&mut mem_state.autohide_timer,
draw_loc,
);
let points = {
let graph_data = {
let mut size = 1;
if app_state.converted_data.swap_labels.is_some() {
let data = app_state.data_store.get_data();
// TODO: is this optimization really needed...? This just pre-allocates a vec, but it'll probably never
// be that big...
if data.swap_harvest.is_some() {
size += 1; // add capacity for SWAP
}
#[cfg(feature = "zfs")]
{
if app_state.converted_data.arc_labels.is_some() {
if data.arc_harvest.is_some() {
size += 1; // add capacity for ARC
}
}
#[cfg(feature = "gpu")]
{
if let Some(gpu_data) = &app_state.converted_data.gpu_data {
size += gpu_data.len(); // add row(s) for gpu
}
size += data.gpu_harvest.len(); // add row(s) for gpu
}
let mut points = Vec::with_capacity(size);
if let Some((label_percent, label_frac)) = &app_state.converted_data.mem_labels {
let mem_label = format!("RAM:{label_percent}{label_frac}");
points.push(GraphData {
points: &app_state.converted_data.mem_data,
style: self.styles.ram_style,
name: Some(mem_label.into()),
});
}
let timeseries = &data.timeseries_data;
let time = &timeseries.time;
graph_data(
&mut points,
"RAM",
Some(&data.ram_harvest),
time,
&timeseries.ram,
self.styles.ram_style,
);
graph_data(
&mut points,
"SWP",
data.swap_harvest.as_ref(),
time,
&timeseries.swap,
self.styles.swap_style,
);
#[cfg(not(target_os = "windows"))]
if let Some((label_percent, label_frac)) = &app_state.converted_data.cache_labels {
let cache_label = format!("CHE:{label_percent}{label_frac}");
points.push(GraphData {
points: &app_state.converted_data.cache_data,
style: self.styles.cache_style,
name: Some(cache_label.into()),
});
}
if let Some((label_percent, label_frac)) = &app_state.converted_data.swap_labels {
let swap_label = format!("SWP:{label_percent}{label_frac}");
points.push(GraphData {
points: &app_state.converted_data.swap_data,
style: self.styles.swap_style,
name: Some(swap_label.into()),
});
{
graph_data(
&mut points,
"CACHE", // TODO: Figure out how to line this up better
data.cache_harvest.as_ref(),
time,
&timeseries.cache_mem,
self.styles.cache_style,
);
}
#[cfg(feature = "zfs")]
if let Some((label_percent, label_frac)) = &app_state.converted_data.arc_labels {
let arc_label = format!("ARC:{label_percent}{label_frac}");
points.push(GraphData {
points: &app_state.converted_data.arc_data,
style: self.styles.arc_style,
name: Some(arc_label.into()),
});
{
graph_data(
&mut points,
"ARC",
data.arc_harvest.as_ref(),
time,
&timeseries.arc_mem,
self.styles.arc_style,
);
}
#[cfg(feature = "gpu")]
{
if let Some(gpu_data) = &app_state.converted_data.gpu_data {
let mut color_index = 0;
let gpu_styles = &self.styles.gpu_colours;
gpu_data.iter().for_each(|gpu| {
let gpu_label =
format!("{}:{}{}", gpu.name, gpu.mem_percent, gpu.mem_total);
let mut colour_index = 0;
let gpu_styles = &self.styles.gpu_colours;
for (name, harvest) in &data.gpu_harvest {
if let Some(gpu_data) = data.timeseries_data.gpu_mem.get(name) {
let style = {
if gpu_styles.is_empty() {
tui::style::Style::default()
} else if color_index >= gpu_styles.len() {
// cycle styles
color_index = 1;
gpu_styles[color_index - 1]
Style::default()
} else {
color_index += 1;
gpu_styles[color_index - 1]
let colour = gpu_styles[colour_index % gpu_styles.len()];
colour_index += 1;
colour
}
};
points.push(GraphData {
points: gpu.points.as_slice(),
graph_data(
&mut points,
name, // TODO: REALLY figure out how to line this up better
Some(harvest),
time,
gpu_data,
style,
name: Some(gpu_label.into()),
});
});
);
}
}
}
@ -124,7 +179,7 @@ impl Painter {
};
TimeGraph {
x_bounds,
x_min,
hide_x_labels,
y_bounds: Y_BOUNDS,
y_labels: &Y_LABELS,
@ -138,8 +193,9 @@ impl Painter {
legend_position: app_state.app_config_fields.memory_legend_position,
legend_constraints: Some((Constraint::Ratio(3, 4), Constraint::Ratio(3, 4))),
marker,
scaling: Default::default(),
}
.draw_time_graph(f, draw_loc, &points);
.draw_time_graph(f, draw_loc, graph_data);
}
if app_state.should_get_widget_bounds() {

View File

@ -8,6 +8,7 @@ use tui::{
use crate::{
app::App,
canvas::{drawing_utils::widget_block, Painter},
utils::data_units::{convert_bits, get_unit_prefix},
};
impl Painter {
@ -39,10 +40,17 @@ impl Painter {
);
}
let rx_label = format!("RX: {}", app_state.converted_data.rx_display);
let tx_label = format!("TX: {}", app_state.converted_data.tx_display);
let total_rx_label = format!("Total RX: {}", app_state.converted_data.total_rx_display);
let total_tx_label = format!("Total TX: {}", app_state.converted_data.total_tx_display);
let use_binary_prefix = app_state.app_config_fields.network_use_binary_prefix;
let network_data = &(app_state.data_store.get_data().network_harvest);
let rx = get_unit_prefix(network_data.rx, use_binary_prefix);
let tx = get_unit_prefix(network_data.tx, use_binary_prefix);
let total_rx = convert_bits(network_data.total_rx, use_binary_prefix);
let total_tx = convert_bits(network_data.total_tx, use_binary_prefix);
let rx_label = format!("RX: {:.1}{}", rx.0, rx.1);
let tx_label = format!("TX: {:.1}{}", tx.0, tx.1);
let total_rx_label = format!("Total RX: {:.1}{}", total_rx.0, total_rx.1);
let total_tx_label = format!("Total TX: {:.1}{}", total_tx.0, total_tx.1);
let net_text = vec![
Line::from(Span::styled(rx_label, self.styles.rx_style)),

View File

@ -1,3 +1,5 @@
use std::time::Duration;
use tui::{
layout::{Constraint, Direction, Layout, Rect},
symbols::Marker,
@ -7,16 +9,16 @@ use tui::{
};
use crate::{
app::{App, AxisScaling},
app::{App, AppConfigFields, AxisScaling},
canvas::{
components::{
time_chart::Point,
time_graph::{GraphData, TimeGraph},
},
components::time_graph::{AxisBound, ChartScaling, GraphData, TimeGraph},
drawing_utils::should_hide_x_label,
Painter,
},
utils::{data_prefixes::*, data_units::DataUnit, general::partial_ordering},
utils::{
data_units::*,
general::{saturating_log10, saturating_log2},
},
};
impl Painter {
@ -54,16 +56,19 @@ impl Painter {
pub fn draw_network_graph(
&self, f: &mut Frame<'_>, app_state: &mut App, draw_loc: Rect, widget_id: u64,
hide_legend: bool,
full_screen: bool,
) {
if let Some(network_widget_state) =
app_state.states.net_state.widget_states.get_mut(&widget_id)
{
let network_data_rx = &app_state.converted_data.network_data_rx;
let network_data_tx = &app_state.converted_data.network_data_tx;
let shared_data = app_state.data_store.get_data();
let network_latest_data = &(shared_data.network_harvest);
let rx_points = &(shared_data.timeseries_data.rx);
let tx_points = &(shared_data.timeseries_data.tx);
let time = &(shared_data.timeseries_data.time);
let time_start = -(network_widget_state.current_display_time as f64);
let border_style = self.get_border_style(widget_id, app_state.current_widget.widget_id);
let x_bounds = [0, network_widget_state.current_display_time];
let hide_x_labels = should_hide_x_label(
app_state.app_config_fields.hide_time,
app_state.app_config_fields.autohide_time,
@ -71,79 +76,105 @@ impl Painter {
draw_loc,
);
// TODO: Cache network results: Only update if:
// - Force update (includes time interval change)
// - Old max time is off screen
// - A new time interval is better and does not fit (check from end of vector to
// last checked; we only want to update if it is TOO big!)
let y_max = {
if let Some(last_time) = time.last() {
// For now, just do it each time. Might want to cache this later though.
// Find the maximal rx/tx so we know how to scale, and return it.
let (_best_time, max_entry) = get_max_entry(
network_data_rx,
network_data_tx,
time_start,
&app_state.app_config_fields.network_scale_type,
app_state.app_config_fields.network_use_binary_prefix,
);
let mut biggest = 0.0;
let first_time = *last_time
- Duration::from_millis(network_widget_state.current_display_time);
let (max_range, labels) = adjust_network_data_point(
max_entry,
&app_state.app_config_fields.network_scale_type,
&app_state.app_config_fields.network_unit_type,
app_state.app_config_fields.network_use_binary_prefix,
);
for (_, &v) in rx_points
.iter_along_base(time)
.rev()
.take_while(|(&time, _)| time >= first_time)
{
if v > biggest {
biggest = v;
}
}
let y_labels = labels.iter().map(|label| label.into()).collect::<Vec<_>>();
let y_bounds = [0.0, max_range];
for (_, &v) in tx_points
.iter_along_base(time)
.rev()
.take_while(|(&time, _)| time >= first_time)
{
if v > biggest {
biggest = v;
}
}
let legend_constraints = if hide_legend {
biggest
} else {
0.0
}
};
let (y_max, y_labels) = adjust_network_data_point(y_max, &app_state.app_config_fields);
let y_bounds = AxisBound::Max(y_max);
let legend_constraints = if full_screen {
(Constraint::Ratio(0, 1), Constraint::Ratio(0, 1))
} else {
(Constraint::Ratio(1, 1), Constraint::Ratio(3, 4))
};
// TODO: Add support for clicking on legend to only show that value on chart.
let points = if app_state.app_config_fields.use_old_network_legend && !hide_legend {
let use_binary_prefix = app_state.app_config_fields.network_use_binary_prefix;
let unit_type = app_state.app_config_fields.network_unit_type;
let unit = match unit_type {
DataUnit::Byte => "B/s",
DataUnit::Bit => "b/s",
};
let rx = get_unit_prefix(network_latest_data.rx, use_binary_prefix);
let tx = get_unit_prefix(network_latest_data.tx, use_binary_prefix);
let total_rx = convert_bits(network_latest_data.total_rx, use_binary_prefix);
let total_tx = convert_bits(network_latest_data.total_tx, use_binary_prefix);
// TODO: This behaviour is pretty weird, we should probably just make it so if you use old network legend
// you don't do whatever this is...
let graph_data = if app_state.app_config_fields.use_old_network_legend && !full_screen {
let rx_label = format!("RX: {:.1}{}{}", rx.0, rx.1, unit);
let tx_label = format!("TX: {:.1}{}{}", tx.0, tx.1, unit);
let total_rx_label = format!("Total RX: {:.1}{}", total_rx.0, total_rx.1);
let total_tx_label = format!("Total TX: {:.1}{}", total_tx.0, total_tx.1);
vec![
GraphData {
points: network_data_rx,
style: self.styles.rx_style,
name: Some(format!("RX: {:7}", app_state.converted_data.rx_display).into()),
},
GraphData {
points: network_data_tx,
style: self.styles.tx_style,
name: Some(format!("TX: {:7}", app_state.converted_data.tx_display).into()),
},
GraphData {
points: &[],
style: self.styles.total_rx_style,
name: Some(
format!("Total RX: {:7}", app_state.converted_data.total_rx_display)
.into(),
),
},
GraphData {
points: &[],
style: self.styles.total_tx_style,
name: Some(
format!("Total TX: {:7}", app_state.converted_data.total_tx_display)
.into(),
),
},
GraphData::default()
.name(rx_label.into())
.time(time)
.values(rx_points)
.style(self.styles.rx_style),
GraphData::default()
.name(tx_label.into())
.time(time)
.values(tx_points)
.style(self.styles.tx_style),
GraphData::default()
.style(self.styles.total_rx_style)
.name(total_rx_label.into()),
GraphData::default()
.style(self.styles.total_tx_style)
.name(total_tx_label.into()),
]
} else {
let rx_label = format!("{:.1}{}{}", rx.0, rx.1, unit);
let tx_label = format!("{:.1}{}{}", tx.0, tx.1, unit);
let total_rx_label = format!("{:.1}{}", total_rx.0, total_rx.1);
let total_tx_label = format!("{:.1}{}", total_tx.0, total_tx.1);
vec![
GraphData {
points: network_data_rx,
style: self.styles.rx_style,
name: Some((&app_state.converted_data.rx_display).into()),
},
GraphData {
points: network_data_tx,
style: self.styles.tx_style,
name: Some((&app_state.converted_data.tx_display).into()),
},
GraphData::default()
.name(format!("RX: {:<10} All: {}", rx_label, total_rx_label).into())
.time(time)
.values(rx_points)
.style(self.styles.rx_style),
GraphData::default()
.name(format!("TX: {:<10} All: {}", tx_label, total_tx_label).into())
.time(time)
.values(tx_points)
.style(self.styles.tx_style),
]
};
@ -153,11 +184,23 @@ impl Painter {
Marker::Braille
};
let scaling = match app_state.app_config_fields.network_scale_type {
AxisScaling::Log => {
// TODO: I might change this behaviour later.
if app_state.app_config_fields.network_use_binary_prefix {
ChartScaling::Log2
} else {
ChartScaling::Log10
}
}
AxisScaling::Linear => ChartScaling::Linear,
};
TimeGraph {
x_bounds,
x_min: time_start,
hide_x_labels,
y_bounds,
y_labels: &y_labels,
y_labels: &(y_labels.into_iter().map(Into::into).collect::<Vec<_>>()),
graph_style: self.styles.graph_style,
border_style,
border_type: self.styles.border_type,
@ -168,8 +211,9 @@ impl Painter {
legend_position: app_state.app_config_fields.network_legend_position,
legend_constraints: Some(legend_constraints),
marker,
scaling,
}
.draw_time_graph(f, draw_loc, &points);
.draw_time_graph(f, draw_loc, graph_data);
}
}
@ -178,17 +222,31 @@ impl Painter {
) {
const NETWORK_HEADERS: [&str; 4] = ["RX", "TX", "Total RX", "Total TX"];
let rx_display = &app_state.converted_data.rx_display;
let tx_display = &app_state.converted_data.tx_display;
let total_rx_display = &app_state.converted_data.total_rx_display;
let total_tx_display = &app_state.converted_data.total_tx_display;
let network_latest_data = &(app_state.data_store.get_data().network_harvest);
let use_binary_prefix = app_state.app_config_fields.network_use_binary_prefix;
let unit_type = app_state.app_config_fields.network_unit_type;
let unit = match unit_type {
DataUnit::Byte => "B/s",
DataUnit::Bit => "b/s",
};
let rx = get_unit_prefix(network_latest_data.rx, use_binary_prefix);
let tx = get_unit_prefix(network_latest_data.tx, use_binary_prefix);
let rx_label = format!("{:.1}{}{}", rx.0, rx.1, unit);
let tx_label = format!("{:.1}{}{}", tx.0, tx.1, unit);
let total_rx = convert_bits(network_latest_data.total_rx, use_binary_prefix);
let total_tx = convert_bits(network_latest_data.total_tx, use_binary_prefix);
let total_rx_label = format!("{:.1}{}", total_rx.0, total_rx.1);
let total_tx_label = format!("{:.1}{}", total_tx.0, total_tx.1);
// Gross but I need it to work...
let total_network = vec![Row::new([
Text::styled(rx_display, self.styles.rx_style),
Text::styled(tx_display, self.styles.tx_style),
Text::styled(total_rx_display, self.styles.total_rx_style),
Text::styled(total_tx_display, self.styles.total_tx_style),
Text::styled(rx_label, self.styles.rx_style),
Text::styled(tx_label, self.styles.tx_style),
Text::styled(total_rx_label, self.styles.total_rx_style),
Text::styled(total_tx_label, self.styles.total_tx_style),
])];
// Draw
@ -214,133 +272,11 @@ impl Painter {
}
}
/// Returns the max data point and time given a time.
fn get_max_entry(
rx: &[Point], tx: &[Point], time_start: f64, network_scale_type: &AxisScaling,
network_use_binary_prefix: bool,
) -> Point {
/// Determines a "fake" max value in circumstances where we couldn't find
/// one from the data.
fn calculate_missing_max(
network_scale_type: &AxisScaling, network_use_binary_prefix: bool,
) -> f64 {
match network_scale_type {
AxisScaling::Log => {
if network_use_binary_prefix {
LOG_KIBI_LIMIT
} else {
LOG_KILO_LIMIT
}
}
AxisScaling::Linear => {
if network_use_binary_prefix {
KIBI_LIMIT_F64
} else {
KILO_LIMIT_F64
}
}
}
}
// First, let's shorten our ranges to actually look. We can abuse the fact that
// our rx and tx arrays are sorted, so we can short-circuit our search to
// filter out only the relevant data points...
let filtered_rx = if let (Some(rx_start), Some(rx_end)) = (
rx.iter().position(|(time, _data)| *time >= time_start),
rx.iter().rposition(|(time, _data)| *time <= 0.0),
) {
Some(&rx[rx_start..=rx_end])
} else {
None
};
let filtered_tx = if let (Some(tx_start), Some(tx_end)) = (
tx.iter().position(|(time, _data)| *time >= time_start),
tx.iter().rposition(|(time, _data)| *time <= 0.0),
) {
Some(&tx[tx_start..=tx_end])
} else {
None
};
// Then, find the maximal rx/tx so we know how to scale, and return it.
match (filtered_rx, filtered_tx) {
(None, None) => (
time_start,
calculate_missing_max(network_scale_type, network_use_binary_prefix),
),
(None, Some(filtered_tx)) => {
match filtered_tx
.iter()
.max_by(|(_, data_a), (_, data_b)| partial_ordering(data_a, data_b))
{
Some((best_time, max_val)) => {
if *max_val == 0.0 {
(
time_start,
calculate_missing_max(network_scale_type, network_use_binary_prefix),
)
} else {
(*best_time, *max_val)
}
}
None => (
time_start,
calculate_missing_max(network_scale_type, network_use_binary_prefix),
),
}
}
(Some(filtered_rx), None) => {
match filtered_rx
.iter()
.max_by(|(_, data_a), (_, data_b)| partial_ordering(data_a, data_b))
{
Some((best_time, max_val)) => {
if *max_val == 0.0 {
(
time_start,
calculate_missing_max(network_scale_type, network_use_binary_prefix),
)
} else {
(*best_time, *max_val)
}
}
None => (
time_start,
calculate_missing_max(network_scale_type, network_use_binary_prefix),
),
}
}
(Some(filtered_rx), Some(filtered_tx)) => {
match filtered_rx
.iter()
.chain(filtered_tx)
.max_by(|(_, data_a), (_, data_b)| partial_ordering(data_a, data_b))
{
Some((best_time, max_val)) => {
if *max_val == 0.0 {
(
*best_time,
calculate_missing_max(network_scale_type, network_use_binary_prefix),
)
} else {
(*best_time, *max_val)
}
}
None => (
time_start,
calculate_missing_max(network_scale_type, network_use_binary_prefix),
),
}
}
}
}
/// Returns the required max data point and labels.
fn adjust_network_data_point(
max_entry: f64, network_scale_type: &AxisScaling, network_unit_type: &DataUnit,
network_use_binary_prefix: bool,
) -> (f64, Vec<String>) {
/// Returns the required max data point and labels.
///
/// TODO: This is _really_ ugly... also there might be a bug with certain heights and too many labels.
/// We may need to take draw height into account, either here, or in the time graph itself.
fn adjust_network_data_point(max_entry: f64, config: &AppConfigFields) -> (f64, Vec<String>) {
// So, we're going with an approach like this for linear data:
// - Main goal is to maximize the amount of information displayed given a
// specific height. We don't want to drown out some data if the ranges are too
@ -353,9 +289,9 @@ fn adjust_network_data_point(
// drew 4 segments, it would be 97.5, 195, 292.5, 390, and
// probably something like 438.75?
//
// So, how do we do this in ratatui? Well, if we are using intervals that tie
// So, how do we do this in ratatui? Well, if we are using intervals that tie
// in perfectly to the max value we want... then it's actually not that
// hard. Since ratatui accepts a vector as labels and will properly space
// hard. Since ratatui accepts a vector as labels and will properly space
// them all out... we just work with that and space it out properly.
//
// Dynamic chart idea based off of FreeNAS's chart design.
@ -368,14 +304,20 @@ fn adjust_network_data_point(
// Now just check the largest unit we correspond to... then proceed to build
// some entries from there!
let scale_type = config.network_scale_type;
let use_binary_prefix = config.network_use_binary_prefix;
let network_unit_type = config.network_unit_type;
let unit_char = match network_unit_type {
DataUnit::Byte => "B",
DataUnit::Bit => "b",
};
match network_scale_type {
match scale_type {
AxisScaling::Linear => {
let (k_limit, m_limit, g_limit, t_limit) = if network_use_binary_prefix {
let max_entry = max_entry * 1.5;
let (k_limit, m_limit, g_limit, t_limit) = if use_binary_prefix {
(
KIBI_LIMIT_F64,
MEBI_LIMIT_F64,
@ -391,32 +333,31 @@ fn adjust_network_data_point(
)
};
let bumped_max_entry = max_entry * 1.5; // We use the bumped up version to calculate our unit type.
let (max_value_scaled, unit_prefix, unit_type): (f64, &str, &str) =
if bumped_max_entry < k_limit {
if max_entry < k_limit {
(max_entry, "", unit_char)
} else if bumped_max_entry < m_limit {
} else if max_entry < m_limit {
(
max_entry / k_limit,
if network_use_binary_prefix { "Ki" } else { "K" },
if use_binary_prefix { "Ki" } else { "K" },
unit_char,
)
} else if bumped_max_entry < g_limit {
} else if max_entry < g_limit {
(
max_entry / m_limit,
if network_use_binary_prefix { "Mi" } else { "M" },
if use_binary_prefix { "Mi" } else { "M" },
unit_char,
)
} else if bumped_max_entry < t_limit {
} else if max_entry < t_limit {
(
max_entry / g_limit,
if network_use_binary_prefix { "Gi" } else { "G" },
if use_binary_prefix { "Gi" } else { "G" },
unit_char,
)
} else {
(
max_entry / t_limit,
if network_use_binary_prefix { "Ti" } else { "T" },
if use_binary_prefix { "Ti" } else { "T" },
unit_char,
)
};
@ -424,7 +365,6 @@ fn adjust_network_data_point(
// Finally, build an acceptable range starting from there, using the given
// height! Note we try to put more of a weight on the bottom section
// vs. the top, since the top has less data.
let base_unit = max_value_scaled;
let labels: Vec<String> = vec![
format!("0{unit_prefix}{unit_type}"),
@ -433,19 +373,29 @@ fn adjust_network_data_point(
format!("{:.1}", base_unit * 1.5),
]
.into_iter()
.map(|s| format!("{s:>5}")) // Pull 5 as the longest legend value is generally going to be 5 digits (if they somehow
// hit over 5 terabits per second)
.map(|s| {
// Pull 5 as the longest legend value is generally going to be 5 digits (if they somehow hit over 5 terabits per second)
format!("{s:>5}")
})
.collect();
(bumped_max_entry, labels)
(max_entry, labels)
}
AxisScaling::Log => {
let (m_limit, g_limit, t_limit) = if network_use_binary_prefix {
let (m_limit, g_limit, t_limit) = if use_binary_prefix {
(LOG_MEBI_LIMIT, LOG_GIBI_LIMIT, LOG_TEBI_LIMIT)
} else {
(LOG_MEGA_LIMIT, LOG_GIGA_LIMIT, LOG_TERA_LIMIT)
};
// Remember to do saturating log checks as otherwise 0.0 becomes inf, and you get
// gaps!
let max_entry = if use_binary_prefix {
saturating_log2(max_entry)
} else {
saturating_log10(max_entry)
};
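For context, a minimal sketch of what such saturating log helpers look like — the whole point is to clamp non-positive inputs to 0.0 rather than letting them become -inf (the exact signatures in the new timeseries code are assumed here):

```rust
/// Sketch: log10 clamped at zero, so a 0-valued sample maps to 0.0 instead of
/// -inf and no longer punches a gap into a log-scaled line.
fn saturating_log10(val: f64) -> f64 {
    if val > 0.0 { val.log10() } else { 0.0 }
}

/// The same idea in base 2, used when binary prefixes (KiB, MiB, ...) are selected.
fn saturating_log2(val: f64) -> f64 {
    if val > 0.0 { val.log2() } else { 0.0 }
}
```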
fn get_zero(network_use_binary_prefix: bool, unit_char: &str) -> String {
format!(
"{}0{}",
@ -498,47 +448,47 @@ fn adjust_network_data_point(
(
m_limit,
vec![
get_zero(network_use_binary_prefix, unit_char),
get_k(network_use_binary_prefix, unit_char),
get_m(network_use_binary_prefix, unit_char),
get_zero(use_binary_prefix, unit_char),
get_k(use_binary_prefix, unit_char),
get_m(use_binary_prefix, unit_char),
],
)
} else if max_entry < g_limit {
(
g_limit,
vec![
get_zero(network_use_binary_prefix, unit_char),
get_k(network_use_binary_prefix, unit_char),
get_m(network_use_binary_prefix, unit_char),
get_g(network_use_binary_prefix, unit_char),
get_zero(use_binary_prefix, unit_char),
get_k(use_binary_prefix, unit_char),
get_m(use_binary_prefix, unit_char),
get_g(use_binary_prefix, unit_char),
],
)
} else if max_entry < t_limit {
(
t_limit,
vec![
get_zero(network_use_binary_prefix, unit_char),
get_k(network_use_binary_prefix, unit_char),
get_m(network_use_binary_prefix, unit_char),
get_g(network_use_binary_prefix, unit_char),
get_t(network_use_binary_prefix, unit_char),
get_zero(use_binary_prefix, unit_char),
get_k(use_binary_prefix, unit_char),
get_m(use_binary_prefix, unit_char),
get_g(use_binary_prefix, unit_char),
get_t(use_binary_prefix, unit_char),
],
)
} else {
// I really doubt anyone's transferring beyond petabyte speeds...
(
if network_use_binary_prefix {
if use_binary_prefix {
LOG_PEBI_LIMIT
} else {
LOG_PETA_LIMIT
},
vec![
get_zero(network_use_binary_prefix, unit_char),
get_k(network_use_binary_prefix, unit_char),
get_m(network_use_binary_prefix, unit_char),
get_g(network_use_binary_prefix, unit_char),
get_t(network_use_binary_prefix, unit_char),
get_p(network_use_binary_prefix, unit_char),
get_zero(use_binary_prefix, unit_char),
get_k(use_binary_prefix, unit_char),
get_m(use_binary_prefix, unit_char),
get_g(use_binary_prefix, unit_char),
get_t(use_binary_prefix, unit_char),
get_p(use_binary_prefix, unit_char),
],
)
}

View File

@ -1,4 +1,6 @@
//! This is the main file to house data collection functions.
//!
//! TODO: Rename this to intake? Collection?
#[cfg(feature = "nvidia")]
pub mod nvidia;
@ -25,10 +27,10 @@ use processes::Pid;
#[cfg(feature = "battery")]
use starship_battery::{Battery, Manager};
use self::temperature::TemperatureType;
use super::DataFilters;
use crate::app::layout_manager::UsedWidgets;
// TODO: We can possibly re-use an internal buffer for this to reduce allocs.
#[derive(Clone, Debug)]
pub struct Data {
pub collection_time: Instant,
@ -38,7 +40,7 @@ pub struct Data {
#[cfg(not(target_os = "windows"))]
pub cache: Option<memory::MemHarvest>,
pub swap: Option<memory::MemHarvest>,
pub temperature_sensors: Option<Vec<temperature::TempHarvest>>,
pub temperature_sensors: Option<Vec<temperature::TempSensorData>>,
pub network: Option<network::NetworkHarvest>,
pub list_of_processes: Option<Vec<processes::ProcessHarvest>>,
pub disks: Option<Vec<disks::DiskHarvest>>,
@ -143,7 +145,6 @@ impl Default for SysinfoSource {
pub struct DataCollector {
pub data: Data,
sys: SysinfoSource,
temperature_type: TemperatureType,
use_current_cpu_total: bool,
unnormalized_cpu: bool,
last_collection_time: Instant,
@ -189,7 +190,6 @@ impl DataCollector {
prev_idle: 0_f64,
#[cfg(target_os = "linux")]
prev_non_idle: 0_f64,
temperature_type: TemperatureType::Celsius,
use_current_cpu_total: false,
unnormalized_cpu: false,
last_collection_time,
@ -236,14 +236,10 @@ impl DataCollector {
self.data.cleanup();
}
pub fn set_data_collection(&mut self, used_widgets: UsedWidgets) {
pub fn set_collection(&mut self, used_widgets: UsedWidgets) {
self.widgets_to_harvest = used_widgets;
}
pub fn set_temperature_type(&mut self, temperature_type: TemperatureType) {
self.temperature_type = temperature_type;
}
pub fn set_use_current_cpu_total(&mut self, use_current_cpu_total: bool) {
self.use_current_cpu_total = use_current_cpu_total;
}
@ -354,11 +350,9 @@ impl DataCollector {
let mut local_gpu_total_mem: u64 = 0;
#[cfg(feature = "nvidia")]
if let Some(data) = nvidia::get_nvidia_vecs(
&self.temperature_type,
&self.filters.temp_filter,
&self.widgets_to_harvest,
) {
if let Some(data) =
nvidia::get_nvidia_vecs(&self.filters.temp_filter, &self.widgets_to_harvest)
{
if let Some(mut temp) = data.temperature {
if let Some(sensors) = &mut self.data.temperature_sensors {
sensors.append(&mut temp);
@ -377,7 +371,6 @@ impl DataCollector {
#[cfg(target_os = "linux")]
if let Some(data) = amd::get_amd_vecs(
&self.temperature_type,
&self.filters.temp_filter,
&self.widgets_to_harvest,
self.last_collection_time,
@ -433,18 +426,14 @@ impl DataCollector {
fn update_temps(&mut self) {
if self.widgets_to_harvest.use_temp {
#[cfg(not(target_os = "linux"))]
if let Ok(data) = temperature::get_temperature_data(
&self.sys.temps,
&self.temperature_type,
&self.filters.temp_filter,
) {
if let Ok(data) =
temperature::get_temperature_data(&self.sys.temps, &self.filters.temp_filter)
{
self.data.temperature_sensors = data;
}
#[cfg(target_os = "linux")]
if let Ok(data) =
temperature::get_temperature_data(&self.temperature_type, &self.filters.temp_filter)
{
if let Ok(data) = temperature::get_temperature_data(&self.filters.temp_filter) {
self.data.temperature_sensors = data;
}
}

View File

@ -2,10 +2,7 @@ mod amdgpu_marketing;
use crate::{
app::{filter::Filter, layout_manager::UsedWidgets},
data_collection::{
memory::MemHarvest,
temperature::{TempHarvest, TemperatureType},
},
collection::{memory::MemHarvest, temperature::TempSensorData},
};
use hashbrown::{HashMap, HashSet};
use std::{
@ -18,7 +15,7 @@ use std::{
pub struct AMDGPUData {
pub memory: Option<Vec<(String, MemHarvest)>>,
pub temperature: Option<Vec<TempHarvest>>,
pub temperature: Option<Vec<TempSensorData>>,
pub procs: Option<(u64, Vec<HashMap<u32, (u64, u32)>>)>,
}
@ -49,7 +46,7 @@ pub struct AMDGPUProc {
static PROC_DATA: LazyLock<Mutex<HashMap<PathBuf, HashMap<u32, AMDGPUProc>>>> =
LazyLock::new(|| Mutex::new(HashMap::new()));
pub fn get_amd_devs() -> Option<Vec<PathBuf>> {
fn get_amd_devs() -> Option<Vec<PathBuf>> {
let mut devices = Vec::new();
// read all PCI devices controlled by the AMDGPU module
@ -75,7 +72,7 @@ pub fn get_amd_devs() -> Option<Vec<PathBuf>> {
}
}
pub fn get_amd_name(device_path: &Path) -> Option<String> {
fn get_amd_name(device_path: &Path) -> Option<String> {
// get revision and device ids from sysfs
let rev_path = device_path.join("revision");
let dev_path = device_path.join("device");
@ -113,7 +110,7 @@ pub fn get_amd_name(device_path: &Path) -> Option<String> {
.map(|tuple| tuple.2.to_string())
}
pub fn get_amd_vram(device_path: &Path) -> Option<AMDGPUMemory> {
fn get_amd_vram(device_path: &Path) -> Option<AMDGPUMemory> {
// get vram memory info from sysfs
let vram_total_path = device_path.join("mem_info_vram_total");
let vram_used_path = device_path.join("mem_info_vram_used");
@ -142,7 +139,7 @@ pub fn get_amd_vram(device_path: &Path) -> Option<AMDGPUMemory> {
})
}
pub fn get_amd_temp(device_path: &Path) -> Option<Vec<AMDGPUTemperature>> {
fn get_amd_temp(device_path: &Path) -> Option<Vec<AMDGPUTemperature>> {
let mut temperatures = Vec::new();
// get hardware monitoring sensor info from sysfs
@ -224,7 +221,7 @@ pub fn get_amd_temp(device_path: &Path) -> Option<Vec<AMDGPUTemperature>> {
}
// from amdgpu_top: https://github.com/Umio-Yasuno/amdgpu_top/blob/c961cf6625c4b6d63fda7f03348323048563c584/crates/libamdgpu_top/src/stat/fdinfo/proc_info.rs#L114
pub fn diff_usage(pre: u64, cur: u64, interval: &Duration) -> u64 {
fn diff_usage(pre: u64, cur: u64, interval: &Duration) -> u64 {
use std::ops::Mul;
let diff_ns = if pre == 0 || cur < pre {
@ -240,7 +237,7 @@ pub fn diff_usage(pre: u64, cur: u64, interval: &Duration) -> u64 {
}
// from amdgpu_top: https://github.com/Umio-Yasuno/amdgpu_top/blob/c961cf6625c4b6d63fda7f03348323048563c584/crates/libamdgpu_top/src/stat/fdinfo/proc_info.rs#L13-L27
pub fn get_amdgpu_pid_fds(pid: u32, device_path: Vec<PathBuf>) -> Option<Vec<u32>> {
fn get_amdgpu_pid_fds(pid: u32, device_path: Vec<PathBuf>) -> Option<Vec<u32>> {
let Ok(fd_list) = fs::read_dir(format!("/proc/{pid}/fd/")) else {
return None;
};
@ -266,7 +263,7 @@ pub fn get_amdgpu_pid_fds(pid: u32, device_path: Vec<PathBuf>) -> Option<Vec<u32
}
}
pub fn get_amdgpu_drm(device_path: &Path) -> Option<Vec<PathBuf>> {
fn get_amdgpu_drm(device_path: &Path) -> Option<Vec<PathBuf>> {
let mut drm_devices = Vec::new();
let drm_root = device_path.join("drm");
@ -300,7 +297,7 @@ pub fn get_amdgpu_drm(device_path: &Path) -> Option<Vec<PathBuf>> {
}
}
pub fn get_amd_fdinfo(device_path: &Path) -> Option<HashMap<u32, AMDGPUProc>> {
fn get_amd_fdinfo(device_path: &Path) -> Option<HashMap<u32, AMDGPUProc>> {
let mut fdinfo = HashMap::new();
let drm_paths = get_amdgpu_drm(device_path)?;
@ -401,10 +398,8 @@ pub fn get_amd_fdinfo(device_path: &Path) -> Option<HashMap<u32, AMDGPUProc>> {
Some(fdinfo)
}
#[inline]
pub fn get_amd_vecs(
temp_type: &TemperatureType, filter: &Option<Filter>, widgets_to_harvest: &UsedWidgets,
prev_time: Instant,
filter: &Option<Filter>, widgets_to_harvest: &UsedWidgets, prev_time: Instant,
) -> Option<AMDGPUData> {
let device_path_list = get_amd_devs()?;
let interval = Instant::now().duration_since(prev_time);
@ -436,11 +431,9 @@ pub fn get_amd_vecs(
if widgets_to_harvest.use_temp && Filter::optional_should_keep(filter, &device_name) {
if let Some(temperatures) = get_amd_temp(&device_path) {
for info in temperatures {
let temperature = temp_type.convert_temp_unit(info.temperature);
temp_vec.push(TempHarvest {
temp_vec.push(TempSensorData {
name: format!("{} {}", device_name, info.name),
temperature: Some(temperature),
temperature: Some(info.temperature),
});
}
}

View File

@ -1,38 +1,39 @@
//! CPU stats through sysinfo.
//! Supports FreeBSD.
use std::collections::VecDeque;
use sysinfo::System;
use super::{CpuData, CpuDataType, CpuHarvest};
use crate::data_collection::error::CollectionResult;
use crate::collection::error::CollectionResult;
pub fn get_cpu_data_list(sys: &System, show_average_cpu: bool) -> CollectionResult<CpuHarvest> {
let mut cpu_deque: VecDeque<_> = sys
.cpus()
.iter()
.enumerate()
.map(|(i, cpu)| CpuData {
data_type: CpuDataType::Cpu(i),
cpu_usage: cpu.cpu_usage() as f64,
})
.collect();
let mut cpus = vec![];
if show_average_cpu {
let cpu = sys.global_cpu_info();
cpu_deque.push_front(CpuData {
cpus.push(CpuData {
data_type: CpuDataType::Avg,
cpu_usage: cpu.cpu_usage() as f64,
})
}
Ok(Vec::from(cpu_deque))
cpus.extend(
sys.cpus()
.iter()
.enumerate()
.map(|(i, cpu)| CpuData {
data_type: CpuDataType::Cpu(i),
cpu_usage: cpu.cpu_usage() as f64,
})
.collect::<Vec<_>>(),
);
Ok(cpus)
}
#[cfg(target_family = "unix")]
pub(crate) fn get_load_avg() -> crate::data_collection::cpu::LoadAvgHarvest {
pub(crate) fn get_load_avg() -> crate::collection::cpu::LoadAvgHarvest {
// The API for sysinfo apparently wants you to call it like this, rather than
// using a &System.
let sysinfo::LoadAvg { one, five, fifteen } = sysinfo::System::load_average();

View File

@ -6,9 +6,7 @@ use hashbrown::HashMap;
use serde::Deserialize;
use super::{keep_disk_entry, DiskHarvest, IoHarvest};
use crate::data_collection::{
deserialize_xo, disks::IoData, error::CollectionResult, DataCollector,
};
use crate::collection::{deserialize_xo, disks::IoData, error::CollectionResult, DataCollector};
#[derive(Deserialize, Debug, Default)]
#[serde(rename_all = "kebab-case")]
@ -40,7 +38,7 @@ pub fn get_io_usage() -> CollectionResult<IoHarvest> {
#[cfg(feature = "zfs")]
{
use crate::data_collection::disks::zfs_io_counters;
use crate::collection::disks::zfs_io_counters;
if let Ok(zfs_io) = zfs_io_counters::zfs_io_stats() {
for io in zfs_io.into_iter() {
let mount_point = io.device_name().to_string_lossy();

View File

@ -1,7 +1,7 @@
//! Fallback disk info using sysinfo.
use super::{keep_disk_entry, DiskHarvest};
use crate::data_collection::DataCollector;
use crate::collection::DataCollector;
pub(crate) fn get_disk_usage(collector: &DataCollector) -> anyhow::Result<Vec<DiskHarvest>> {
let disks = &collector.sys.disks;

View File

@ -25,7 +25,7 @@ use file_systems::*;
use usage::*;
use super::{keep_disk_entry, DiskHarvest};
use crate::data_collection::DataCollector;
use crate::collection::DataCollector;
/// Returns the disk usage of the mounted (and for now, physical) disks.
pub fn get_disk_usage(collector: &DataCollector) -> anyhow::Result<Vec<DiskHarvest>> {

View File

@ -7,7 +7,7 @@ use std::{
str::FromStr,
};
use crate::data_collection::disks::IoCounters;
use crate::collection::disks::IoCounters;
/// Copied from the `psutil` sources:
///
@ -87,7 +87,7 @@ pub fn io_stats() -> anyhow::Result<Vec<IoCounters>> {
#[cfg(feature = "zfs")]
{
use crate::data_collection::disks::zfs_io_counters;
use crate::collection::disks::zfs_io_counters;
if let Ok(mut zfs_io) = zfs_io_counters::zfs_io_stats() {
results.append(&mut zfs_io);
}

View File

@ -12,7 +12,7 @@ use std::{
use anyhow::bail;
use crate::data_collection::disks::unix::{FileSystem, Usage};
use crate::collection::disks::unix::{FileSystem, Usage};
/// Representation of partition details. Based on [`heim`](https://github.com/heim-rs/heim/tree/master).
pub(crate) struct Partition {

View File

@ -1,7 +1,7 @@
//! Based on [heim's implementation](https://github.com/heim-rs/heim/blob/master/heim-disk/src/sys/macos/counters.rs).
use super::io_kit::{self, get_dict, get_disks, get_i64, get_string};
use crate::data_collection::disks::IoCounters;
use crate::collection::disks::IoCounters;
fn get_device_io(device: io_kit::IoObject) -> anyhow::Result<IoCounters> {
let parent = device.service_parent()?;

View File

@ -8,7 +8,7 @@ use std::{
use anyhow::bail;
use super::bindings;
use crate::data_collection::disks::unix::{FileSystem, Usage};
use crate::collection::disks::unix::{FileSystem, Usage};
pub(crate) struct Partition {
device: String,

View File

@ -6,7 +6,7 @@ use bindings::*;
use itertools::Itertools;
use super::{keep_disk_entry, DiskHarvest};
use crate::data_collection::{disks::IoCounters, DataCollector};
use crate::collection::{disks::IoCounters, DataCollector};
/// Returns I/O stats.
pub(crate) fn io_stats() -> anyhow::Result<Vec<IoCounters>> {

View File

@ -1,4 +1,4 @@
use crate::data_collection::disks::IoCounters;
use crate::collection::disks::IoCounters;
/// Returns zpool I/O stats. Pulls data from `sysctl
/// kstat.zfs.{POOL}.dataset.{objset-*}`

View File

@ -23,6 +23,7 @@ pub struct MemHarvest {
impl MemHarvest {
/// Return the use percentage. If the total bytes is 0, then this returns `None`.
#[inline]
pub fn checked_percent(&self) -> Option<f64> {
let used = self.used_bytes as f64;
let total = self.total_bytes as f64;
@ -33,4 +34,17 @@ impl MemHarvest {
Some(used / total * 100.0)
}
}
/// Return the use percentage. If the total bytes is 0, then this returns 0.0.
#[inline]
pub fn saturating_percentage(&self) -> f64 {
let used = self.used_bytes as f64;
let total = self.total_bytes as f64;
if total == 0.0 {
0.0
} else {
used / total * 100.0
}
}
}
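A quick illustrative test of how the two accessors differ, assuming `MemHarvest` carries only the `used_bytes`/`total_bytes` fields used above (hypothetical, not part of the diff):

```rust
#[test]
fn percentage_accessors_differ_only_on_zero_totals() {
    let empty = MemHarvest { used_bytes: 0, total_bytes: 0 };
    assert_eq!(empty.checked_percent(), None); // caller must handle "no data"
    assert_eq!(empty.saturating_percentage(), 0.0); // safe default, e.g. for plotting

    let half = MemHarvest { used_bytes: 512, total_bytes: 1024 };
    assert_eq!(half.checked_percent(), Some(50.0));
    assert_eq!(half.saturating_percentage(), 50.0);
}
```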

View File

@ -2,7 +2,7 @@
use sysinfo::System;
use crate::data_collection::memory::MemHarvest;
use crate::collection::memory::MemHarvest;
/// Returns RAM usage.
pub(crate) fn get_ram_usage(sys: &System) -> Option<MemHarvest> {

View File

@ -2,7 +2,7 @@ use std::mem::{size_of, zeroed};
use windows::Win32::System::ProcessStatus::{GetPerformanceInfo, PERFORMANCE_INFORMATION};
use crate::data_collection::memory::MemHarvest;
use crate::collection::memory::MemHarvest;
const PERFORMANCE_INFORMATION_SIZE: u32 = size_of::<PERFORMANCE_INFORMATION>() as _;

View File

@ -7,17 +7,14 @@ use nvml_wrapper::{
use crate::{
app::{filter::Filter, layout_manager::UsedWidgets},
data_collection::{
memory::MemHarvest,
temperature::{TempHarvest, TemperatureType},
},
collection::{memory::MemHarvest, temperature::TempSensorData},
};
pub static NVML_DATA: OnceLock<Result<Nvml, NvmlError>> = OnceLock::new();
pub struct GpusData {
pub memory: Option<Vec<(String, MemHarvest)>>,
pub temperature: Option<Vec<TempHarvest>>,
pub temperature: Option<Vec<TempSensorData>>,
pub procs: Option<(u64, Vec<HashMap<u32, (u64, u32)>>)>,
}
@ -47,7 +44,7 @@ fn init_nvml() -> Result<Nvml, NvmlError> {
/// Returns the GPU data from NVIDIA cards.
#[inline]
pub fn get_nvidia_vecs(
temp_type: &TemperatureType, filter: &Option<Filter>, widgets_to_harvest: &UsedWidgets,
filter: &Option<Filter>, widgets_to_harvest: &UsedWidgets,
) -> Option<GpusData> {
if let Ok(nvml) = NVML_DATA.get_or_init(init_nvml) {
if let Ok(num_gpu) = nvml.device_count() {
@ -75,14 +72,12 @@ pub fn get_nvidia_vecs(
&& Filter::optional_should_keep(filter, &name)
{
if let Ok(temperature) = device.temperature(TemperatureSensor::Gpu) {
let temperature = temp_type.convert_temp_unit(temperature as f32);
temp_vec.push(TempHarvest {
temp_vec.push(TempSensorData {
name,
temperature: Some(temperature),
temperature: Some(temperature as f32),
});
} else {
temp_vec.push(TempHarvest {
temp_vec.push(TempSensorData {
name,
temperature: None,
});

View File

@ -145,7 +145,7 @@ impl DataCollector {
} else if #[cfg(any(target_os = "freebsd", target_os = "macos", target_os = "windows", target_os = "android", target_os = "ios"))] {
sysinfo_process_data(self)
} else {
Err(crate::data_collection::error::CollectionError::Unsupported)
Err(crate::collection::error::CollectionError::Unsupported)
}
}
}

View File

@ -5,7 +5,7 @@ use std::{io, process::Command};
use hashbrown::HashMap;
use serde::{Deserialize, Deserializer};
use crate::data_collection::{deserialize_xo, processes::UnixProcessExt, Pid};
use crate::collection::{deserialize_xo, processes::UnixProcessExt, Pid};
#[derive(Deserialize, Debug, Default)]
#[serde(rename_all = "kebab-case")]

View File

@ -13,7 +13,7 @@ use process::*;
use sysinfo::ProcessStatus;
use super::{Pid, ProcessHarvest, UserTable};
use crate::data_collection::{error::CollectionResult, DataCollector};
use crate::collection::{error::CollectionResult, DataCollector};
/// Maximum character length of a `/proc/<PID>/stat` process name.
/// If it's equal to or greater, then we instead refer to the command for the name.

View File

@ -16,7 +16,7 @@ use rustix::{
path::Arg,
};
use crate::data_collection::processes::Pid;
use crate::collection::processes::Pid;
static PAGESIZE: OnceLock<u64> = OnceLock::new();

View File

@ -8,7 +8,7 @@ use hashbrown::HashMap;
use itertools::Itertools;
use super::UnixProcessExt;
use crate::data_collection::Pid;
use crate::collection::Pid;
pub(crate) struct MacOSProcessExt;

View File

@ -10,7 +10,7 @@ use libc::{
};
use mach2::vm_types::user_addr_t;
use crate::data_collection::Pid;
use crate::collection::Pid;
#[repr(C)]
pub(crate) struct kinfo_proc {

View File

@ -12,8 +12,8 @@ cfg_if! {
use super::ProcessHarvest;
use crate::data_collection::{DataCollector, processes::*};
use crate::data_collection::error::CollectionResult;
use crate::collection::{DataCollector, processes::*};
use crate::collection::error::CollectionResult;
pub fn sysinfo_process_data(collector: &mut DataCollector) -> CollectionResult<Vec<ProcessHarvest>> {
let sys = &collector.sys.system;

View File

@ -6,7 +6,7 @@ use hashbrown::HashMap;
use sysinfo::{ProcessStatus, System};
use super::ProcessHarvest;
use crate::data_collection::{error::CollectionResult, processes::UserTable, Pid};
use crate::collection::{error::CollectionResult, processes::UserTable, Pid};
pub(crate) trait UnixProcessExt {
fn sysinfo_process_data(

View File

@ -1,6 +1,6 @@
use hashbrown::HashMap;
use crate::data_collection::error::{CollectionError, CollectionResult};
use crate::collection::error::{CollectionError, CollectionResult};
#[derive(Debug, Default)]
pub struct UserTable {

View File

@ -3,7 +3,7 @@
use std::time::Duration;
use super::ProcessHarvest;
use crate::data_collection::{error::CollectionResult, DataCollector};
use crate::collection::{error::CollectionResult, DataCollector};
// TODO: There's a lot of shared code with this and the unix impl.
pub fn sysinfo_process_data(

View File

@ -0,0 +1,23 @@
//! Data collection for temperature metrics.
//!
//! For Linux, this is handled by reading hwmon and thermal zone data from sysfs.
//! For other supported platforms, this is handled by sysinfo.
cfg_if::cfg_if! {
if #[cfg(target_os = "linux")] {
pub mod linux;
pub use self::linux::*;
} else if #[cfg(any(target_os = "freebsd", target_os = "macos", target_os = "windows", target_os = "android", target_os = "ios"))] {
pub mod sysinfo;
pub use self::sysinfo::*;
}
}
#[derive(Default, Debug, Clone)]
pub struct TempSensorData {
/// The name of the sensor.
pub name: String,
/// The temperature in Celsius.
pub temperature: Option<f32>,
}
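With sensors now always reporting Celsius, unit conversion presumably moves to the presentation side; a minimal sketch of that step, mirroring the formulas of the `TemperatureType::convert_temp_unit` implementation removed further down (the enum and function names here are illustrative):

```rust
#[derive(Clone, Copy)]
enum TemperatureUnit {
    Celsius,
    Kelvin,
    Fahrenheit,
}

/// Convert a stored Celsius reading only when it is displayed.
fn display_temp(celsius: f32, unit: TemperatureUnit) -> f32 {
    match unit {
        TemperatureUnit::Celsius => celsius,
        TemperatureUnit::Kelvin => celsius + 273.15,
        TemperatureUnit::Fahrenheit => celsius * (9.0 / 5.0) + 32.0,
    }
}
```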

View File

@ -8,7 +8,7 @@ use std::{
use anyhow::Result;
use hashbrown::{HashMap, HashSet};
use super::{TempHarvest, TemperatureType};
use super::TempSensorData;
use crate::app::filter::Filter;
const EMPTY_NAME: &str = "Unknown";
@ -16,7 +16,7 @@ const EMPTY_NAME: &str = "Unknown";
/// Returned results from grabbing hwmon/coretemp temperature sensor
/// values/names.
struct HwmonResults {
temperatures: Vec<TempHarvest>,
temperatures: Vec<TempSensorData>,
num_hwmon: usize,
}
@ -223,8 +223,8 @@ fn is_device_awake(path: &Path) -> bool {
/// the device is already in ACPI D0. This has the notable issue that
/// once this happens, the device will be *kept* on through the sensor
/// reading, and not be able to re-enter ACPI D3cold.
fn hwmon_temperatures(temp_type: &TemperatureType, filter: &Option<Filter>) -> HwmonResults {
let mut temperatures: Vec<TempHarvest> = vec![];
fn hwmon_temperatures(filter: &Option<Filter>) -> HwmonResults {
let mut temperatures: Vec<TempSensorData> = vec![];
let mut seen_names: HashMap<String, u32> = HashMap::new();
let (dirs, num_hwmon) = get_hwmon_candidates();
@ -246,7 +246,7 @@ fn hwmon_temperatures(temp_type: &TemperatureType, filter: &Option<Filter>) -> H
if !is_device_awake(&file_path) {
let name = finalize_name(None, None, &sensor_name, &mut seen_names);
temperatures.push(TempHarvest {
temperatures.push(TempSensorData {
name,
temperature: None,
});
@ -329,9 +329,9 @@ fn hwmon_temperatures(temp_type: &TemperatureType, filter: &Option<Filter>) -> H
// probing hwmon if not needed?
if Filter::optional_should_keep(filter, &name) {
if let Ok(temp_celsius) = parse_temp(&temp_path) {
temperatures.push(TempHarvest {
temperatures.push(TempSensorData {
name,
temperature: Some(temp_type.convert_temp_unit(temp_celsius)),
temperature: Some(temp_celsius),
});
}
}
@ -351,9 +351,7 @@ fn hwmon_temperatures(temp_type: &TemperatureType, filter: &Option<Filter>) -> H
///
/// See [the Linux kernel documentation](https://www.kernel.org/doc/Documentation/ABI/testing/sysfs-class-thermal)
/// for more details.
fn add_thermal_zone_temperatures(
temperatures: &mut Vec<TempHarvest>, temp_type: &TemperatureType, filter: &Option<Filter>,
) {
fn add_thermal_zone_temperatures(temperatures: &mut Vec<TempSensorData>, filter: &Option<Filter>) {
let path = Path::new("/sys/class/thermal");
let Ok(read_dir) = path.read_dir() else {
return;
@ -382,9 +380,9 @@ fn add_thermal_zone_temperatures(
if let Ok(temp_celsius) = parse_temp(&temp_path) {
let name = counted_name(&mut seen_names, name);
temperatures.push(TempHarvest {
temperatures.push(TempSensorData {
name,
temperature: Some(temp_type.convert_temp_unit(temp_celsius)),
temperature: Some(temp_celsius),
});
}
}
@ -394,13 +392,11 @@ fn add_thermal_zone_temperatures(
}
/// Gets temperature sensors and data.
pub fn get_temperature_data(
temp_type: &TemperatureType, filter: &Option<Filter>,
) -> Result<Option<Vec<TempHarvest>>> {
let mut results = hwmon_temperatures(temp_type, filter);
pub fn get_temperature_data(filter: &Option<Filter>) -> Result<Option<Vec<TempSensorData>>> {
let mut results = hwmon_temperatures(filter);
if results.num_hwmon == 0 {
add_thermal_zone_temperatures(&mut results.temperatures, temp_type, filter);
add_thermal_zone_temperatures(&mut results.temperatures, filter);
}
Ok(Some(results.temperatures))

View File

@ -2,21 +2,21 @@
use anyhow::Result;
use super::{TempHarvest, TemperatureType};
use super::TempSensorData;
use crate::app::filter::Filter;
pub fn get_temperature_data(
components: &sysinfo::Components, temp_type: &TemperatureType, filter: &Option<Filter>,
) -> Result<Option<Vec<TempHarvest>>> {
let mut temperature_vec: Vec<TempHarvest> = Vec::new();
components: &sysinfo::Components, filter: &Option<Filter>,
) -> Result<Option<Vec<TempSensorData>>> {
let mut temperatures: Vec<TempSensorData> = Vec::new();
for component in components {
let name = component.label().to_string();
if Filter::optional_should_keep(filter, &name) {
temperature_vec.push(TempHarvest {
temperatures.push(TempSensorData {
name,
temperature: Some(temp_type.convert_temp_unit(component.temperature())),
temperature: Some(component.temperature()),
});
}
}
@ -32,13 +32,9 @@ pub fn get_temperature_data(
for ctl in sysctl::CtlIter::below(root).flatten() {
if let (Ok(name), Ok(temp)) = (ctl.name(), ctl.value()) {
if let Some(temp) = temp.as_temperature() {
temperature_vec.push(TempHarvest {
temperatures.push(TempSensorData {
name,
temperature: Some(match temp_type {
TemperatureType::Celsius => temp.celsius(),
TemperatureType::Kelvin => temp.kelvin(),
TemperatureType::Fahrenheit => temp.fahrenheit(),
}),
temperature: Some(temp.celsius()),
});
}
}
@ -47,5 +43,5 @@ pub fn get_temperature_data(
}
// TODO: Should we instead use a hashmap -> vec to skip dupes?
Ok(Some(temperature_vec))
Ok(Some(temperatures))
}

View File

@ -1,85 +0,0 @@
//! Data collection for temperature metrics.
//!
//! For Linux and macOS, this is handled by Heim.
//! For Windows, this is handled by sysinfo.
cfg_if::cfg_if! {
if #[cfg(target_os = "linux")] {
pub mod linux;
pub use self::linux::*;
} else if #[cfg(any(target_os = "freebsd", target_os = "macos", target_os = "windows", target_os = "android", target_os = "ios"))] {
pub mod sysinfo;
pub use self::sysinfo::*;
}
}
use std::str::FromStr;
#[derive(Default, Debug, Clone)]
pub struct TempHarvest {
pub name: String,
pub temperature: Option<f32>,
}
#[derive(Clone, Debug, Copy, PartialEq, Eq, Default)]
pub enum TemperatureType {
#[default]
Celsius,
Kelvin,
Fahrenheit,
}
impl FromStr for TemperatureType {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"fahrenheit" | "f" => Ok(TemperatureType::Fahrenheit),
"kelvin" | "k" => Ok(TemperatureType::Kelvin),
"celsius" | "c" => Ok(TemperatureType::Celsius),
_ => Err(format!(
"'{s}' is an invalid temperature type, use one of: [kelvin, k, celsius, c, fahrenheit, f]."
)),
}
}
}
impl TemperatureType {
/// Given a temperature in Celsius, convert it if necessary for a different
/// unit.
pub fn convert_temp_unit(&self, temp_celsius: f32) -> f32 {
fn convert_celsius_to_kelvin(celsius: f32) -> f32 {
celsius + 273.15
}
fn convert_celsius_to_fahrenheit(celsius: f32) -> f32 {
(celsius * (9.0 / 5.0)) + 32.0
}
match self {
TemperatureType::Celsius => temp_celsius,
TemperatureType::Kelvin => convert_celsius_to_kelvin(temp_celsius),
TemperatureType::Fahrenheit => convert_celsius_to_fahrenheit(temp_celsius),
}
}
}
#[cfg(test)]
mod test {
use crate::data_collection::temperature::TemperatureType;
#[test]
fn temp_conversions() {
const TEMP: f32 = 100.0;
assert_eq!(
TemperatureType::Celsius.convert_temp_unit(TEMP),
TEMP,
"celsius to celsius is the same"
);
assert_eq!(TemperatureType::Kelvin.convert_temp_unit(TEMP), 373.15);
assert_eq!(TemperatureType::Fahrenheit.convert_temp_unit(TEMP), 212.0);
}
}

View File

@ -1,253 +1,7 @@
//! This mainly concerns converting collected data into things that the canvas
//! can actually handle.
// TODO: Split this up!
use std::borrow::Cow;
use crate::{
app::{data_farmer::DataCollection, AxisScaling},
canvas::components::time_chart::Point,
data_collection::{cpu::CpuDataType, memory::MemHarvest, temperature::TemperatureType},
utils::{data_prefixes::*, data_units::DataUnit},
widgets::{DiskWidgetData, TempWidgetData},
};
// TODO: [NETWORKING] add min/max/mean of each
// min_rx : f64,
// max_rx : f64,
// mean_rx: f64,
// min_tx: f64,
// max_tx: f64,
// mean_tx: f64,
#[derive(Default, Debug)]
pub struct ConvertedNetworkData {
pub rx: Vec<Point>,
pub tx: Vec<Point>,
pub rx_display: String,
pub tx_display: String,
pub total_rx_display: Option<String>,
pub total_tx_display: Option<String>,
}
#[derive(Clone, Debug)]
pub enum CpuWidgetData {
All,
Entry {
data_type: CpuDataType,
/// A point here represents time (x) and value (y).
data: Vec<Point>,
last_entry: f64,
},
}
#[derive(Default)]
pub struct ConvertedData {
pub rx_display: String,
pub tx_display: String,
pub total_rx_display: String,
pub total_tx_display: String,
pub network_data_rx: Vec<Point>,
pub network_data_tx: Vec<Point>,
pub mem_labels: Option<(String, String)>,
#[cfg(not(target_os = "windows"))]
pub cache_labels: Option<(String, String)>,
pub swap_labels: Option<(String, String)>,
// TODO: Switch this and all data points over to a better data structure.
//
// We can dedupe the f64 for time by storing it alongside this data structure.
// We can also just store everything via an references and iterators to avoid
// duplicating data, I guess.
pub mem_data: Vec<Point>,
#[cfg(not(target_os = "windows"))]
pub cache_data: Vec<Point>,
pub swap_data: Vec<Point>,
#[cfg(feature = "zfs")]
pub arc_labels: Option<(String, String)>,
#[cfg(feature = "zfs")]
pub arc_data: Vec<Point>,
#[cfg(feature = "gpu")]
pub gpu_data: Option<Vec<ConvertedGpuData>>,
pub load_avg_data: [f32; 3],
pub cpu_data: Vec<CpuWidgetData>,
pub disk_data: Vec<DiskWidgetData>,
pub temp_data: Vec<TempWidgetData>,
}
impl ConvertedData {
// TODO: Can probably heavily reduce this step to avoid clones.
pub fn convert_disk_data(&mut self, data: &DataCollection) {
self.disk_data.clear();
data.disk_harvest
.iter()
.zip(&data.io_labels)
.for_each(|(disk, (io_read, io_write))| {
// Because this sometimes does *not* equal to disk.total.
let summed_total_bytes = match (disk.used_space, disk.free_space) {
(Some(used), Some(free)) => Some(used + free),
_ => None,
};
self.disk_data.push(DiskWidgetData {
name: Cow::Owned(disk.name.to_string()),
mount_point: Cow::Owned(disk.mount_point.to_string()),
free_bytes: disk.free_space,
used_bytes: disk.used_space,
total_bytes: disk.total_space,
summed_total_bytes,
io_read: Cow::Owned(io_read.to_string()),
io_write: Cow::Owned(io_write.to_string()),
});
});
self.disk_data.shrink_to_fit();
}
pub fn convert_temp_data(&mut self, data: &DataCollection, temperature_type: TemperatureType) {
self.temp_data.clear();
data.temp_harvest.iter().for_each(|temp_harvest| {
self.temp_data.push(TempWidgetData {
sensor: Cow::Owned(temp_harvest.name.to_string()),
temperature_value: temp_harvest.temperature.map(|temp| temp.ceil() as u64),
temperature_type,
});
});
self.temp_data.shrink_to_fit();
}
pub fn convert_cpu_data(&mut self, current_data: &DataCollection) {
let current_time = current_data.current_instant;
// (Re-)initialize the vector if the lengths don't match...
if let Some((_time, data)) = &current_data.timed_data_vec.last() {
if data.cpu_data.len() + 1 != self.cpu_data.len() {
self.cpu_data = Vec::with_capacity(data.cpu_data.len() + 1);
self.cpu_data.push(CpuWidgetData::All);
self.cpu_data.extend(
data.cpu_data
.iter()
.zip(&current_data.cpu_harvest)
.map(|(cpu_usage, data)| CpuWidgetData::Entry {
data_type: data.data_type,
data: vec![],
last_entry: *cpu_usage,
})
.collect::<Vec<CpuWidgetData>>(),
);
} else {
self.cpu_data
.iter_mut()
.skip(1)
.zip(&data.cpu_data)
.for_each(|(mut cpu, cpu_usage)| match &mut cpu {
CpuWidgetData::All => unreachable!(),
CpuWidgetData::Entry {
data_type: _,
data,
last_entry,
} => {
// A bit faster to just update all the times, so we just clear the
// vector.
data.clear();
*last_entry = *cpu_usage;
}
});
}
}
// TODO: [Opt] Can probably avoid data deduplication - store the shift + data +
// original once. Now push all the data.
for (itx, mut cpu) in &mut self.cpu_data.iter_mut().skip(1).enumerate() {
match &mut cpu {
CpuWidgetData::All => unreachable!(),
CpuWidgetData::Entry {
data_type: _,
data,
last_entry: _,
} => {
for (time, timed_data) in &current_data.timed_data_vec {
let time_start: f64 =
(current_time.duration_since(*time).as_millis() as f64).floor();
if let Some(val) = timed_data.cpu_data.get(itx) {
data.push((-time_start, *val));
}
if *time == current_time {
break;
}
}
data.shrink_to_fit();
}
}
}
}
}
pub fn convert_mem_data_points(data: &DataCollection) -> Vec<Point> {
let mut result: Vec<Point> = Vec::new();
let current_time = data.current_instant;
for (time, data) in &data.timed_data_vec {
if let Some(mem_data) = data.mem_data {
let time_from_start: f64 =
(current_time.duration_since(*time).as_millis() as f64).floor();
result.push((-time_from_start, mem_data));
if *time == current_time {
break;
}
}
}
result
}
#[cfg(not(target_os = "windows"))]
pub fn convert_cache_data_points(data: &DataCollection) -> Vec<Point> {
let mut result: Vec<Point> = Vec::new();
let current_time = data.current_instant;
for (time, data) in &data.timed_data_vec {
if let Some(cache_data) = data.cache_data {
let time_from_start: f64 =
(current_time.duration_since(*time).as_millis() as f64).floor();
result.push((-time_from_start, cache_data));
if *time == current_time {
break;
}
}
}
result
}
pub fn convert_swap_data_points(data: &DataCollection) -> Vec<Point> {
let mut result: Vec<Point> = Vec::new();
let current_time = data.current_instant;
for (time, data) in &data.timed_data_vec {
if let Some(swap_data) = data.swap_data {
let time_from_start: f64 =
(current_time.duration_since(*time).as_millis() as f64).floor();
result.push((-time_from_start, swap_data));
if *time == current_time {
break;
}
}
}
result
}
use crate::utils::data_units::*;
/// Returns the most appropriate binary prefix unit type (e.g. kibibyte) and
/// denominator for the given amount of bytes.
@ -256,7 +10,7 @@ pub fn convert_swap_data_points(data: &DataCollection) -> Vec<Point> {
/// denominator in order to be able to use it with the returned binary unit
/// (e.g. divide 3000 bytes by 1024 to have a value in KiB).
#[inline]
fn get_binary_unit_and_denominator(bytes: u64) -> (&'static str, f64) {
pub(crate) fn get_binary_unit_and_denominator(bytes: u64) -> (&'static str, f64) {
match bytes {
b if b < KIBI_LIMIT => ("B", 1.0),
b if b < MEBI_LIMIT => ("KiB", KIBI_LIMIT_F64),
@ -266,213 +20,11 @@ fn get_binary_unit_and_denominator(bytes: u64) -> (&'static str, f64) {
}
}
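As a usage sketch, the now-`pub(crate)` helper supports the same "used/total" labelling that the removed `convert_mem_label` below performed (values are illustrative):

```rust
#[test]
fn binary_unit_label_sketch() {
    let total_bytes: u64 = 8 * 1024 * 1024 * 1024; // 8 GiB
    let used_bytes: u64 = 3 * 1024 * 1024 * 1024; // 3 GiB
    let (unit, denominator) = get_binary_unit_and_denominator(total_bytes);
    let label = format!(
        "{:.1}{unit}/{:.1}{unit}",
        used_bytes as f64 / denominator,
        total_bytes as f64 / denominator,
    );
    assert_eq!(label, "3.0GiB/8.0GiB");
}
```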
/// Returns the unit type and denominator for given total amount of memory in
/// kibibytes.
pub fn convert_mem_label(harvest: &MemHarvest) -> Option<(String, String)> {
(harvest.total_bytes > 0).then(|| {
let percentage = harvest.used_bytes as f64 / harvest.total_bytes as f64 * 100.0;
(format!("{percentage:3.0}%"), {
let (unit, denominator) = get_binary_unit_and_denominator(harvest.total_bytes);
format!(
" {:.1}{}/{:.1}{}",
harvest.used_bytes as f64 / denominator,
unit,
(harvest.total_bytes as f64 / denominator),
unit
)
})
})
}
pub fn get_network_points(
data: &DataCollection, scale_type: &AxisScaling, unit_type: &DataUnit, use_binary_prefix: bool,
) -> (Vec<Point>, Vec<Point>) {
let mut rx: Vec<Point> = Vec::new();
let mut tx: Vec<Point> = Vec::new();
let current_time = data.current_instant;
for (time, data) in &data.timed_data_vec {
let time_from_start: f64 = (current_time.duration_since(*time).as_millis() as f64).floor();
let (rx_data, tx_data) = match scale_type {
AxisScaling::Log => {
if use_binary_prefix {
match unit_type {
DataUnit::Byte => {
// As dividing by 8 is equal to subtracting 4 in base 2!
((data.rx_data).log2() - 4.0, (data.tx_data).log2() - 4.0)
}
DataUnit::Bit => ((data.rx_data).log2(), (data.tx_data).log2()),
}
} else {
match unit_type {
DataUnit::Byte => {
((data.rx_data / 8.0).log10(), (data.tx_data / 8.0).log10())
}
DataUnit::Bit => ((data.rx_data).log10(), (data.tx_data).log10()),
}
}
}
AxisScaling::Linear => match unit_type {
DataUnit::Byte => (data.rx_data / 8.0, data.tx_data / 8.0),
DataUnit::Bit => (data.rx_data, data.tx_data),
},
};
rx.push((-time_from_start, rx_data));
tx.push((-time_from_start, tx_data));
if *time == current_time {
break;
}
}
(rx, tx)
}
pub fn convert_network_points(
data: &DataCollection, need_four_points: bool, scale_type: &AxisScaling, unit_type: &DataUnit,
use_binary_prefix: bool,
) -> ConvertedNetworkData {
let (rx, tx) = get_network_points(data, scale_type, unit_type, use_binary_prefix);
let unit = match unit_type {
DataUnit::Byte => "B/s",
DataUnit::Bit => "b/s",
};
let (rx_data, tx_data, total_rx_data, total_tx_data) = match unit_type {
DataUnit::Byte => (
data.network_harvest.rx / 8,
data.network_harvest.tx / 8,
data.network_harvest.total_rx / 8,
data.network_harvest.total_tx / 8,
),
DataUnit::Bit => (
data.network_harvest.rx,
data.network_harvest.tx,
data.network_harvest.total_rx / 8, // We always make this bytes...
data.network_harvest.total_tx / 8,
),
};
let (rx_converted_result, total_rx_converted_result): ((f64, String), (f64, &'static str)) =
if use_binary_prefix {
(
get_binary_prefix(rx_data, unit), /* If this isn't obvious why there's two
* functions, one you can configure the unit,
* the other is always bytes */
get_binary_bytes(total_rx_data),
)
} else {
(
get_decimal_prefix(rx_data, unit),
get_decimal_bytes(total_rx_data),
)
};
let (tx_converted_result, total_tx_converted_result): ((f64, String), (f64, &'static str)) =
if use_binary_prefix {
(
get_binary_prefix(tx_data, unit),
get_binary_bytes(total_tx_data),
)
} else {
(
get_decimal_prefix(tx_data, unit),
get_decimal_bytes(total_tx_data),
)
};
if need_four_points {
let rx_display = format!("{:.1}{}", rx_converted_result.0, rx_converted_result.1);
let total_rx_display = Some(format!(
"{:.1}{}",
total_rx_converted_result.0, total_rx_converted_result.1
));
let tx_display = format!("{:.1}{}", tx_converted_result.0, tx_converted_result.1);
let total_tx_display = Some(format!(
"{:.1}{}",
total_tx_converted_result.0, total_tx_converted_result.1
));
ConvertedNetworkData {
rx,
tx,
rx_display,
tx_display,
total_rx_display,
total_tx_display,
}
} else {
let rx_display = format!(
"RX: {:<10} All: {}",
if use_binary_prefix {
format!("{:.1}{:3}", rx_converted_result.0, rx_converted_result.1)
} else {
format!("{:.1}{:2}", rx_converted_result.0, rx_converted_result.1)
},
if use_binary_prefix {
format!(
"{:.1}{:3}",
total_rx_converted_result.0, total_rx_converted_result.1
)
} else {
format!(
"{:.1}{:2}",
total_rx_converted_result.0, total_rx_converted_result.1
)
}
);
let tx_display = format!(
"TX: {:<10} All: {}",
if use_binary_prefix {
format!("{:.1}{:3}", tx_converted_result.0, tx_converted_result.1)
} else {
format!("{:.1}{:2}", tx_converted_result.0, tx_converted_result.1)
},
if use_binary_prefix {
format!(
"{:.1}{:3}",
total_tx_converted_result.0, total_tx_converted_result.1
)
} else {
format!(
"{:.1}{:2}",
total_tx_converted_result.0, total_tx_converted_result.1
)
}
);
ConvertedNetworkData {
rx,
tx,
rx_display,
tx_display,
total_rx_display: None,
total_tx_display: None,
}
}
}
/// Returns a string given a value that is converted to the closest binary
/// variant. If the value is greater than a gibibyte, then it will return a
/// decimal place.
#[inline]
pub fn binary_byte_string(value: u64) -> String {
let converted_values = get_binary_bytes(value);
if value >= GIBI_LIMIT {
format!("{:.1}{}", converted_values.0, converted_values.1)
} else {
format!("{:.0}{}", converted_values.0, converted_values.1)
}
}
/// Returns a string given a value that is converted to the closest SI-variant,
/// per second. If the value is greater than a giga-X, then it will return a
/// decimal place.
#[inline]
pub fn dec_bytes_per_second_string(value: u64) -> String {
pub(crate) fn dec_bytes_per_second_string(value: u64) -> String {
let converted_values = get_decimal_bytes(value);
if value >= GIGA_LIMIT {
format!("{:.1}{}/s", converted_values.0, converted_values.1)
@ -481,139 +33,10 @@ pub fn dec_bytes_per_second_string(value: u64) -> String {
}
}
/// Returns a string given a value that is converted to the closest SI-variant.
/// If the value is greater than a giga-X, then it will return a decimal place.
pub fn dec_bytes_string(value: u64) -> String {
let converted_values = get_decimal_bytes(value);
if value >= GIGA_LIMIT {
format!("{:.1}{}", converted_values.0, converted_values.1)
} else {
format!("{:.0}{}", converted_values.0, converted_values.1)
}
}
#[cfg(feature = "zfs")]
pub fn convert_arc_data_points(current_data: &DataCollection) -> Vec<Point> {
let mut result: Vec<Point> = Vec::new();
let current_time = current_data.current_instant;
for (time, data) in &current_data.timed_data_vec {
if let Some(arc_data) = data.arc_data {
let time_from_start: f64 =
(current_time.duration_since(*time).as_millis() as f64).floor();
result.push((-time_from_start, arc_data));
if *time == current_time {
break;
}
}
}
result
}
#[cfg(feature = "gpu")]
#[derive(Default, Debug)]
pub struct ConvertedGpuData {
pub name: String,
pub mem_total: String,
pub mem_percent: String,
pub points: Vec<Point>,
}
#[cfg(feature = "gpu")]
pub fn convert_gpu_data(current_data: &DataCollection) -> Option<Vec<ConvertedGpuData>> {
let current_time = current_data.current_instant;
// convert points
let mut point_vec: Vec<Vec<Point>> = Vec::with_capacity(current_data.gpu_harvest.len());
for (time, data) in &current_data.timed_data_vec {
data.gpu_data.iter().enumerate().for_each(|(index, point)| {
if let Some(data_point) = point {
let time_from_start: f64 =
(current_time.duration_since(*time).as_millis() as f64).floor();
if let Some(point_slot) = point_vec.get_mut(index) {
point_slot.push((-time_from_start, *data_point));
} else {
point_vec.push(vec![(-time_from_start, *data_point)]);
}
}
});
if *time == current_time {
break;
}
}
// convert labels
let results = current_data
.gpu_harvest
.iter()
.zip(point_vec)
.filter_map(|(gpu, points)| {
(gpu.1.total_bytes > 0).then(|| {
let short_name = {
let last_words = gpu.0.split_whitespace().rev().take(2).collect::<Vec<_>>();
let short_name = format!("{} {}", last_words[1], last_words[0]);
short_name
};
let percent = gpu.1.used_bytes as f64 / gpu.1.total_bytes as f64 * 100.0;
ConvertedGpuData {
name: short_name,
points,
mem_percent: format!("{percent:3.0}%"),
mem_total: {
let (unit, denominator) =
get_binary_unit_and_denominator(gpu.1.total_bytes);
format!(
" {:.1}{unit}/{:.1}{unit}",
gpu.1.used_bytes as f64 / denominator,
(gpu.1.total_bytes as f64 / denominator),
)
},
}
})
})
.collect::<Vec<ConvertedGpuData>>();
if !results.is_empty() {
Some(results)
} else {
None
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_binary_byte_string() {
assert_eq!(binary_byte_string(0), "0B".to_string());
assert_eq!(binary_byte_string(1), "1B".to_string());
assert_eq!(binary_byte_string(1000), "1000B".to_string());
assert_eq!(binary_byte_string(1023), "1023B".to_string());
assert_eq!(binary_byte_string(KIBI_LIMIT), "1KiB".to_string());
assert_eq!(binary_byte_string(KIBI_LIMIT + 1), "1KiB".to_string());
assert_eq!(binary_byte_string(MEBI_LIMIT), "1MiB".to_string());
assert_eq!(binary_byte_string(GIBI_LIMIT), "1.0GiB".to_string());
assert_eq!(binary_byte_string(2 * GIBI_LIMIT), "2.0GiB".to_string());
assert_eq!(
binary_byte_string((2.5 * GIBI_LIMIT as f64) as u64),
"2.5GiB".to_string()
);
assert_eq!(
binary_byte_string((10.34 * TEBI_LIMIT as f64) as u64),
"10.3TiB".to_string()
);
assert_eq!(
binary_byte_string((10.36 * TEBI_LIMIT as f64) as u64),
"10.4TiB".to_string()
);
}
#[test]
fn test_dec_bytes_per_second_string() {
assert_eq!(dec_bytes_per_second_string(0), "0B/s".to_string());

View File

@ -6,7 +6,7 @@ use crossterm::event::{KeyCode, KeyEvent, KeyModifiers, MouseEvent, MouseEventKi
use crate::{
app::{layout_manager::WidgetDirection, App},
data_collection::Data,
collection::Data,
};
/// Events sent to the main thread.

View File

@ -10,15 +10,14 @@
pub(crate) mod app;
mod utils {
pub(crate) mod cancellation_token;
pub(crate) mod data_prefixes;
pub(crate) mod data_units;
pub(crate) mod general;
pub(crate) mod logging;
pub(crate) mod strings;
}
pub(crate) mod canvas;
pub(crate) mod collection;
pub(crate) mod constants;
pub(crate) mod data_collection;
pub(crate) mod data_conversion;
pub(crate) mod event;
pub mod options;
@ -51,6 +50,7 @@ use event::{handle_key_event_or_break, handle_mouse_event, BottomEvent, Collecti
use options::{args, get_or_create_config, init_app};
use tui::{backend::CrosstermBackend, Terminal};
use utils::cancellation_token::CancellationToken;
#[allow(unused_imports, reason = "this is needed if logging is enabled")]
use utils::logging::*;
@ -218,17 +218,15 @@ fn create_collection_thread(
cancellation_token: Arc<CancellationToken>, app_config_fields: &AppConfigFields,
filters: DataFilters, used_widget_set: UsedWidgets,
) -> JoinHandle<()> {
let temp_type = app_config_fields.temperature_type;
let use_current_cpu_total = app_config_fields.use_current_cpu_total;
let unnormalized_cpu = app_config_fields.unnormalized_cpu;
let show_average_cpu = app_config_fields.show_average_cpu;
let update_time = app_config_fields.update_rate;
thread::spawn(move || {
let mut data_state = data_collection::DataCollector::new(filters);
let mut data_state = collection::DataCollector::new(filters);
data_state.set_data_collection(used_widget_set);
data_state.set_temperature_type(temp_type);
data_state.set_collection(used_widget_set);
data_state.set_use_current_cpu_total(use_current_cpu_total);
data_state.set_unnormalized_cpu(unnormalized_cpu);
data_state.set_show_average_cpu(show_average_cpu);
@ -262,7 +260,7 @@ fn create_collection_thread(
}
let event = BottomEvent::Update(Box::from(data_state.data));
data_state.data = data_collection::Data::default();
data_state.data = collection::Data::default();
if sender.send(event).is_err() {
break;
}
@ -326,9 +324,9 @@ pub fn start_bottom() -> anyhow::Result<()> {
let _cleaning_thread = {
let cancellation_token = cancellation_token.clone();
let cleaning_sender = sender.clone();
let offset_wait_time = app.app_config_fields.retention_ms + 60000;
let offset_wait = Duration::from_millis(app.app_config_fields.retention_ms + 60000);
thread::spawn(move || loop {
if cancellation_token.sleep_with_cancellation(Duration::from_millis(offset_wait_time)) {
if cancellation_token.sleep_with_cancellation(offset_wait) {
break;
}
@ -407,7 +405,7 @@ pub fn start_bottom() -> anyhow::Result<()> {
try_drawing(&mut terminal, &mut app, &mut painter)?;
}
BottomEvent::Update(data) => {
app.data_collection.eat_data(data);
app.data_store.eat_data(data, &app.app_config_fields);
// This thing is required as otherwise, some widgets can't draw correctly w/o
// some data (or they need to be re-drawn).
@ -416,111 +414,40 @@ pub fn start_bottom() -> anyhow::Result<()> {
app.is_force_redraw = true;
}
if !app.frozen_state.is_frozen() {
if !app.data_store.is_frozen() {
// Convert all data into data for the displayed widgets.
if app.used_widgets.use_net {
let network_data = convert_network_points(
&app.data_collection,
app.app_config_fields.use_basic_mode
|| app.app_config_fields.use_old_network_legend,
&app.app_config_fields.network_scale_type,
&app.app_config_fields.network_unit_type,
app.app_config_fields.network_use_binary_prefix,
);
app.converted_data.network_data_rx = network_data.rx;
app.converted_data.network_data_tx = network_data.tx;
app.converted_data.rx_display = network_data.rx_display;
app.converted_data.tx_display = network_data.tx_display;
if let Some(total_rx_display) = network_data.total_rx_display {
app.converted_data.total_rx_display = total_rx_display;
}
if let Some(total_tx_display) = network_data.total_tx_display {
app.converted_data.total_tx_display = total_tx_display;
}
}
if app.used_widgets.use_disk {
app.converted_data.convert_disk_data(&app.data_collection);
for disk in app.states.disk_state.widget_states.values_mut() {
disk.force_data_update();
}
}
if app.used_widgets.use_temp {
app.converted_data.convert_temp_data(
&app.data_collection,
app.app_config_fields.temperature_type,
);
for temp in app.states.temp_state.widget_states.values_mut() {
temp.force_data_update();
}
}
if app.used_widgets.use_mem {
app.converted_data.mem_data =
convert_mem_data_points(&app.data_collection);
#[cfg(not(target_os = "windows"))]
{
app.converted_data.cache_data =
convert_cache_data_points(&app.data_collection);
}
app.converted_data.swap_data =
convert_swap_data_points(&app.data_collection);
#[cfg(feature = "zfs")]
{
app.converted_data.arc_data =
convert_arc_data_points(&app.data_collection);
}
#[cfg(feature = "gpu")]
{
app.converted_data.gpu_data =
convert_gpu_data(&app.data_collection);
}
app.converted_data.mem_labels =
convert_mem_label(&app.data_collection.memory_harvest);
app.converted_data.swap_labels =
convert_mem_label(&app.data_collection.swap_harvest);
#[cfg(not(target_os = "windows"))]
{
app.converted_data.cache_labels =
convert_mem_label(&app.data_collection.cache_harvest);
}
#[cfg(feature = "zfs")]
{
app.converted_data.arc_labels =
convert_mem_label(&app.data_collection.arc_harvest);
}
}
if app.used_widgets.use_cpu {
app.converted_data.convert_cpu_data(&app.data_collection);
app.converted_data.load_avg_data = app.data_collection.load_avg_harvest;
}
if app.used_widgets.use_proc {
for proc in app.states.proc_state.widget_states.values_mut() {
proc.force_data_update();
}
}
if app.used_widgets.use_cpu {
for cpu in app.states.cpu_state.widget_states.values_mut() {
cpu.force_data_update();
}
}
app.update_data();
try_drawing(&mut terminal, &mut app, &mut painter)?;
}
}
BottomEvent::Clean => {
app.data_collection
.clean_data(app.app_config_fields.retention_ms);
app.data_store
.clean_data(Duration::from_millis(app.app_config_fields.retention_ms));
}
}
}
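The hunks above touch both ends of the retention path: a background thread sleeps for the retention period plus a fixed offset and then emits a Clean event, and the main loop answers that event by pruning the data store. Below is a minimal, self-contained sketch of the same pattern using only std; the Event enum, the RETENTION value, and the channel-based cancellation are illustrative stand-ins rather than bottom's actual types.

use std::{
    sync::mpsc::{self, RecvTimeoutError},
    thread,
    time::Duration,
};

enum Event {
    Clean,
}

fn main() {
    // Hypothetical retention period; the diff above shows bottom taking this from
    // its config in milliseconds.
    const RETENTION: Duration = Duration::from_millis(600_000);
    // Sleep a little longer than the retention window so each pass has something to prune.
    let offset_wait = RETENTION + Duration::from_millis(60_000);

    let (event_tx, event_rx) = mpsc::channel::<Event>();
    let (cancel_tx, cancel_rx) = mpsc::channel::<()>();

    let cleaner = thread::spawn(move || loop {
        // Waiting on the cancellation channel doubles as an interruptible sleep,
        // which is roughly what `sleep_with_cancellation` provides above.
        match cancel_rx.recv_timeout(offset_wait) {
            Ok(()) | Err(RecvTimeoutError::Disconnected) => break,
            Err(RecvTimeoutError::Timeout) => {
                if event_tx.send(Event::Clean).is_err() {
                    break;
                }
            }
        }
    });

    // A real event loop would answer Event::Clean by pruning, e.g. something like
    // `data_store.clean_data(RETENTION)`; here we just shut the thread down.
    drop(event_rx);
    let _ = cancel_tx.send(());
    cleaner.join().unwrap();
}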

View File

@ -18,6 +18,7 @@ use std::{
use anyhow::{Context, Result};
use config::style::Styles;
pub use config::Config;
use data::TemperatureType;
pub(crate) use error::{OptionError, OptionResult};
use hashbrown::{HashMap, HashSet};
use indexmap::IndexSet;
@ -31,9 +32,8 @@ use self::{
};
use crate::{
app::{filter::Filter, layout_manager::*, *},
canvas::components::time_chart::LegendPosition,
canvas::components::time_graph::LegendPosition,
constants::*,
data_collection::temperature::TemperatureType,
utils::data_units::DataUnit,
widgets::*,
};

View File

@ -43,5 +43,5 @@ pub(crate) struct FlagConfig {
pub(crate) disable_gpu: Option<bool>,
pub(crate) enable_cache_memory: Option<bool>,
pub(crate) retention: Option<StringOrNum>,
pub(crate) average_cpu_row: Option<bool>,
pub(crate) average_cpu_row: Option<bool>, // FIXME: This makes no sense outside of basic mode, add a basic mode config section.
}

View File

@ -1,85 +0,0 @@
pub const KILO_LIMIT: u64 = 1000;
pub const MEGA_LIMIT: u64 = 1_000_000;
pub const GIGA_LIMIT: u64 = 1_000_000_000;
pub const TERA_LIMIT: u64 = 1_000_000_000_000;
pub const KIBI_LIMIT: u64 = 1024;
pub const MEBI_LIMIT: u64 = 1024 * 1024;
pub const GIBI_LIMIT: u64 = 1024 * 1024 * 1024;
pub const TEBI_LIMIT: u64 = 1024 * 1024 * 1024 * 1024;
pub const KILO_LIMIT_F64: f64 = 1000.0;
pub const MEGA_LIMIT_F64: f64 = 1_000_000.0;
pub const GIGA_LIMIT_F64: f64 = 1_000_000_000.0;
pub const TERA_LIMIT_F64: f64 = 1_000_000_000_000.0;
pub const KIBI_LIMIT_F64: f64 = 1024.0;
pub const MEBI_LIMIT_F64: f64 = 1024.0 * 1024.0;
pub const GIBI_LIMIT_F64: f64 = 1024.0 * 1024.0 * 1024.0;
pub const TEBI_LIMIT_F64: f64 = 1024.0 * 1024.0 * 1024.0 * 1024.0;
pub const LOG_KILO_LIMIT: f64 = 3.0;
pub const LOG_MEGA_LIMIT: f64 = 6.0;
pub const LOG_GIGA_LIMIT: f64 = 9.0;
pub const LOG_TERA_LIMIT: f64 = 12.0;
pub const LOG_PETA_LIMIT: f64 = 15.0;
pub const LOG_KIBI_LIMIT: f64 = 10.0;
pub const LOG_MEBI_LIMIT: f64 = 20.0;
pub const LOG_GIBI_LIMIT: f64 = 30.0;
pub const LOG_TEBI_LIMIT: f64 = 40.0;
pub const LOG_PEBI_LIMIT: f64 = 50.0;
/// Returns a tuple containing the value and the unit in bytes. In units of
/// 1024. This only supports up to a tebi. Note the "single" unit will have a
/// space appended to match the others if `spacing` is true.
#[inline]
pub fn get_binary_bytes(bytes: u64) -> (f64, &'static str) {
match bytes {
b if b < KIBI_LIMIT => (bytes as f64, "B"),
b if b < MEBI_LIMIT => (bytes as f64 / KIBI_LIMIT_F64, "KiB"),
b if b < GIBI_LIMIT => (bytes as f64 / MEBI_LIMIT_F64, "MiB"),
b if b < TEBI_LIMIT => (bytes as f64 / GIBI_LIMIT_F64, "GiB"),
_ => (bytes as f64 / TEBI_LIMIT_F64, "TiB"),
}
}
/// Returns a tuple containing the value and the unit in bytes. In units of
/// 1000. This only supports up to a tera. Note the "single" unit will have a
/// space appended to match the others if `spacing` is true.
#[inline]
pub fn get_decimal_bytes(bytes: u64) -> (f64, &'static str) {
match bytes {
b if b < KILO_LIMIT => (bytes as f64, "B"),
b if b < MEGA_LIMIT => (bytes as f64 / KILO_LIMIT_F64, "KB"),
b if b < GIGA_LIMIT => (bytes as f64 / MEGA_LIMIT_F64, "MB"),
b if b < TERA_LIMIT => (bytes as f64 / GIGA_LIMIT_F64, "GB"),
_ => (bytes as f64 / TERA_LIMIT_F64, "TB"),
}
}
/// Returns a tuple containing the value and the unit. In units of 1024.
/// This only supports up to a tebi. Note the "single" unit will have a space
/// appended to match the others if `spacing` is true.
#[inline]
pub fn get_binary_prefix(quantity: u64, unit: &str) -> (f64, String) {
match quantity {
b if b < KIBI_LIMIT => (quantity as f64, unit.to_string()),
b if b < MEBI_LIMIT => (quantity as f64 / KIBI_LIMIT_F64, format!("Ki{unit}")),
b if b < GIBI_LIMIT => (quantity as f64 / MEBI_LIMIT_F64, format!("Mi{unit}")),
b if b < TEBI_LIMIT => (quantity as f64 / GIBI_LIMIT_F64, format!("Gi{unit}")),
_ => (quantity as f64 / TEBI_LIMIT_F64, format!("Ti{unit}")),
}
}
/// Returns a tuple containing the value and the unit. In units of 1000.
/// This only supports up to a tera. Note the "single" unit will have a space
/// appended to match the others if `spacing` is true.
#[inline]
pub fn get_decimal_prefix(quantity: u64, unit: &str) -> (f64, String) {
match quantity {
b if b < KILO_LIMIT => (quantity as f64, unit.to_string()),
b if b < MEGA_LIMIT => (quantity as f64 / KILO_LIMIT_F64, format!("K{unit}")),
b if b < GIGA_LIMIT => (quantity as f64 / MEGA_LIMIT_F64, format!("M{unit}")),
b if b < TERA_LIMIT => (quantity as f64 / GIGA_LIMIT_F64, format!("G{unit}")),
_ => (quantity as f64 / TERA_LIMIT_F64, format!("T{unit}")),
}
}

View File

@ -1,6 +1,98 @@
#[derive(Debug, Clone, Eq, PartialEq, Default)]
#[derive(Debug, Clone, Copy, Eq, PartialEq, Default)]
pub enum DataUnit {
Byte,
#[default]
Bit,
}
pub const KILO_LIMIT: u64 = 1000;
pub const MEGA_LIMIT: u64 = 1_000_000;
pub const GIGA_LIMIT: u64 = 1_000_000_000;
pub const TERA_LIMIT: u64 = 1_000_000_000_000;
pub const KIBI_LIMIT: u64 = 1024;
pub const MEBI_LIMIT: u64 = 1024 * 1024;
pub const GIBI_LIMIT: u64 = 1024 * 1024 * 1024;
pub const TEBI_LIMIT: u64 = 1024 * 1024 * 1024 * 1024;
pub const KILO_LIMIT_F64: f64 = 1000.0;
pub const MEGA_LIMIT_F64: f64 = 1_000_000.0;
pub const GIGA_LIMIT_F64: f64 = 1_000_000_000.0;
pub const TERA_LIMIT_F64: f64 = 1_000_000_000_000.0;
pub const KIBI_LIMIT_F64: f64 = 1024.0;
pub const MEBI_LIMIT_F64: f64 = 1024.0 * 1024.0;
pub const GIBI_LIMIT_F64: f64 = 1024.0 * 1024.0 * 1024.0;
pub const TEBI_LIMIT_F64: f64 = 1024.0 * 1024.0 * 1024.0 * 1024.0;
pub const LOG_MEGA_LIMIT: f64 = 6.0;
pub const LOG_GIGA_LIMIT: f64 = 9.0;
pub const LOG_TERA_LIMIT: f64 = 12.0;
pub const LOG_PETA_LIMIT: f64 = 15.0;
pub const LOG_MEBI_LIMIT: f64 = 20.0;
pub const LOG_GIBI_LIMIT: f64 = 30.0;
pub const LOG_TEBI_LIMIT: f64 = 40.0;
pub const LOG_PEBI_LIMIT: f64 = 50.0;
/// Returns a tuple containing the value and the unit in bytes, using powers
/// of 1024. This only supports up to tebibytes.
#[inline]
pub fn get_binary_bytes(bytes: u64) -> (f64, &'static str) {
match bytes {
b if b < KIBI_LIMIT => (bytes as f64, "B"),
b if b < MEBI_LIMIT => (bytes as f64 / KIBI_LIMIT_F64, "KiB"),
b if b < GIBI_LIMIT => (bytes as f64 / MEBI_LIMIT_F64, "MiB"),
b if b < TEBI_LIMIT => (bytes as f64 / GIBI_LIMIT_F64, "GiB"),
_ => (bytes as f64 / TEBI_LIMIT_F64, "TiB"),
}
}
/// Returns a tuple containing the value and the unit in bytes, using powers
/// of 1000. This only supports up to terabytes.
#[inline]
pub fn get_decimal_bytes(bytes: u64) -> (f64, &'static str) {
match bytes {
b if b < KILO_LIMIT => (bytes as f64, "B"),
b if b < MEGA_LIMIT => (bytes as f64 / KILO_LIMIT_F64, "KB"),
b if b < GIGA_LIMIT => (bytes as f64 / MEGA_LIMIT_F64, "MB"),
b if b < TERA_LIMIT => (bytes as f64 / GIGA_LIMIT_F64, "GB"),
_ => (bytes as f64 / TERA_LIMIT_F64, "TB"),
}
}
/// Given a value in _bits_, returns a tuple containing the value converted to bytes and its unit.
#[inline]
pub fn convert_bits(bits: u64, base_two: bool) -> (f64, &'static str) {
let bytes = bits / 8;
if base_two {
get_binary_bytes(bytes)
} else {
get_decimal_bytes(bytes)
}
}
/// Return a tuple containing the value and a unit string to be used as a prefix.
#[inline]
pub fn get_unit_prefix(value: u64, base_two: bool) -> (f64, &'static str) {
let float_value = value as f64;
if base_two {
match value {
b if b < KIBI_LIMIT => (float_value, ""),
b if b < MEBI_LIMIT => (float_value / KIBI_LIMIT_F64, "Ki"),
b if b < GIBI_LIMIT => (float_value / MEBI_LIMIT_F64, "Mi"),
b if b < TEBI_LIMIT => (float_value / GIBI_LIMIT_F64, "Gi"),
_ => (float_value / TEBI_LIMIT_F64, "Ti"),
}
} else {
match value {
b if b < KILO_LIMIT => (float_value, ""),
b if b < MEGA_LIMIT => (float_value / KILO_LIMIT_F64, "K"),
b if b < GIGA_LIMIT => (float_value / MEGA_LIMIT_F64, "M"),
b if b < TERA_LIMIT => (float_value / GIGA_LIMIT_F64, "G"),
_ => (float_value / TERA_LIMIT_F64, "T"),
}
}
}
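A quick test-style sketch of how the helpers above behave; it assumes the test sits in the same module as (or otherwise imports) `convert_bits`, `get_unit_prefix`, and `MEBI_LIMIT`.

#[cfg(test)]
mod unit_prefix_sketch {
    use super::*;

    #[test]
    fn examples() {
        // 8000 bits = 1000 bytes = 1 KB in decimal units.
        assert_eq!(convert_bits(8_000, false), (1.0, "KB"));
        // 8192 bits = 1024 bytes = 1 KiB in binary units.
        assert_eq!(convert_bits(8_192, true), (1.0, "KiB"));
        // Prefix-only form, useful when the caller appends its own unit (e.g. "b" or "B").
        assert_eq!(get_unit_prefix(1_500_000, false), (1.5, "M"));
        assert_eq!(get_unit_prefix(2 * MEBI_LIMIT, true), (2.0, "Mi"));
    }
}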

View File

@ -62,6 +62,24 @@ macro_rules! clamp_num_impl {
clamp_num_impl!(u8, u16, u32, u64, usize);
/// Saturating log2: returns 0.0 for values that are not strictly positive.
pub fn saturating_log2(value: f64) -> f64 {
if value > 0.0 {
value.log2()
} else {
0.0
}
}
/// Saturating log10: returns 0.0 for values that are not strictly positive.
pub fn saturating_log10(value: f64) -> f64 {
if value > 0.0 {
value.log10()
} else {
0.0
}
}
#[cfg(test)]
mod test {
use super::*;
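For reference, the clamping matters because `f64::log10(0.0)` evaluates to negative infinity (and the log of a negative number is NaN), neither of which can be placed on a log-scaled axis; the saturating variants pin those cases to 0.0 instead. A test-style sketch, assuming the two helpers above are in scope:

#[test]
fn saturating_log_examples() {
    // The raw float log of zero is -inf, which a chart cannot position.
    assert!(0.0_f64.log10().is_infinite());
    // The saturating variants clamp non-positive inputs to a plottable 0.0.
    assert_eq!(saturating_log10(0.0), 0.0);
    assert_eq!(saturating_log10(-1.0), 0.0);
    assert!((saturating_log10(1000.0) - 3.0).abs() < 1e-9);
    assert!((saturating_log2(8.0) - 3.0).abs() < 1e-9);
}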

View File

@ -2,7 +2,7 @@ pub mod battery_info;
pub mod cpu_graph;
pub mod disk_table;
pub mod mem_graph;
pub mod net_graph;
pub mod network_graph;
pub mod process_table;
pub mod temperature_table;
@ -10,6 +10,6 @@ pub use battery_info::*;
pub use cpu_graph::*;
pub use disk_table::*;
pub use mem_graph::*;
pub use net_graph::*;
pub use network_graph::*;
pub use process_table::*;
pub use temperature_table::*;

View File

@ -12,8 +12,7 @@ use crate::{
},
Painter,
},
data_collection::cpu::CpuDataType,
data_conversion::CpuWidgetData,
collection::cpu::{CpuData, CpuDataType},
options::config::{cpu::CpuDefault, style::Styles},
};
@ -33,24 +32,14 @@ impl ColumnHeader for CpuWidgetColumn {
pub enum CpuWidgetTableData {
All,
Entry {
data_type: CpuDataType,
last_entry: f64,
},
Entry { data_type: CpuDataType, usage: f64 },
}
impl CpuWidgetTableData {
pub fn from_cpu_widget_data(data: &CpuWidgetData) -> CpuWidgetTableData {
match data {
CpuWidgetData::All => CpuWidgetTableData::All,
CpuWidgetData::Entry {
data_type,
data: _,
last_entry,
} => CpuWidgetTableData::Entry {
data_type: *data_type,
last_entry: *last_entry,
},
pub fn from_cpu_data(data: &CpuData) -> CpuWidgetTableData {
CpuWidgetTableData::Entry {
data_type: data.data_type,
usage: data.cpu_usage,
}
}
}
@ -77,7 +66,7 @@ impl DataToCell<CpuWidgetColumn> for CpuWidgetTableData {
},
CpuWidgetTableData::Entry {
data_type,
last_entry,
usage: last_entry,
} => {
if calculated_width == 0 {
None
@ -109,7 +98,7 @@ impl DataToCell<CpuWidgetColumn> for CpuWidgetTableData {
CpuWidgetTableData::All => painter.styles.all_cpu_colour,
CpuWidgetTableData::Entry {
data_type,
last_entry: _,
usage: _,
} => match data_type {
CpuDataType::Avg => painter.styles.avg_cpu_colour,
CpuDataType::Cpu(index) => {
@ -136,6 +125,7 @@ pub struct CpuWidgetState {
pub is_legend_hidden: bool,
pub autohide_timer: Option<Instant>,
pub table: DataTable<CpuWidgetTableData, CpuWidgetColumn>,
pub force_update_data: bool,
}
impl CpuWidgetState {
@ -172,14 +162,22 @@ impl CpuWidgetState {
is_legend_hidden: false,
autohide_timer,
table,
force_update_data: false,
}
}
pub fn update_table(&mut self, data: &[CpuWidgetData]) {
/// Forces an update of the data stored.
#[inline]
pub fn force_data_update(&mut self) {
self.force_update_data = true;
}
pub fn set_legend_data(&mut self, data: &[CpuData]) {
self.table.set_data(
data.iter()
.map(CpuWidgetTableData::from_cpu_widget_data)
std::iter::once(CpuWidgetTableData::All)
.chain(data.iter().map(CpuWidgetTableData::from_cpu_data))
.collect(),
);
self.force_update_data = false;
}
}
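The `set_legend_data` change above prepends the aggregate "All" row with `std::iter::once(..).chain(..)` rather than building and pushing into an intermediate Vec. A tiny self-contained illustration of that pattern, with plain strings standing in for the widget's row types:

fn main() {
    let cpus = ["cpu0", "cpu1"];
    let rows: Vec<String> = std::iter::once("All".to_string())
        .chain(cpus.iter().map(|c| c.to_string()))
        .collect();
    assert_eq!(rows, ["All", "cpu0", "cpu1"]);
}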

View File

@ -3,25 +3,25 @@ use std::{borrow::Cow, cmp::max, num::NonZeroU16};
use serde::Deserialize;
use crate::{
app::AppConfigFields,
app::{data::StoredData, AppConfigFields},
canvas::components::data_table::{
ColumnHeader, DataTableColumn, DataTableProps, DataTableStyling, DataToCell, SortColumn,
SortDataTable, SortDataTableProps, SortOrder, SortsRow,
},
options::config::style::Styles,
utils::{data_prefixes::get_decimal_bytes, general::sort_partial_fn},
utils::{data_units::get_decimal_bytes, general::sort_partial_fn},
};
#[derive(Clone, Debug)]
pub struct DiskWidgetData {
pub name: Cow<'static, str>,
pub mount_point: Cow<'static, str>,
pub name: String,
pub mount_point: String,
pub free_bytes: Option<u64>,
pub used_bytes: Option<u64>,
pub total_bytes: Option<u64>,
pub summed_total_bytes: Option<u64>,
pub io_read: Cow<'static, str>,
pub io_write: Cow<'static, str>,
pub io_read: String,
pub io_write: String,
}
impl DiskWidgetData {
@ -158,6 +158,7 @@ impl ColumnHeader for DiskColumn {
}
impl DataToCell<DiskColumn> for DiskWidgetData {
// FIXME: (points_rework_v1) Can we change the return type to 'a instead of 'static?
fn to_cell(
&self, column: &DiskColumn, _calculated_width: NonZeroU16,
) -> Option<Cow<'static, str>> {
@ -169,15 +170,15 @@ impl DataToCell<DiskColumn> for DiskWidgetData {
}
let text = match column {
DiskColumn::Disk => self.name.clone(),
DiskColumn::Mount => self.mount_point.clone(),
DiskColumn::Disk => self.name.clone().into(),
DiskColumn::Mount => self.mount_point.clone().into(),
DiskColumn::Used => self.used_space(),
DiskColumn::Free => self.free_space(),
DiskColumn::UsedPercent => percent_string(self.used_percent()),
DiskColumn::FreePercent => percent_string(self.free_percent()),
DiskColumn::Total => self.total_space(),
DiskColumn::IoRead => self.io_read.clone(),
DiskColumn::IoWrite => self.io_write.clone(),
DiskColumn::IoRead => self.io_read.clone().into(),
DiskColumn::IoWrite => self.io_write.clone().into(),
};
Some(text)
@ -313,12 +314,14 @@ impl DiskTableWidget {
}
/// Update the current table data.
pub fn set_table_data(&mut self, data: &[DiskWidgetData]) {
let mut data = data.to_vec();
pub fn set_table_data(&mut self, data: &StoredData) {
let mut data = data.disk_harvest.clone();
if let Some(column) = self.table.columns.get(self.table.sort_index()) {
column.sort_by(&mut data, self.table.order());
}
self.table.set_data(data);
self.force_update_data = false;
}
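`set_table_data` above follows a clone-then-sort shape: the stored harvest is left untouched, and only the cloned rows are reordered by whichever column is currently selected before being handed to the table. A minimal stand-alone sketch of that idea, with a tuple Vec and a closure standing in for the real column machinery:

fn main() {
    let harvest = vec![("sda", 500_u64), ("nvme0n1", 1000), ("sdb", 250)];
    // Stand-in for the selected column's `sort_by`.
    let sort_by_size_desc = |rows: &mut Vec<(&str, u64)>| rows.sort_by(|a, b| b.1.cmp(&a.1));

    let mut rows = harvest.clone(); // the stored data itself is never mutated
    sort_by_size_desc(&mut rows);
    assert_eq!(rows[0].0, "nvme0n1");
}
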
pub fn set_index(&mut self, index: usize) {

View File

@ -15,14 +15,14 @@ use sort_table::SortTableColumn;
use crate::{
app::{
data_farmer::{DataCollection, ProcessData},
data::{ProcessData, StoredData},
AppConfigFields, AppSearchState,
},
canvas::components::data_table::{
Column, ColumnHeader, ColumnWidthBounds, DataTable, DataTableColumn, DataTableProps,
DataTableStyling, SortColumn, SortDataTable, SortDataTableProps, SortOrder, SortsRow,
},
data_collection::processes::{Pid, ProcessHarvest},
collection::processes::{Pid, ProcessHarvest},
options::config::style::Styles,
};
@ -395,20 +395,21 @@ impl ProcWidgetState {
/// This function *only* updates the displayed process data. If the actual
/// *stored* data needs to be updated, do that before calling this function.
pub fn set_table_data(&mut self, data_collection: &DataCollection) {
pub fn set_table_data(&mut self, stored_data: &StoredData) {
let data = match &self.mode {
ProcWidgetMode::Grouped | ProcWidgetMode::Normal => {
self.get_normal_data(&data_collection.process_data.process_harvest)
self.get_normal_data(&stored_data.process_data.process_harvest)
}
ProcWidgetMode::Tree { collapsed_pids } => {
self.get_tree_data(collapsed_pids, data_collection)
self.get_tree_data(collapsed_pids, stored_data)
}
};
self.table.set_data(data);
self.force_update_data = false;
}
fn get_tree_data(
&self, collapsed_pids: &HashSet<Pid>, data_collection: &DataCollection,
&self, collapsed_pids: &HashSet<Pid>, stored_data: &StoredData,
) -> Vec<ProcWidgetData> {
const BRANCH_END: char = '└';
const BRANCH_SPLIT: char = '├';
@ -424,10 +425,10 @@ impl ProcWidgetState {
process_parent_mapping,
orphan_pids,
..
} = &data_collection.process_data;
} = &stored_data.process_data;
// Only keep a set of the kept PIDs.
let kept_pids = data_collection
let kept_pids = stored_data
.process_data
.process_harvest
.iter()

View File

@ -15,8 +15,9 @@ use crate::{
components::data_table::{DataTableColumn, DataToCell},
Painter,
},
data_collection::processes::{Pid, ProcessHarvest},
data_conversion::{binary_byte_string, dec_bytes_per_second_string, dec_bytes_string},
collection::processes::{Pid, ProcessHarvest},
data_conversion::dec_bytes_per_second_string,
utils::data_units::{get_binary_bytes, get_decimal_bytes, GIBI_LIMIT, GIGA_LIMIT},
};
#[derive(Clone, Debug)]
@ -166,6 +167,30 @@ fn format_time(dur: Duration) -> String {
}
}
/// Returns a string for the given value converted to the closest binary unit.
/// If the value is at least a gibibyte, one decimal place is included.
#[inline]
fn binary_byte_string(value: u64) -> String {
let converted_values = get_binary_bytes(value);
if value >= GIBI_LIMIT {
format!("{:.1}{}", converted_values.0, converted_values.1)
} else {
format!("{:.0}{}", converted_values.0, converted_values.1)
}
}
/// Returns a string for the given value converted to the closest SI unit.
/// If the value is at least a giga-unit, one decimal place is included.
fn dec_bytes_string(value: u64) -> String {
let converted_values = get_decimal_bytes(value);
if value >= GIGA_LIMIT {
format!("{:.1}{}", converted_values.0, converted_values.1)
} else {
format!("{:.0}{}", converted_values.0, converted_values.1)
}
}
#[derive(Clone)]
pub struct ProcWidgetData {
pub pid: Pid,
@ -366,7 +391,9 @@ impl DataToCell<ProcColumn> for ProcWidgetData {
mod test {
use std::time::Duration;
use crate::widgets::process_data::format_time;
use crate::utils::data_units::*;
use super::*;
#[test]
fn test_format_time() {
@ -398,4 +425,29 @@ mod test {
"364d 23h 59m"
);
}
#[test]
fn test_binary_byte_string() {
assert_eq!(binary_byte_string(0), "0B".to_string());
assert_eq!(binary_byte_string(1), "1B".to_string());
assert_eq!(binary_byte_string(1000), "1000B".to_string());
assert_eq!(binary_byte_string(1023), "1023B".to_string());
assert_eq!(binary_byte_string(KIBI_LIMIT), "1KiB".to_string());
assert_eq!(binary_byte_string(KIBI_LIMIT + 1), "1KiB".to_string());
assert_eq!(binary_byte_string(MEBI_LIMIT), "1MiB".to_string());
assert_eq!(binary_byte_string(GIBI_LIMIT), "1.0GiB".to_string());
assert_eq!(binary_byte_string(2 * GIBI_LIMIT), "2.0GiB".to_string());
assert_eq!(
binary_byte_string((2.5 * GIBI_LIMIT as f64) as u64),
"2.5GiB".to_string()
);
assert_eq!(
binary_byte_string((10.34 * TEBI_LIMIT as f64) as u64),
"10.3TiB".to_string()
);
assert_eq!(
binary_byte_string((10.36 * TEBI_LIMIT as f64) as u64),
"10.4TiB".to_string()
);
}
}
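The new tests only exercise `binary_byte_string`; a parallel sketch for `dec_bytes_string` (it could sit in the same test module, given the `data_units` glob import above) might look like:

#[test]
fn test_dec_bytes_string() {
    assert_eq!(dec_bytes_string(0), "0B".to_string());
    assert_eq!(dec_bytes_string(999), "999B".to_string());
    assert_eq!(dec_bytes_string(KILO_LIMIT), "1KB".to_string());
    assert_eq!(dec_bytes_string(GIGA_LIMIT), "1.0GB".to_string());
    assert_eq!(dec_bytes_string(2_500_000_000), "2.5GB".to_string());
}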

View File

@ -9,7 +9,7 @@ use humantime::parse_duration;
use regex::Regex;
use crate::{
data_collection::processes::ProcessHarvest, multi_eq_ignore_ascii_case, utils::data_prefixes::*,
collection::processes::ProcessHarvest, multi_eq_ignore_ascii_case, utils::data_units::*,
};
#[derive(Debug)]

View File

@ -1,23 +1,19 @@
use std::{borrow::Cow, cmp::max, num::NonZeroU16};
use concat_string::concat_string;
use crate::{
app::AppConfigFields,
app::{data::TypedTemperature, AppConfigFields},
canvas::components::data_table::{
ColumnHeader, DataTableColumn, DataTableProps, DataTableStyling, DataToCell, SortColumn,
SortDataTable, SortDataTableProps, SortOrder, SortsRow,
},
data_collection::temperature::TemperatureType,
options::config::style::Styles,
utils::general::sort_partial_fn,
};
#[derive(Clone, Debug)]
pub struct TempWidgetData {
pub sensor: Cow<'static, str>,
pub temperature_value: Option<u64>,
pub temperature_type: TemperatureType,
pub sensor: String,
pub temperature: Option<TypedTemperature>,
}
pub enum TempWidgetColumn {
@ -36,16 +32,9 @@ impl ColumnHeader for TempWidgetColumn {
impl TempWidgetData {
pub fn temperature(&self) -> Cow<'static, str> {
match self.temperature_value {
Some(temp_val) => {
let temp_type = match self.temperature_type {
TemperatureType::Celsius => "°C",
TemperatureType::Kelvin => "K",
TemperatureType::Fahrenheit => "°F",
};
concat_string!(temp_val.to_string(), temp_type).into()
}
None => "N/A".to_string().into(),
match &self.temperature {
Some(temp) => temp.to_string().into(),
None => "N/A".into(),
}
}
}
@ -55,7 +44,7 @@ impl DataToCell<TempWidgetColumn> for TempWidgetData {
&self, column: &TempWidgetColumn, _calculated_width: NonZeroU16,
) -> Option<Cow<'static, str>> {
Some(match column {
TempWidgetColumn::Sensor => self.sensor.clone(),
TempWidgetColumn::Sensor => self.sensor.clone().into(),
TempWidgetColumn::Temp => self.temperature(),
})
}
@ -86,9 +75,7 @@ impl SortsRow for TempWidgetColumn {
data.sort_by(move |a, b| sort_partial_fn(descending)(&a.sensor, &b.sensor));
}
TempWidgetColumn::Temp => {
data.sort_by(|a, b| {
sort_partial_fn(descending)(a.temperature_value, b.temperature_value)
});
data.sort_by(|a, b| sort_partial_fn(descending)(&a.temperature, &b.temperature));
}
}
}
@ -140,5 +127,6 @@ impl TempWidgetState {
column.sort_by(&mut data, self.table.order());
}
self.table.set_data(data);
self.force_update_data = false;
}
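The `TypedTemperature` definition itself is not part of this diff, so the following is a purely hypothetical stand-in; it only illustrates why carrying the unit inside the value simplifies the widget code above, where formatting collapses to `to_string` and sorting to plain ordering instead of a per-call-site match on a separate `TemperatureType`.

use std::fmt;

// Hypothetical type, not the PR's actual definition.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Temp {
    Celsius(u32),
    Kelvin(u32),
    Fahrenheit(u32),
}

impl fmt::Display for Temp {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Temp::Celsius(v) => write!(f, "{v}°C"),
            Temp::Kelvin(v) => write!(f, "{v}K"),
            Temp::Fahrenheit(v) => write!(f, "{v}°F"),
        }
    }
}

fn main() {
    let reading = Some(Temp::Celsius(45));
    // Mirrors the widget code: formatting is just `to_string`...
    assert_eq!(
        reading.map(|t| t.to_string()).unwrap_or_else(|| "N/A".into()),
        "45°C"
    );
    // ...and sorting is just ordering. Note the derived `Ord` compares the variant
    // before the value, so a real implementation must keep readings in one unit
    // (or order on the numeric value) when mixing sensors.
    assert!(Some(Temp::Celsius(40)) < Some(Temp::Celsius(45)));
}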
}