refactor: mostly add back tree mode for process

Mouse control on collapse is not working yet; some internal work is
needed first.
This commit is contained in:
ClementTsang 2021-11-20 22:45:49 -05:00
parent 5833cb8ad1
commit cc66f1fcae
No known key found for this signature in database
GPG Key ID: C7D5235A136CD46F
26 changed files with 790 additions and 1040 deletions

48
Cargo.lock generated
View File

@ -239,7 +239,7 @@ dependencies = [
"cargo-husky",
"cfg-if",
"clap",
"crossterm",
"crossterm 0.22.1",
"ctrlc",
"dirs",
"enum_dispatch",
@ -247,7 +247,6 @@ dependencies = [
"float-ord",
"futures",
"futures-timer",
"fxhash",
"heim",
"indexmap",
"indextree",
@ -258,6 +257,7 @@ dependencies = [
"predicates 1.0.8",
"procfs",
"regex",
"rustc-hash",
"serde",
"smol",
"sysinfo",
@ -434,7 +434,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c0ebde6a9dd5e331cd6c6f48253254d117642c31653baa475e394657c59c1f7d"
dependencies = [
"bitflags",
"crossterm_winapi",
"crossterm_winapi 0.8.0",
"libc",
"mio",
"parking_lot",
"signal-hook",
"signal-hook-mio",
"winapi",
]
[[package]]
name = "crossterm"
version = "0.22.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c85525306c4291d1b73ce93c8acf9c339f9b213aef6c1d85c3830cbf1c16325c"
dependencies = [
"bitflags",
"crossterm_winapi 0.9.0",
"libc",
"mio",
"parking_lot",
@ -452,6 +468,15 @@ dependencies = [
"winapi",
]
[[package]]
name = "crossterm_winapi"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2ae1b35a484aa10e07fe0638d02301c5ad24de82d310ccbd2f3693da5f09bf1c"
dependencies = [
"winapi",
]
[[package]]
name = "ctrlc"
version = "3.2.1"
@ -684,15 +709,6 @@ dependencies = [
"slab",
]
[[package]]
name = "fxhash"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c"
dependencies = [
"byteorder",
]
[[package]]
name = "getrandom"
version = "0.2.3"
@ -1306,6 +1322,12 @@ version = "0.1.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342"
[[package]]
name = "rustc-hash"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
[[package]]
name = "scopeguard"
version = "1.1.0"
@ -1521,7 +1543,7 @@ checksum = "39c8ce4e27049eed97cfa363a5048b09d995e209994634a0efc26a14ab6c0c23"
dependencies = [
"bitflags",
"cassowary",
"crossterm",
"crossterm 0.20.0",
"unicode-segmentation",
"unicode-width",
]

View File

@ -25,8 +25,6 @@ doc = false
[profile.release]
debug = 0
lto = true
# debug = true
# lto = false
opt-level = 3
codegen-units = 1
@ -36,7 +34,7 @@ default = ["fern", "log", "battery"]
[dependencies]
anyhow = "1.0.40"
backtrace = "0.3.59"
crossterm = "0.20.0"
crossterm = "0.22.1"
ctrlc = { version = "3.1.9", features = ["termination"] }
clap = "2.33"
cfg-if = "1.0"
@ -45,12 +43,12 @@ enum_dispatch = "0.3.7"
float-ord = "0.3.2"
futures = "0.3.14"
futures-timer = "3.0.2"
fxhash = "0.2.1"
indexmap = "1.6.2"
indextree = "4.3.1"
itertools = "0.10.0"
once_cell = "1.5.2"
regex = "1.5.4"
rustc-hash = "1.1.0"
serde = { version = "1.0.125", features = ["derive"] }
# Sysinfo is still used in Linux for the ProcessStatus
sysinfo = "0.18.2"

View File

@ -17,8 +17,6 @@ The following flags can be provided to bottom in the command line to change the
| `-C, --config <CONFIG PATH>` | Sets the location of the config file. |
| `-u, --current_usage` | Sets process CPU% to be based on current CPU%. |
| `-t, --default_time_value <MS>` | Default time value for graphs in ms. |
| `--default_widget_count <INT>` | Sets the n'th selected widget type as the default. |
| `--default_widget_type <WIDGET TYPE>` | Sets the default widget type, use --help for more info. |
| `--disable_advanced_kill` | Hides advanced options to stop a process on Unix-like systems. |
| `--disable_click` | Disables mouse clicks. |
| `-m, --dot_marker` | Uses a dot marker for graphs. |

View File

@ -38,9 +38,6 @@
#time_delta = 15000
# Hides the time scale.
#hide_time = false
# Override layout default widget
#default_widget_type = "proc"
#default_widget_count = 1
# Use basic mode
#basic = false
# Use the old network legend style

View File

@ -11,5 +11,3 @@ group_processes = false
case_sensitive = false
whole_word = false
regex = true
default_widget_type = "cpu"
default_widget_count = 1

View File

@ -10,8 +10,8 @@ pub mod widgets;
use std::time::Instant;
use crossterm::event::{KeyCode, KeyEvent, KeyModifiers, MouseEvent};
use fxhash::FxHashMap;
use indextree::{Arena, NodeId};
use rustc_hash::FxHashMap;
pub use data_farmer::*;
use data_harvester::temperature;
@ -474,7 +474,7 @@ impl AppState {
}
#[cfg(target_family = "unix")]
pub fn on_number(&mut self, number_char: char) {
fn on_number(&mut self, number_char: char) {
if self.delete_dialog_state.is_showing_dd {
if self
.delete_dialog_state
@ -507,7 +507,7 @@ impl AppState {
}
}
pub fn on_left_key(&mut self) {
fn on_left_key(&mut self) {
// if !self.is_in_dialog() {
// match self.current_widget.widget_type {
// BottomWidgetType::ProcSearch => {
@ -566,7 +566,7 @@ impl AppState {
// }
}
pub fn on_right_key(&mut self) {
fn on_right_key(&mut self) {
// if !self.is_in_dialog() {
// match self.current_widget.widget_type {
// BottomWidgetType::ProcSearch => {
@ -626,7 +626,7 @@ impl AppState {
// }
}
pub fn start_killing_process(&mut self) {
fn start_killing_process(&mut self) {
todo!()
// if let Some(proc_widget_state) = self
@ -666,7 +666,7 @@ impl AppState {
// }
}
pub fn kill_highlighted_process(&mut self) -> Result<()> {
fn kill_highlighted_process(&mut self) -> Result<()> {
// if let BottomWidgetType::Proc = self.current_widget.widget_type {
// if let Some(current_selected_processes) = &self.to_delete_process_list {
// #[cfg(target_family = "unix")]

View File

@ -1,3 +1,4 @@
use itertools::Itertools;
/// In charge of cleaning, processing, and managing data. I couldn't think of
/// a better name for the file. Since I called data collection "harvesting",
/// then this is the farmer I guess.
@ -13,6 +14,7 @@
/// memory usage and higher CPU usage - you will be trying to process more and
/// more points as this is used!
use once_cell::sync::Lazy;
use rustc_hash::FxHashMap;
use std::{collections::HashMap, time::Instant, vec::Vec};
@ -26,6 +28,8 @@ use crate::{
};
use regex::Regex;
use super::data_harvester::processes::ProcessHarvest;
#[derive(Clone, Debug, Default)]
pub struct TimedData {
pub rx_data: f64,
@ -36,6 +40,90 @@ pub struct TimedData {
pub swap_data: Option<f64>,
}
/// Harvested process data, pre-grouped into several lookup structures so the
/// different display modes (normal, grouped, tree) can each query it cheaply.
/// All fields are rebuilt together by [`ProcessData::ingest`].
#[derive(Clone, Debug, Default)]
pub struct ProcessData {
    /// A PID to process data map.
    pub process_harvest: FxHashMap<Pid, ProcessHarvest>,

    /// A mapping from a process name to any PID with that name.
    pub process_name_pid_map: HashMap<String, Vec<Pid>>,

    /// A mapping from a process command to any PID with that name.
    pub process_cmd_pid_map: HashMap<String, Vec<Pid>>,

    /// A mapping between a process PID to any children process PIDs.
    pub process_parent_mapping: FxHashMap<Pid, Vec<Pid>>,

    /// PIDs corresponding to processes that have no parents.
    pub orphan_pids: Vec<Pid>,
}
impl ProcessData {
    /// Rebuilds every lookup structure from a freshly-harvested process list,
    /// then takes ownership of the list as the PID -> harvest map.
    ///
    /// Also recomputes `orphan_pids`: PIDs whose `parent_pid` is either absent
    /// or not present in the harvest, collected sorted-then-reversed so their
    /// order matches the other (reversed) mappings.
    fn ingest(&mut self, list_of_processes: Vec<ProcessHarvest>) {
        // TODO: [Optimization] Probably more efficient to do all of this in the data collection step, but it's fine for now.
        self.process_name_pid_map.clear();
        self.process_cmd_pid_map.clear();
        self.process_parent_mapping.clear();

        // Reverse as otherwise the pid mappings are in the wrong order.
        list_of_processes.iter().rev().for_each(|process_harvest| {
            if let Some(entry) = self.process_name_pid_map.get_mut(&process_harvest.name) {
                entry.push(process_harvest.pid);
            } else {
                self.process_name_pid_map
                    .insert(process_harvest.name.to_string(), vec![process_harvest.pid]);
            }

            if let Some(entry) = self.process_cmd_pid_map.get_mut(&process_harvest.command) {
                entry.push(process_harvest.pid);
            } else {
                self.process_cmd_pid_map.insert(
                    process_harvest.command.to_string(),
                    vec![process_harvest.pid],
                );
            }

            if let Some(parent_pid) = process_harvest.parent_pid {
                if let Some(entry) = self.process_parent_mapping.get_mut(&parent_pid) {
                    entry.push(process_harvest.pid);
                } else {
                    self.process_parent_mapping
                        .insert(parent_pid, vec![process_harvest.pid]);
                }
            }
        });

        self.process_name_pid_map.shrink_to_fit();
        self.process_cmd_pid_map.shrink_to_fit();
        self.process_parent_mapping.shrink_to_fit();

        let process_pid_map = list_of_processes
            .into_iter()
            .map(|process| (process.pid, process))
            .collect();
        self.process_harvest = process_pid_map;

        // This also needs a quick sort + reverse to be in the correct order.
        self.orphan_pids = self
            .process_harvest
            .iter()
            .filter_map(|(pid, process_harvest)| {
                if let Some(parent_pid) = process_harvest.parent_pid {
                    if self.process_harvest.contains_key(&parent_pid) {
                        // Parent is present in this harvest, so not an orphan.
                        None
                    } else {
                        Some(*pid)
                    }
                } else {
                    Some(*pid)
                }
            })
            .sorted()
            .rev()
            .collect();
    }
}
/// AppCollection represents the pooled data stored within the main app
/// thread. Basically stores a (occasionally cleaned) record of the data
/// collected, and what is needed to convert into a displayable form.
@ -48,16 +136,14 @@ pub struct TimedData {
/// not the data collector.
#[derive(Clone, Debug)]
pub struct DataCollection {
pub current_instant: Instant,
pub current_instant: Instant, // TODO: [Refactor] Can I get rid of this? If I could, then I could just use #[derive(Default)] too!
pub timed_data_vec: Vec<(Instant, TimedData)>,
pub network_harvest: network::NetworkHarvest,
pub memory_harvest: memory::MemHarvest,
pub swap_harvest: memory::MemHarvest,
pub cpu_harvest: cpu::CpuHarvest,
pub load_avg_harvest: cpu::LoadAvgHarvest,
pub process_harvest: Vec<processes::ProcessHarvest>,
pub process_name_pid_map: HashMap<String, Vec<Pid>>,
pub process_cmd_pid_map: HashMap<String, Vec<Pid>>,
pub process_data: ProcessData,
pub disk_harvest: Vec<disks::DiskHarvest>,
pub io_harvest: disks::IoHarvest,
pub io_labels_and_prev: Vec<((u64, u64), (u64, u64))>,
@ -71,22 +157,20 @@ impl Default for DataCollection {
fn default() -> Self {
DataCollection {
current_instant: Instant::now(),
timed_data_vec: Vec::default(),
network_harvest: network::NetworkHarvest::default(),
memory_harvest: memory::MemHarvest::default(),
swap_harvest: memory::MemHarvest::default(),
cpu_harvest: cpu::CpuHarvest::default(),
load_avg_harvest: cpu::LoadAvgHarvest::default(),
process_harvest: Vec::default(),
process_name_pid_map: HashMap::default(),
process_cmd_pid_map: HashMap::default(),
disk_harvest: Vec::default(),
io_harvest: disks::IoHarvest::default(),
io_labels_and_prev: Vec::default(),
io_labels: Vec::default(),
temp_harvest: Vec::default(),
timed_data_vec: Default::default(),
network_harvest: Default::default(),
memory_harvest: Default::default(),
swap_harvest: Default::default(),
cpu_harvest: Default::default(),
load_avg_harvest: Default::default(),
process_data: Default::default(),
disk_harvest: Default::default(),
io_harvest: Default::default(),
io_labels_and_prev: Default::default(),
io_labels: Default::default(),
temp_harvest: Default::default(),
#[cfg(feature = "battery")]
battery_harvest: Vec::default(),
battery_harvest: Default::default(),
}
}
}
@ -98,9 +182,7 @@ impl DataCollection {
self.memory_harvest = Default::default();
self.swap_harvest = Default::default();
self.cpu_harvest = Default::default();
self.process_harvest = Default::default();
self.process_name_pid_map = Default::default();
self.process_cmd_pid_map = Default::default();
self.process_data = Default::default();
self.disk_harvest = Default::default();
self.io_harvest = Default::default();
self.io_labels_and_prev = Default::default();
@ -132,8 +214,6 @@ impl DataCollection {
pub fn eat_data(&mut self, harvested_data: Box<Data>) {
let harvested_time = harvested_data.last_collection_time;
// trace!("Harvested time: {:?}", harvested_time);
// trace!("New current instant: {:?}", self.current_instant);
let mut new_entry = TimedData::default();
// Network
@ -316,28 +396,7 @@ impl DataCollection {
}
fn eat_proc(&mut self, list_of_processes: Vec<processes::ProcessHarvest>) {
// TODO: [Optimization] Probably more efficient to do this in the data collection step, but it's fine for now.
self.process_name_pid_map.clear();
self.process_cmd_pid_map.clear();
list_of_processes.iter().for_each(|process_harvest| {
if let Some(entry) = self.process_name_pid_map.get_mut(&process_harvest.name) {
entry.push(process_harvest.pid);
} else {
self.process_name_pid_map
.insert(process_harvest.name.to_string(), vec![process_harvest.pid]);
}
if let Some(entry) = self.process_cmd_pid_map.get_mut(&process_harvest.command) {
entry.push(process_harvest.pid);
} else {
self.process_cmd_pid_map.insert(
process_harvest.command.to_string(),
vec![process_harvest.pid],
);
}
});
self.process_harvest = list_of_processes;
self.process_data.ingest(list_of_processes);
}
#[cfg(feature = "battery")]

View File

@ -3,7 +3,7 @@
use std::time::Instant;
#[cfg(target_os = "linux")]
use fxhash::FxHashMap;
use rustc_hash::FxHashMap;
#[cfg(not(target_os = "linux"))]
use sysinfo::{System, SystemExt};

View File

@ -45,41 +45,10 @@ pub enum ProcessSorting {
Count,
}
impl std::fmt::Display for ProcessSorting {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{}",
match &self {
ProcessSorting::CpuPercent => "CPU%",
ProcessSorting::MemPercent => "Mem%",
ProcessSorting::Mem => "Mem",
ProcessSorting::ReadPerSecond => "R/s",
ProcessSorting::WritePerSecond => "W/s",
ProcessSorting::TotalRead => "T.Read",
ProcessSorting::TotalWrite => "T.Write",
ProcessSorting::State => "State",
ProcessSorting::ProcessName => "Name",
ProcessSorting::Command => "Command",
ProcessSorting::Pid => "PID",
ProcessSorting::Count => "Count",
ProcessSorting::User => "User",
}
)
}
}
impl Default for ProcessSorting {
fn default() -> Self {
ProcessSorting::CpuPercent
}
}
#[derive(Debug, Clone, Default)]
pub struct ProcessHarvest {
pub pid: Pid,
pub parent_pid: Option<Pid>,
pub children_pids: Vec<Pid>,
pub cpu_usage_percent: f64,
pub mem_usage_percent: f64,
pub mem_usage_bytes: u64,
@ -102,3 +71,15 @@ pub struct ProcessHarvest {
#[cfg(target_family = "unix")]
pub user: Cow<'static, str>,
}
impl ProcessHarvest {
    /// Accumulates the resource-usage fields (CPU, memory, and disk I/O
    /// counters) of `rhs` onto `self`. Identity fields (PID, name, command,
    /// etc.) are left untouched, so `self` keeps representing the original
    /// process while carrying the summed usage.
    pub(crate) fn add(&mut self, rhs: &ProcessHarvest) {
        self.cpu_usage_percent += rhs.cpu_usage_percent;
        self.mem_usage_bytes += rhs.mem_usage_bytes;
        self.mem_usage_percent += rhs.mem_usage_percent;
        self.read_bytes_per_sec += rhs.read_bytes_per_sec;
        self.write_bytes_per_sec += rhs.write_bytes_per_sec;
        self.total_read_bytes += rhs.total_read_bytes;
        self.total_write_bytes += rhs.total_write_bytes;
    }
}

View File

@ -11,7 +11,7 @@ use sysinfo::ProcessStatus;
use procfs::process::{Process, Stat};
use fxhash::{FxHashMap, FxHashSet};
use rustc_hash::{FxHashMap, FxHashSet};
/// Maximum character length of a /proc/<PID>/stat process name.
/// If it's equal or greater, then we instead refer to the command for the name.
@ -203,7 +203,6 @@ fn read_proc(
ProcessHarvest {
pid: process.pid,
parent_pid,
children_pids: vec![],
cpu_usage_percent,
mem_usage_percent,
mem_usage_bytes,

View File

@ -89,7 +89,6 @@ pub fn get_process_data(
process_vector.push(ProcessHarvest {
pid: process_val.pid(),
parent_pid: process_val.parent(),
children_pids: vec![],
name,
command,
mem_usage_percent: if mem_total_kb > 0 {

View File

@ -58,7 +58,6 @@ pub fn get_process_data(
process_vector.push(ProcessHarvest {
pid: process_val.pid(),
parent_pid: process_val.parent(),
children_pids: vec![],
name,
command,
mem_usage_percent: if mem_total_kb > 0 {

View File

@ -9,8 +9,8 @@ use crate::{
ProcessDefaults,
},
};
use fxhash::FxHashMap;
use indextree::{Arena, NodeId};
use rustc_hash::FxHashMap;
use std::cmp::min;
use tui::layout::Rect;

View File

@ -288,11 +288,16 @@ where
self.table.current_scroll_index()
}
/// Returns the current column the table is sorting by.
/// Returns a reference to the current column the table is sorting by.
pub fn current_sorting_column(&self) -> &S {
&self.table.columns[self.sort_index]
}
/// Returns a mutable reference to the current column the table is sorting by.
pub fn current_mut_sorting_column(&mut self) -> &mut S {
&mut self.table.columns[self.sort_index]
}
/// Returns the current column index the table is sorting by.
pub fn current_sorting_column_index(&self) -> usize {
self.sort_index

View File

@ -39,8 +39,9 @@ pub trait TableColumn {
fn set_x_bounds(&mut self, x_bounds: Option<(u16, u16)>);
}
pub type TextTableData = Vec<Vec<(Cow<'static, str>, Option<Cow<'static, str>>, Option<Style>)>>;
pub type TextTableDataRef = [Vec<(Cow<'static, str>, Option<Cow<'static, str>>, Option<Style>)>];
pub(crate) type TextTableRow = Vec<(Cow<'static, str>, Option<Cow<'static, str>>, Option<Style>)>;
pub(crate) type TextTableData = Vec<TextTableRow>;
pub(crate) type TextTableDataRef = [TextTableRow];
/// A [`SimpleColumn`] represents some column in a [`TextTable`].
#[derive(Debug)]
@ -468,14 +469,13 @@ where
.style(painter.colours.table_header_style)
.bottom_margin(table_gap);
let table = Table::new(rows)
let mut table = Table::new(rows)
.header(header)
.style(painter.colours.text_style)
.highlight_style(if show_selected_entry {
painter.colours.currently_selected_text_style
} else {
painter.colours.text_style
});
.style(painter.colours.text_style);
if show_selected_entry {
table = table.highlight_style(painter.colours.currently_selected_text_style);
}
if self.selectable {
f.render_stateful_widget(

View File

@ -1,13 +1,15 @@
use std::{borrow::Cow, collections::HashMap};
use std::{borrow::Cow, cell::RefCell, collections::HashMap};
use crossterm::event::{KeyCode, KeyEvent, KeyModifiers, MouseButton, MouseEvent, MouseEventKind};
use float_ord::FloatOrd;
use itertools::{Either, Itertools};
use once_cell::unsync::Lazy;
use rustc_hash::{FxHashMap, FxHashSet};
use tui::{
backend::Backend,
layout::{Constraint, Direction, Layout, Rect},
style::Style,
text::{Span, Spans},
widgets::{Borders, Paragraph},
Frame,
@ -18,14 +20,15 @@ use crate::{
data_harvester::processes::ProcessHarvest,
event::{ComponentEventResult, MultiKey, MultiKeyResult, ReturnSignal, SelectionAction},
query::*,
text_table::DesiredColumnWidth,
text_table::{DesiredColumnWidth, TextTableRow},
widgets::tui_stuff::BlockBuilder,
AppConfigFields, DataCollection,
AppConfigFields, DataCollection, ProcessData,
},
canvas::Painter,
data_conversion::get_string_with_bytes,
data_conversion::{get_string_with_bytes, get_string_with_bytes_per_second},
options::{layout_options::LayoutRule, ProcessDefaults},
utils::error::BottomError,
Pid,
};
use crate::app::{
@ -140,7 +143,7 @@ impl ProcessSortType {
ProcessSortType::Wps => Hard(Some(8)),
ProcessSortType::TotalRead => Hard(Some(7)),
ProcessSortType::TotalWrite => Hard(Some(8)),
ProcessSortType::User => Flex(0.08),
ProcessSortType::User => Flex(0.08), // FIXME: [URGENT] adjust this scaling
ProcessSortType::State => Hard(Some(8)),
}
}
@ -211,7 +214,7 @@ impl SortableColumn for ProcessSortColumn {
}
fn default_descending(&self) -> bool {
self.sortable_column.default_descending()
self.sortable_column.default_descending() // TODO: [Behaviour] not sure if I like this behaviour tbh
}
fn sorting_status(&self) -> SortStatus {
@ -239,35 +242,37 @@ impl SortableColumn for ProcessSortColumn {
}
}
/// State owned by the process manager while in tree mode.
#[derive(Default)]
struct TreeData {
    // PIDs the user has manually collapsed in the tree view.
    user_collapsed_pids: FxHashSet<Pid>,
    // PIDs in last-displayed order; wrapped in a RefCell so the `&self`
    // display path can refresh it while rendering.
    sorted_pids: RefCell<Vec<Pid>>,
}
/// Which display mode the process manager is currently in. The modes are
/// mutually exclusive.
enum ManagerMode {
    // One row per process.
    Normal,
    // Rows merged by name or command (see `is_using_command`), with a count.
    Grouped,
    // Parent/child tree view; carries its own per-tree UI state.
    Tree(TreeData),
}
/// A searchable, sortable table to manage processes.
pub struct ProcessManager {
bounds: Rect,
process_table: SortableTextTable<ProcessSortColumn>,
sort_menu: SortMenu,
search_block_bounds: Rect,
search_input: TextInput,
dd_multi: MultiKey,
selected: ProcessManagerSelection,
prev_selected: ProcessManagerSelection,
in_tree_mode: bool,
manager_mode: ManagerMode,
show_sort: bool,
show_search: bool,
search_modifiers: SearchModifiers,
display_data: TextTableData,
process_filter: Option<Result<Query, BottomError>>,
block_border: Borders,
width: LayoutRule,
height: LayoutRule,
show_scroll_index: bool,
}
@ -299,7 +304,7 @@ impl ProcessManager {
dd_multi: MultiKey::register(vec!['d', 'd']), // TODO: [Optimization] Maybe use something static/const/arrayvec?...
selected: ProcessManagerSelection::Processes,
prev_selected: ProcessManagerSelection::Processes,
in_tree_mode: false,
manager_mode: ManagerMode::Normal,
show_sort: false,
show_search: false,
search_modifiers: SearchModifiers::default(),
@ -336,8 +341,12 @@ impl ProcessManager {
self
}
fn set_tree_mode(&mut self, in_tree_mode: bool) {
self.in_tree_mode = in_tree_mode;
fn set_tree_mode(&mut self, tree_mode: bool) {
self.manager_mode = if tree_mode {
ManagerMode::Tree(TreeData::default())
} else {
ManagerMode::Normal
};
}
/// Sets whether to show the scroll index.
@ -409,39 +418,58 @@ impl ProcessManager {
ComponentEventResult::Signal(ReturnSignal::Update)
}
fn is_grouped(&self) -> bool {
matches!(
self.process_table.columns()[0].sort_type,
ProcessSortType::Count
)
    /// Switches back to normal mode: restores the PID column in slot 0 and
    /// re-adds the State (and, on Unix, User) columns that grouped mode
    /// removes.
    fn disable_grouped(&mut self) {
        self.manager_mode = ManagerMode::Normal;

        self.process_table
            .set_column(ProcessSortColumn::new(ProcessSortType::Pid), 0);
        self.process_table
            .add_column(ProcessSortColumn::new(ProcessSortType::State), 8);
        #[cfg(target_family = "unix")]
        {
            self.process_table
                .add_column(ProcessSortColumn::new(ProcessSortType::User), 8);
        }
    }
    /// Switches into grouped mode: replaces the PID column (slot 0) with a
    /// Count column and removes the trailing per-process columns.
    fn enable_grouped(&mut self) {
        self.manager_mode = ManagerMode::Grouped;

        self.process_table
            .set_column(ProcessSortColumn::new(ProcessSortType::Count), 0);
        #[cfg(target_family = "unix")]
        {
            // NOTE(review): presumably this removes the User column added on
            // Unix — confirm the index against the column layout.
            self.process_table.remove_column(9, Some(2));
        }
        self.process_table.remove_column(8, Some(2));
    }
fn toggle_grouped(&mut self) -> ComponentEventResult {
if self.is_grouped() {
self.process_table
.set_column(ProcessSortColumn::new(ProcessSortType::Pid), 0);
self.process_table
.add_column(ProcessSortColumn::new(ProcessSortType::State), 8);
#[cfg(target_family = "unix")]
{
self.process_table
.add_column(ProcessSortColumn::new(ProcessSortType::User), 8);
}
} else {
self.process_table
.set_column(ProcessSortColumn::new(ProcessSortType::Count), 0);
#[cfg(target_family = "unix")]
{
self.process_table.remove_column(9, Some(2));
}
self.process_table.remove_column(8, Some(2));
match self.manager_mode {
ManagerMode::Grouped => self.disable_grouped(),
ManagerMode::Normal | ManagerMode::Tree { .. } => self.enable_grouped(),
}
// Invalidate row cache.
self.process_table.invalidate_cached_columns();
ComponentEventResult::Signal(ReturnSignal::Update)
}
    /// Toggles tree mode. Grouped and tree modes are mutually exclusive, so
    /// toggling tree on while grouped first restores the normal column set.
    fn toggle_tree_mode(&mut self) -> ComponentEventResult {
        match self.manager_mode {
            ManagerMode::Normal => {
                self.set_tree_mode(true);
            }
            ManagerMode::Grouped => {
                // Grouped mode changed the columns; restore them before
                // switching to tree mode.
                self.disable_grouped();
                self.set_tree_mode(true);
            }
            ManagerMode::Tree { .. } => {
                self.set_tree_mode(false);
            }
        }

        // The column/row layout changed, so the cached rows are stale.
        self.process_table.invalidate_cached_columns();
        ComponentEventResult::Signal(ReturnSignal::Update)
    }
@ -497,10 +525,493 @@ impl ProcessManager {
ComponentEventResult::Signal(ReturnSignal::Update)
}
/// Toggles tree mode.
fn toggle_tree_mode(&mut self) -> ComponentEventResult {
self.in_tree_mode = !self.in_tree_mode;
ComponentEventResult::Signal(ReturnSignal::Update)
    /// Returns whether a [`ProcessHarvest`] matches the [`ProcessManager`]'s query. If there
    /// is no query then it will always return true.
    ///
    /// Note that a query that failed to parse (`Some(Err(..))`) also matches
    /// every process, same as no query at all.
    fn does_process_match_query(&self, process: &ProcessHarvest) -> bool {
        if let Some(Ok(query)) = &self.process_filter {
            query.check(process, self.is_using_command())
        } else {
            true
        }
    }
fn get_display_tree(
&self, tree_data: &TreeData, data_collection: &DataCollection,
) -> TextTableData {
fn build_tree(
manager: &ProcessManager, data_collection: &DataCollection,
filtered_tree: &FxHashMap<Pid, Vec<Pid>>, matching_pids: &FxHashMap<Pid, bool>,
current_process: &ProcessHarvest, mut prefixes: Vec<String>, is_last: bool,
collapsed_pids: &FxHashSet<Pid>, sorted_pids: &mut Vec<Pid>,
) -> TextTableData {
const BRANCH_ENDING: char = '└';
const BRANCH_VERTICAL: char = '│';
const BRANCH_SPLIT: char = '├';
const BRANCH_HORIZONTAL: char = '─';
sorted_pids.push(current_process.pid);
let ProcessData {
process_harvest,
process_name_pid_map,
process_cmd_pid_map,
..
} = &data_collection.process_data;
let is_disabled = !*matching_pids.get(&current_process.pid).unwrap_or(&false);
if collapsed_pids.contains(&current_process.pid) {
let mut queue = if let Some(children) = filtered_tree.get(&current_process.pid) {
children
.iter()
.filter_map(|child_pid| process_harvest.get(child_pid))
.collect_vec()
} else {
vec![]
};
let mut summed_process = current_process.clone();
while let Some(process) = queue.pop() {
summed_process.add(process);
if let Some(children) = filtered_tree.get(&process.pid) {
queue.extend(
children
.iter()
.filter_map(|child_pid| process_harvest.get(child_pid))
.collect_vec(),
);
}
}
let prefix = if prefixes.is_empty() {
"+ ".to_string()
} else {
format!(
"{}{}{} + ",
prefixes.join(""),
if is_last { BRANCH_ENDING } else { BRANCH_SPLIT },
BRANCH_HORIZONTAL
)
};
let process_text = manager.process_to_text(
&summed_process,
process_cmd_pid_map,
process_name_pid_map,
prefix,
is_disabled,
);
vec![process_text]
} else {
let prefix = if prefixes.is_empty() {
String::default()
} else {
format!(
"{}{}{} ",
prefixes.join(""),
if is_last { BRANCH_ENDING } else { BRANCH_SPLIT },
BRANCH_HORIZONTAL
)
};
let process_text = manager.process_to_text(
current_process,
process_cmd_pid_map,
process_name_pid_map,
prefix,
is_disabled,
);
if let Some(children) = filtered_tree.get(&current_process.pid) {
if prefixes.is_empty() {
prefixes.push(String::default());
} else {
prefixes.push(if is_last {
" ".to_string()
} else {
format!("{} ", BRANCH_VERTICAL)
});
}
let mut children = children
.iter()
.filter_map(|child_pid| process_harvest.get(child_pid))
.collect_vec();
manager.sort_process_vec(&mut children, data_collection);
let children_length = children.len();
let children_text = children
.into_iter()
.enumerate()
.map(|(itx, child_process)| {
build_tree(
manager,
data_collection,
filtered_tree,
matching_pids,
child_process,
prefixes.clone(),
itx + 1 == children_length,
collapsed_pids,
sorted_pids,
)
})
.flatten()
.collect_vec();
std::iter::once(process_text)
.chain(children_text)
.collect_vec()
} else {
vec![process_text]
}
}
}
fn filter_tree(
process_data: &ProcessData, matching_pids: &FxHashMap<Pid, bool>,
) -> FxHashMap<Pid, Vec<Pid>> {
let ProcessData {
process_harvest,
orphan_pids,
..
} = process_data;
let mut filtered_tree = FxHashMap::default();
fn traverse(
current_process: &ProcessHarvest, process_data: &ProcessData,
new_tree: &mut FxHashMap<Pid, Vec<Pid>>, matching_pids: &FxHashMap<Pid, bool>,
) -> bool {
let ProcessData {
process_harvest,
process_parent_mapping,
..
} = process_data;
let is_process_matching =
*matching_pids.get(&current_process.pid).unwrap_or(&false);
if let Some(children) = process_parent_mapping.get(&current_process.pid) {
let results = children
.iter()
.filter_map(|pid| process_harvest.get(pid))
.map(|child_process| {
let contains_match =
traverse(child_process, process_data, new_tree, matching_pids);
if contains_match {
new_tree
.entry(current_process.pid)
.or_default()
.push(child_process.pid);
}
contains_match || is_process_matching
})
.collect_vec();
let has_matching_child = results.into_iter().any(|x| x);
is_process_matching || has_matching_child
} else {
is_process_matching
}
}
for orphan_pid in orphan_pids {
if let Some(orphan_process) = process_harvest.get(orphan_pid) {
traverse(
orphan_process,
process_data,
&mut filtered_tree,
matching_pids,
);
}
}
filtered_tree
}
let ProcessData {
process_harvest,
orphan_pids,
..
} = &data_collection.process_data;
let TreeData {
user_collapsed_pids,
sorted_pids,
} = tree_data;
let sorted_pids = &mut *sorted_pids.borrow_mut();
let matching_pids = data_collection
.process_data
.process_harvest
.iter()
.map(|(pid, harvest)| (*pid, self.does_process_match_query(harvest)))
.collect::<FxHashMap<_, _>>();
let filtered_tree = filter_tree(&data_collection.process_data, &matching_pids);
let mut orphan_processes = orphan_pids
.iter()
.filter_map(|child| process_harvest.get(child))
.collect_vec();
self.sort_process_vec(&mut orphan_processes, data_collection);
let orphan_length = orphan_processes.len();
let mut new_sorted_pids = vec![];
let resulting_strings = orphan_processes
.into_iter()
.enumerate()
.filter_map(|(itx, p)| {
if filtered_tree.contains_key(&p.pid) {
Some(build_tree(
self,
data_collection,
&filtered_tree,
&matching_pids,
p,
vec![],
itx + 1 == orphan_length,
&user_collapsed_pids,
&mut new_sorted_pids,
))
} else {
None
}
})
.flatten()
.collect_vec();
*sorted_pids = new_sorted_pids;
resulting_strings
}
    /// Builds the display rows for normal and grouped modes: filters
    /// processes by the current query, merges same-name/command entries when
    /// grouped (summing their usage via `ProcessHarvest::add`), sorts by the
    /// current sort column, and converts each entry into a table row.
    fn get_display_normal(&self, data_collection: &DataCollection) -> TextTableData {
        let mut id_pid_map: HashMap<String, ProcessHarvest>;
        let filtered_iter = data_collection
            .process_data
            .process_harvest
            .values()
            .filter(|process| self.does_process_match_query(process));

        let mut filtered_grouped_vec = if let ManagerMode::Grouped = self.manager_mode {
            id_pid_map = HashMap::new();
            filtered_iter.for_each(|process| {
                // Group key is the command or the name, depending on settings.
                let id = if self.is_using_command() {
                    &process.command
                } else {
                    &process.name
                };

                if let Some(grouped_process_harvest) = id_pid_map.get_mut(id) {
                    grouped_process_harvest.add(process);
                } else {
                    id_pid_map.insert(id.clone(), process.clone());
                }
            });

            Either::Left(id_pid_map.values())
        } else {
            Either::Right(filtered_iter)
        }
        .collect::<Vec<_>>();

        self.sort_process_vec(&mut filtered_grouped_vec, data_collection);

        let cmd_pid_map = &data_collection.process_data.process_cmd_pid_map;
        let name_pid_map = &data_collection.process_data.process_name_pid_map;
        filtered_grouped_vec
            .into_iter()
            .map(|process| self.process_to_text(process, cmd_pid_map, name_pid_map, "", false))
            .collect::<Vec<_>>()
    }
    /// Returns true if the currently-selected sort column is sorting in
    /// descending order.
    fn is_reverse_sort(&self) -> bool {
        matches!(
            self.process_table
                .current_sorting_column()
                .sortable_column
                .sorting_status(),
            SortStatus::SortDescending
        )
    }
    /// Sorts `process_vec` in place by the currently-selected sort column,
    /// then reverses the result if that column is sorting descending.
    fn sort_process_vec(
        &self, process_vec: &mut [&ProcessHarvest], data_collection: &DataCollection,
    ) {
        match self.process_table.current_sorting_column().sort_type {
            ProcessSortType::Pid => {
                process_vec.sort_by_key(|p| p.pid);
            }
            ProcessSortType::Count => {
                // Count is the number of PIDs sharing this process's
                // command/name; the key lookup is non-trivial, hence the
                // cached-key sort.
                if self.is_using_command() {
                    process_vec.sort_by_cached_key(|p| {
                        data_collection
                            .process_data
                            .process_cmd_pid_map
                            .get(&p.command)
                            .map(|v| v.len())
                            .unwrap_or(0)
                    });
                } else {
                    process_vec.sort_by_cached_key(|p| {
                        data_collection
                            .process_data
                            .process_name_pid_map
                            .get(&p.name)
                            .map(|v| v.len())
                            .unwrap_or(0)
                    });
                }
            }
            ProcessSortType::Name => {
                process_vec.sort_by_key(|p| &p.name);
            }
            ProcessSortType::Command => {
                process_vec.sort_by_key(|p| &p.command);
            }
            ProcessSortType::Cpu => {
                process_vec.sort_by_key(|p| FloatOrd(p.cpu_usage_percent));
            }
            ProcessSortType::Mem => {
                process_vec.sort_by_key(|p| p.mem_usage_bytes);
            }
            ProcessSortType::MemPercent => {
                process_vec.sort_by_key(|p| FloatOrd(p.mem_usage_percent));
            }
            ProcessSortType::Rps => {
                process_vec.sort_by_key(|p| p.read_bytes_per_sec);
            }
            ProcessSortType::Wps => {
                process_vec.sort_by_key(|p| p.write_bytes_per_sec);
            }
            ProcessSortType::TotalRead => {
                process_vec.sort_by_key(|p| p.total_read_bytes);
            }
            ProcessSortType::TotalWrite => {
                process_vec.sort_by_key(|p| p.total_write_bytes);
            }
            ProcessSortType::User => {
                // This comment prevents rustfmt from breaking the cfg block. Yeah, uh, don't ask.
                #[cfg(target_family = "unix")]
                {
                    process_vec.sort_by_key(|p| &p.user);
                }
            }
            ProcessSortType::State => {
                process_vec.sort_by_key(|p| &p.process_state);
            }
        }

        if self.is_reverse_sort() {
            process_vec.reverse();
        }
    }
    /// Converts a single [`ProcessHarvest`] into one table row, producing one
    /// cell per visible column. `prefix` is prepended to the name/command
    /// cell (used for tree-mode branch drawing); `disabled` attaches a
    /// default [`Style`] to grey the row out.
    ///
    /// NOTE(review): the `Pid` and `Command` cells pass `None` instead of
    /// `style`, so they are not greyed out when `disabled` — confirm whether
    /// that is intentional.
    fn process_to_text<D: std::fmt::Display>(
        &self, process: &ProcessHarvest, cmd_pid_map: &HashMap<String, Vec<Pid>>,
        name_pid_map: &HashMap<String, Vec<Pid>>, prefix: D, disabled: bool,
    ) -> TextTableRow {
        // A `Some(Style::default())` overrides the painter's per-cell styling,
        // visually disabling the cell.
        let style = if disabled {
            Some(Style::default())
        } else {
            None
        };

        self.process_table
            .columns()
            .iter()
            .map(|column| match &column.sort_type {
                ProcessSortType::Pid => (process.pid.to_string().into(), None, None),
                ProcessSortType::Count => (
                    if self.is_using_command() {
                        cmd_pid_map
                            .get(&process.command)
                            .map(|v| v.len())
                            .unwrap_or(0)
                            .to_string()
                            .into()
                    } else {
                        name_pid_map
                            .get(&process.name)
                            .map(|v| v.len())
                            .unwrap_or(0)
                            .to_string()
                            .into()
                    },
                    None,
                    style,
                ),
                ProcessSortType::Name => {
                    (format!("{}{}", prefix, process.name).into(), None, style)
                }
                ProcessSortType::Command => {
                    (format!("{}{}", prefix, process.command).into(), None, None)
                }
                ProcessSortType::Cpu => (
                    format!("{:.1}%", process.cpu_usage_percent).into(),
                    None,
                    style,
                ),
                ProcessSortType::Mem => (
                    get_string_with_bytes(process.mem_usage_bytes).into(),
                    None,
                    style,
                ),
                ProcessSortType::MemPercent => (
                    format!("{:.1}%", process.mem_usage_percent).into(),
                    None,
                    style,
                ),
                ProcessSortType::Rps => (
                    get_string_with_bytes_per_second(process.read_bytes_per_sec).into(),
                    None,
                    style,
                ),
                ProcessSortType::Wps => (
                    get_string_with_bytes_per_second(process.write_bytes_per_sec).into(),
                    None,
                    style,
                ),
                ProcessSortType::TotalRead => (
                    get_string_with_bytes(process.total_read_bytes).into(),
                    None,
                    style,
                ),
                ProcessSortType::TotalWrite => (
                    get_string_with_bytes(process.total_write_bytes).into(),
                    None,
                    style,
                ),
                ProcessSortType::User => (process.user.clone(), None, style),
                ProcessSortType::State => (
                    process.process_state.clone().into(),
                    None, // Currently disabled; what happens if you try to sort in the shortened form?
                    style,
                ),
            })
            .collect::<Vec<_>>()
    }
fn tree_toggle_current_process(&mut self) -> ComponentEventResult {
if let ManagerMode::Tree(tree_data) = &mut self.manager_mode {
let TreeData {
user_collapsed_pids,
sorted_pids,
} = tree_data;
let sorted_pids = &*sorted_pids.borrow();
if let Some(current_pid) = sorted_pids.get(self.process_table.current_scroll_index()) {
if user_collapsed_pids.contains(current_pid) {
user_collapsed_pids.remove(current_pid);
} else {
user_collapsed_pids.insert(*current_pid);
}
return ComponentEventResult::Signal(ReturnSignal::Update);
}
}
ComponentEventResult::NoRedraw
}
}
@ -514,8 +1025,6 @@ impl Component for ProcessManager {
}
fn handle_key_event(&mut self, event: KeyEvent) -> ComponentEventResult {
// "Global" handling:
if let KeyCode::Esc = event.code {
match self.selected {
ProcessManagerSelection::Processes => {
@ -541,21 +1050,19 @@ impl Component for ProcessManager {
match self.selected {
ProcessManagerSelection::Processes => {
// Try to catch some stuff first...
if event.modifiers.is_empty() {
match event.code {
KeyCode::Tab => {
// Handle grouping/ungrouping
return self.toggle_grouped();
}
KeyCode::Char('P') => {
// Show full command/process name
return self.toggle_command();
}
KeyCode::Char('d') => {
match self.dd_multi.input('d') {
MultiKeyResult::Completed => {
// Kill the selected process(es)
todo!()
}
MultiKeyResult::Accepted | MultiKeyResult::Rejected => {
return ComponentEventResult::NoRedraw;
@ -568,11 +1075,8 @@ impl Component for ProcessManager {
KeyCode::Char('%') => {
return self.toggle_memory();
}
KeyCode::Char('+') => {
// Expand a branch
}
KeyCode::Char('-') => {
// Collapse a branch
KeyCode::Char('+') | KeyCode::Char('-') | KeyCode::Char('=') => {
return self.tree_toggle_current_process();
}
KeyCode::Char('t') | KeyCode::F(5) => {
return self.toggle_tree_mode();
@ -582,6 +1086,7 @@ impl Component for ProcessManager {
}
KeyCode::F(9) => {
// Kill the selected process(es)
todo!()
}
_ => {}
}
@ -591,7 +1096,6 @@ impl Component for ProcessManager {
}
} else if let KeyModifiers::SHIFT = event.modifiers {
if let KeyCode::Char('P') = event.code {
// Show full command/process name
return self.toggle_command();
}
}
@ -667,12 +1171,14 @@ impl Component for ProcessManager {
match &event.kind {
MouseEventKind::Down(MouseButton::Left) => {
if self.process_table.does_border_intersect_mouse(&event) {
let event_result = self.process_table.handle_mouse_event(event);
if let ProcessManagerSelection::Processes = self.selected {
self.process_table.handle_mouse_event(event)
event_result
} else {
self.prev_selected = self.selected;
self.selected = ProcessManagerSelection::Processes;
match self.process_table.handle_mouse_event(event) {
match event_result {
ComponentEventResult::Unhandled
| ComponentEventResult::Redraw
| ComponentEventResult::NoRedraw => ComponentEventResult::Redraw,
@ -884,6 +1390,15 @@ impl Widget for ProcessManager {
.borders(self.block_border)
.show_esc(expanded && !self.show_sort && !self.show_search);
// TODO: [Refactor] This is an ugly hack to add the disabled style... this could be solved by storing style locally to the widget.
self.display_data.iter_mut().for_each(|row| {
row.iter_mut().for_each(|col| {
if let Some(style) = &mut col.2 {
*style = style.patch(painter.colours.disabled_text_style);
}
})
});
self.process_table.draw_tui_table(
painter,
f,
@ -896,203 +1411,10 @@ impl Widget for ProcessManager {
}
fn update_data(&mut self, data_collection: &DataCollection) {
let mut id_pid_map: HashMap<String, ProcessHarvest>;
let filtered_iter = data_collection.process_harvest.iter().filter(|process| {
if let Some(Ok(query)) = &self.process_filter {
query.check(process, self.is_using_command())
} else {
true
}
});
let filtered_grouped_iter = if self.is_grouped() {
id_pid_map = HashMap::new();
filtered_iter.for_each(|process_harvest| {
let id = if self.is_using_command() {
&process_harvest.command
} else {
&process_harvest.name
};
if let Some(grouped_process_harvest) = id_pid_map.get_mut(id) {
grouped_process_harvest.cpu_usage_percent += process_harvest.cpu_usage_percent;
grouped_process_harvest.mem_usage_bytes += process_harvest.mem_usage_bytes;
grouped_process_harvest.mem_usage_percent += process_harvest.mem_usage_percent;
grouped_process_harvest.read_bytes_per_sec +=
process_harvest.read_bytes_per_sec;
grouped_process_harvest.write_bytes_per_sec +=
process_harvest.write_bytes_per_sec;
grouped_process_harvest.total_read_bytes += process_harvest.total_read_bytes;
grouped_process_harvest.total_write_bytes += process_harvest.total_write_bytes;
} else {
id_pid_map.insert(id.clone(), process_harvest.clone());
}
});
Either::Left(id_pid_map.values())
} else {
Either::Right(filtered_iter)
self.display_data = match &self.manager_mode {
ManagerMode::Normal | ManagerMode::Grouped => self.get_display_normal(data_collection),
ManagerMode::Tree(tree_data) => self.get_display_tree(tree_data, data_collection),
};
let filtered_sorted_iter = if let ProcessSortType::Count =
self.process_table.current_sorting_column().sort_type
{
let mut v = filtered_grouped_iter.collect::<Vec<_>>();
v.sort_by_cached_key(|k| {
if self.is_using_command() {
data_collection
.process_cmd_pid_map
.get(&k.command)
.map(|v| v.len())
.unwrap_or(0)
} else {
data_collection
.process_name_pid_map
.get(&k.name)
.map(|v| v.len())
.unwrap_or(0)
}
});
Either::Left(v.into_iter())
} else {
Either::Right(filtered_grouped_iter.sorted_by(
match self.process_table.current_sorting_column().sort_type {
ProcessSortType::Pid => {
|a: &&ProcessHarvest, b: &&ProcessHarvest| a.pid.cmp(&b.pid)
}
ProcessSortType::Count => {
// This case should be impossible by the above check.
unreachable!()
}
ProcessSortType::Name => {
|a: &&ProcessHarvest, b: &&ProcessHarvest| a.name.cmp(&b.name)
}
ProcessSortType::Command => {
|a: &&ProcessHarvest, b: &&ProcessHarvest| a.command.cmp(&b.command)
}
ProcessSortType::Cpu => |a: &&ProcessHarvest, b: &&ProcessHarvest| {
FloatOrd(a.cpu_usage_percent).cmp(&FloatOrd(b.cpu_usage_percent))
},
ProcessSortType::Mem => |a: &&ProcessHarvest, b: &&ProcessHarvest| {
a.mem_usage_bytes.cmp(&b.mem_usage_bytes)
},
ProcessSortType::MemPercent => |a: &&ProcessHarvest, b: &&ProcessHarvest| {
FloatOrd(a.mem_usage_percent).cmp(&FloatOrd(b.mem_usage_percent))
},
ProcessSortType::Rps => |a: &&ProcessHarvest, b: &&ProcessHarvest| {
a.read_bytes_per_sec.cmp(&b.read_bytes_per_sec)
},
ProcessSortType::Wps => |a: &&ProcessHarvest, b: &&ProcessHarvest| {
a.write_bytes_per_sec.cmp(&b.write_bytes_per_sec)
},
ProcessSortType::TotalRead => |a: &&ProcessHarvest, b: &&ProcessHarvest| {
a.total_read_bytes.cmp(&b.total_read_bytes)
},
ProcessSortType::TotalWrite => |a: &&ProcessHarvest, b: &&ProcessHarvest| {
a.total_write_bytes.cmp(&b.total_write_bytes)
},
ProcessSortType::User => {
#[cfg(target_family = "unix")]
{
|a: &&ProcessHarvest, b: &&ProcessHarvest| a.user.cmp(&b.user)
}
#[cfg(not(target_family = "unix"))]
{
|_a: &&ProcessHarvest, _b: &&ProcessHarvest| std::cmp::Ordering::Equal
}
}
ProcessSortType::State => |a: &&ProcessHarvest, b: &&ProcessHarvest| {
a.process_state.cmp(&b.process_state)
},
},
))
};
self.display_data = if let SortStatus::SortDescending = self
.process_table
.current_sorting_column()
.sortable_column
.sorting_status()
{
Either::Left(filtered_sorted_iter.rev())
} else {
Either::Right(filtered_sorted_iter)
}
.map(|process| {
self.process_table
.columns()
.iter()
.map(|column| match &column.sort_type {
ProcessSortType::Pid => (process.pid.to_string().into(), None, None),
ProcessSortType::Count => (
if self.is_using_command() {
data_collection
.process_cmd_pid_map
.get(&process.command)
.map(|v| v.len())
.unwrap_or(0)
.to_string()
.into()
} else {
data_collection
.process_name_pid_map
.get(&process.name)
.map(|v| v.len())
.unwrap_or(0)
.to_string()
.into()
},
None,
None,
),
ProcessSortType::Name => (process.name.clone().into(), None, None),
ProcessSortType::Command => (process.command.clone().into(), None, None),
ProcessSortType::Cpu => (
format!("{:.1}%", process.cpu_usage_percent).into(),
None,
None,
),
ProcessSortType::Mem => (
get_string_with_bytes(process.mem_usage_bytes).into(),
None,
None,
),
ProcessSortType::MemPercent => (
format!("{:.1}%", process.mem_usage_percent).into(),
None,
None,
),
ProcessSortType::Rps => (
get_string_with_bytes(process.read_bytes_per_sec).into(),
None,
None,
),
ProcessSortType::Wps => (
get_string_with_bytes(process.write_bytes_per_sec).into(),
None,
None,
),
ProcessSortType::TotalRead => (
get_string_with_bytes(process.total_read_bytes).into(),
None,
None,
),
ProcessSortType::TotalWrite => (
get_string_with_bytes(process.total_write_bytes).into(),
None,
None,
),
ProcessSortType::User => (process.user.clone(), None, None),
ProcessSortType::State => (
process.process_state.clone().into(),
None, // Currently disabled; what happens if you try to sort in the shortened form?
None,
),
})
.collect::<Vec<_>>()
})
.collect::<Vec<_>>();
}
fn width(&self) -> LayoutRule {

View File

@ -1,8 +1,8 @@
use std::cmp::min;
use crossterm::event::{KeyEvent, KeyModifiers, MouseEvent, MouseEventKind};
use fxhash::FxHashMap;
use itertools::{EitherOrBoth, Itertools};
use rustc_hash::FxHashMap;
use tui::{
backend::Backend,
layout::{Constraint, Layout, Rect},

View File

@ -1,7 +1,7 @@
use std::str::FromStr;
use fxhash::FxHashMap;
use indextree::{Arena, NodeId};
use rustc_hash::FxHashMap;
use tui::{
backend::Backend,
layout::{Constraint, Direction, Layout, Rect},

View File

@ -14,72 +14,6 @@ FLAGS:
const USAGE: &str = "
btm [FLAG]";
const DEFAULT_WIDGET_TYPE_STR: &str = if cfg!(feature = "battery") {
"\
Sets which widget type to use as the default widget.
For the default layout, this defaults to the 'process' widget.
For a custom layout, it defaults to the first widget it sees.
For example, suppose we have a layout that looks like:
+-------------------+-----------------------+
| CPU (1) | CPU (2) |
+---------+---------+-------------+---------+
| Process | CPU (3) | Temperature | CPU (4) |
+---------+---------+-------------+---------+
Setting '--default_widget_type Temp' will make the Temperature
widget selected by default.
Supported widget names:
+--------------------------+
| cpu |
+--------------------------+
| mem, memory |
+--------------------------+
| net, network |
+--------------------------+
| proc, process, processes |
+--------------------------+
| temp, temperature |
+--------------------------+
| disk |
+--------------------------+
| batt, battery |
+--------------------------+
\n\n"
} else {
"\
Sets which widget type to use as the default widget.
For the default layout, this defaults to the 'process' widget.
For a custom layout, it defaults to the first widget it sees.
For example, suppose we have a layout that looks like:
+-------------------+-----------------------+
| CPU (1) | CPU (2) |
+---------+---------+-------------+---------+
| Process | CPU (3) | Temperature | CPU (4) |
+---------+---------+-------------+---------+
Setting '--default_widget_type Temp' will make the Temperature
widget selected by default.
Supported widget names:
+--------------------------+
| cpu |
+--------------------------+
| mem, memory |
+--------------------------+
| net, network |
+--------------------------+
| proc, process, processes |
+--------------------------+
| temp, temperature |
+--------------------------+
| disk |
+--------------------------+
\n\n"
};
pub fn get_matches() -> clap::ArgMatches<'static> {
build_app().get_matches()
}
@ -345,37 +279,6 @@ it defaults to showing it by percentage.\n\n",
Default time value for graphs in milliseconds. The minimum
time is 30s (30000), and the default is 60s (60000).\n\n\n",
);
let default_widget_count = Arg::with_name("default_widget_count")
.long("default_widget_count")
.takes_value(true)
.requires_all(&["default_widget_type"])
.value_name("INT")
.help("Sets the n'th selected widget type as the default.")
.long_help(
"\
Sets the n'th selected widget type to use as the default widget.
Requires 'default_widget_type' to also be set, and defaults to 1.
This reads from left to right, top to bottom. For example, suppose
we have a layout that looks like:
+-------------------+-----------------------+
| CPU (1) | CPU (2) |
+---------+---------+-------------+---------+
| Process | CPU (3) | Temperature | CPU (4) |
+---------+---------+-------------+---------+
And we set our default widget type to 'CPU'. If we set
'--default_widget_count 1', then it would use the CPU (1) as
the default widget. If we set '--default_widget_count 3', it would
use CPU (3) as the default instead.
\n\n",
);
let default_widget_type = Arg::with_name("default_widget_type")
.long("default_widget_type")
.takes_value(true)
.value_name("WIDGET TYPE")
.help("Sets the default widget type, use --help for more info.")
.long_help(DEFAULT_WIDGET_TYPE_STR);
let rate = Arg::with_name("rate")
.short("r")
.long("rate")
@ -453,8 +356,6 @@ Displays the network widget with binary prefixes (i.e. kibibits, mebibits) rathe
.arg(color)
.arg(mem_as_value)
.arg(default_time_value)
.arg(default_widget_count)
.arg(default_widget_type)
.arg(disable_click)
.arg(dot_marker)
.arg(group)

View File

@ -299,7 +299,10 @@ pub const PROCESS_HELP_TEXT: [[&str; 2]; 14] = [
"Toggle between values and percentages for memory usage",
],
["t, F5", "Toggle tree mode"],
["+, -, click", "Collapse/expand a branch while in tree mode"],
[
"+, -, =, click",
"Collapse/expand a branch while in tree mode",
],
[
"click on header",
"Sorts the entries by that column, click again to invert the sort",
@ -539,9 +542,6 @@ pub const CONFIG_TEXT: &str = r##"# This is a default config file for bottom. A
#time_delta = 15000
# Hides the time scale.
#hide_time = false
# Override layout default widget
#default_widget_type = "proc"
#default_widget_count = 1
# Use basic mode
#basic = false
# Use the old network legend style

View File

@ -3,16 +3,10 @@
use crate::app::data_harvester::temperature::TemperatureType;
use crate::app::text_table::TextTableData;
use crate::app::DataCollection;
use crate::{
app::data_harvester,
utils::{self, gen_util::*},
};
use crate::{app::data_harvester, utils::gen_util::*};
use crate::{app::AxisScaling, units::data_units::DataUnit, Pid};
use data_harvester::processes::ProcessSorting;
use fxhash::FxBuildHasher;
use indexmap::IndexSet;
use std::borrow::Cow;
use std::collections::{HashMap, VecDeque};
/// Point is of time, data
type Point = (f64, f64);
@ -558,9 +552,9 @@ pub fn get_disk_io_strings(
)
}
/// Returns a string given a value that is converted to the closest SI-variant.
/// Returns a string given a value that is converted to the closest SI-variant, per second.
/// If the value is greater than a giga-X, then it will return a decimal place.
pub fn get_string_with_bytes(value: u64) -> String {
pub fn get_string_with_bytes_per_second(value: u64) -> String {
let converted_values = get_decimal_bytes(value);
if value >= GIGA_LIMIT {
format!("{:.*}{}/s", 1, converted_values.0, converted_values.1)
@ -569,476 +563,15 @@ pub fn get_string_with_bytes(value: u64) -> String {
}
}
fn tree_process_data(
filtered_process_data: &[ConvertedProcessData], is_using_command: bool,
sorting_type: &ProcessSorting, is_sort_descending: bool,
) -> Vec<ConvertedProcessData> {
const BRANCH_ENDING: char = '└';
const BRANCH_VERTICAL: char = '│';
const BRANCH_SPLIT: char = '├';
const BRANCH_HORIZONTAL: char = '─';
// TODO: [Feature] Option to sort usage by total branch usage or individual value usage?
// Let's first build up a (really terrible) parent -> child mapping...
// At the same time, let's make a mapping of PID -> process data!
let mut parent_child_mapping: HashMap<Pid, IndexSet<Pid, FxBuildHasher>> = HashMap::default();
let mut pid_process_mapping: HashMap<Pid, &ConvertedProcessData> = HashMap::default(); // We actually already have this stored, but it's unfiltered... oh well.
let mut orphan_set: IndexSet<Pid, FxBuildHasher> =
IndexSet::with_hasher(FxBuildHasher::default());
let mut collapsed_set: IndexSet<Pid, FxBuildHasher> =
IndexSet::with_hasher(FxBuildHasher::default());
filtered_process_data.iter().for_each(|process| {
if let Some(ppid) = process.ppid {
orphan_set.insert(ppid);
}
orphan_set.insert(process.pid);
});
filtered_process_data.iter().for_each(|process| {
// Create a mapping for the process if it DNE.
parent_child_mapping
.entry(process.pid)
.or_insert_with(|| IndexSet::with_hasher(FxBuildHasher::default()));
pid_process_mapping.insert(process.pid, process);
if process.is_collapsed_entry {
collapsed_set.insert(process.pid);
}
// Insert its mapping to the process' parent if needed (create if it DNE).
if let Some(ppid) = process.ppid {
orphan_set.remove(&process.pid);
parent_child_mapping
.entry(ppid)
.or_insert_with(|| IndexSet::with_hasher(FxBuildHasher::default()))
.insert(process.pid);
}
});
// Keep only orphans, or promote children of orphans to a top-level orphan
// if their parents DNE in our pid to process mapping...
let old_orphan_set = orphan_set.clone();
old_orphan_set.iter().for_each(|pid| {
if pid_process_mapping.get(pid).is_none() {
// DNE! Promote the mapped children and remove the current parent...
orphan_set.remove(pid);
if let Some(children) = parent_child_mapping.get(pid) {
orphan_set.extend(children);
}
}
});
// Turn the parent-child mapping into a "list" via DFS...
let mut pids_to_explore: VecDeque<Pid> = orphan_set.into_iter().collect();
let mut explored_pids: Vec<Pid> = vec![];
let mut lines: Vec<String> = vec![];
/// A post-order traversal to correctly prune entire branches that only contain children
/// that are disabled and themselves are also disabled ~~wait that sounds wrong~~.
/// Basically, go through the hashmap, and prune out all branches that are no longer relevant.
fn prune_disabled_pids(
current_pid: Pid, parent_child_mapping: &mut HashMap<Pid, IndexSet<Pid, FxBuildHasher>>,
pid_process_mapping: &HashMap<Pid, &ConvertedProcessData>,
) -> bool {
// Let's explore all the children first, and make sure they (and their children)
// aren't all disabled...
let mut are_all_children_disabled = true;
if let Some(children) = parent_child_mapping.get(&current_pid) {
for child_pid in children.clone() {
let is_child_disabled =
prune_disabled_pids(child_pid, parent_child_mapping, pid_process_mapping);
if is_child_disabled {
if let Some(current_mapping) = parent_child_mapping.get_mut(&current_pid) {
current_mapping.remove(&child_pid);
}
} else if are_all_children_disabled {
are_all_children_disabled = false;
}
}
}
// Now consider the current pid and whether to prune...
// If the node itself is not disabled, then never prune. If it is, then check if all
// of its are disabled.
if let Some(process) = pid_process_mapping.get(&current_pid) {
if process.is_disabled_entry && are_all_children_disabled {
parent_child_mapping.remove(&current_pid);
return true;
}
}
false
/// Returns a string given a value that is converted to the closest SI-variant.
/// If the value is greater than a giga-X, then it will return a decimal place.
pub fn get_string_with_bytes(value: u64) -> String {
let converted_values = get_decimal_bytes(value);
if value >= GIGA_LIMIT {
format!("{:.*}{}", 1, converted_values.0, converted_values.1)
} else {
format!("{:.*}{}", 0, converted_values.0, converted_values.1)
}
fn sort_remaining_pids(
current_pid: Pid, sort_type: &ProcessSorting, is_sort_descending: bool,
parent_child_mapping: &mut HashMap<Pid, IndexSet<Pid, FxBuildHasher>>,
pid_process_mapping: &HashMap<Pid, &ConvertedProcessData>,
) {
// Sorting is special for tree data. So, by default, things are "sorted"
// via the DFS. Otherwise, since this is DFS of the scanned PIDs (which are in order),
// you actually get a REVERSE order --- so, you get higher PIDs earlier than lower ones.
//
// So how do we "sort"? The current idea is that:
// - We sort *per-level*. Say, I want to sort by CPU. The "first level" is sorted
// by CPU in terms of its usage. All its direct children are sorted by CPU
// with *their* siblings. Etc.
// - The default is thus PIDs in ascending order. We set it to this when
// we first enable the mode.
// So first, let's look at the children... (post-order again)
if let Some(children) = parent_child_mapping.get(&current_pid) {
let mut to_sort_vec: Vec<(Pid, &ConvertedProcessData)> = vec![];
for child_pid in children.clone() {
if let Some(child_process) = pid_process_mapping.get(&child_pid) {
to_sort_vec.push((child_pid, child_process));
}
sort_remaining_pids(
child_pid,
sort_type,
is_sort_descending,
parent_child_mapping,
pid_process_mapping,
);
}
// Now let's sort the immediate children!
sort_vec(&mut to_sort_vec, sort_type, is_sort_descending);
// Need to reverse what we got, apparently...
if let Some(current_mapping) = parent_child_mapping.get_mut(&current_pid) {
*current_mapping = to_sort_vec
.iter()
.rev()
.map(|(pid, _proc)| *pid)
.collect::<IndexSet<Pid, FxBuildHasher>>();
}
}
}
fn sort_vec(
to_sort_vec: &mut Vec<(Pid, &ConvertedProcessData)>, sort_type: &ProcessSorting,
is_sort_descending: bool,
) {
// Sort by PID first (descending)
to_sort_vec.sort_by(|a, b| utils::gen_util::get_ordering(a.1.pid, b.1.pid, false));
match sort_type {
ProcessSorting::CpuPercent => {
to_sort_vec.sort_by(|a, b| {
utils::gen_util::get_ordering(
a.1.cpu_percent_usage,
b.1.cpu_percent_usage,
is_sort_descending,
)
});
}
ProcessSorting::Mem => {
to_sort_vec.sort_by(|a, b| {
utils::gen_util::get_ordering(
a.1.mem_usage_bytes,
b.1.mem_usage_bytes,
is_sort_descending,
)
});
}
ProcessSorting::MemPercent => {
to_sort_vec.sort_by(|a, b| {
utils::gen_util::get_ordering(
a.1.mem_percent_usage,
b.1.mem_percent_usage,
is_sort_descending,
)
});
}
ProcessSorting::ProcessName => {
to_sort_vec.sort_by(|a, b| {
utils::gen_util::get_ordering(
&a.1.name.to_lowercase(),
&b.1.name.to_lowercase(),
is_sort_descending,
)
});
}
ProcessSorting::Command => to_sort_vec.sort_by(|a, b| {
utils::gen_util::get_ordering(
&a.1.command.to_lowercase(),
&b.1.command.to_lowercase(),
is_sort_descending,
)
}),
ProcessSorting::Pid => {
if is_sort_descending {
to_sort_vec.sort_by(|a, b| {
utils::gen_util::get_ordering(a.0, b.0, is_sort_descending)
});
}
}
ProcessSorting::ReadPerSecond => {
to_sort_vec.sort_by(|a, b| {
utils::gen_util::get_ordering(a.1.rps_f64, b.1.rps_f64, is_sort_descending)
});
}
ProcessSorting::WritePerSecond => {
to_sort_vec.sort_by(|a, b| {
utils::gen_util::get_ordering(a.1.wps_f64, b.1.wps_f64, is_sort_descending)
});
}
ProcessSorting::TotalRead => {
to_sort_vec.sort_by(|a, b| {
utils::gen_util::get_ordering(a.1.tr_f64, b.1.tr_f64, is_sort_descending)
});
}
ProcessSorting::TotalWrite => {
to_sort_vec.sort_by(|a, b| {
utils::gen_util::get_ordering(a.1.tw_f64, b.1.tw_f64, is_sort_descending)
});
}
ProcessSorting::State => to_sort_vec.sort_by(|a, b| {
utils::gen_util::get_ordering(
&a.1.process_state.to_lowercase(),
&b.1.process_state.to_lowercase(),
is_sort_descending,
)
}),
ProcessSorting::User => to_sort_vec.sort_by(|a, b| match (&a.1.user, &b.1.user) {
(Some(user_a), Some(user_b)) => utils::gen_util::get_ordering(
user_a.to_lowercase(),
user_b.to_lowercase(),
is_sort_descending,
),
(Some(_), None) => std::cmp::Ordering::Less,
(None, Some(_)) => std::cmp::Ordering::Greater,
(None, None) => std::cmp::Ordering::Less,
}),
ProcessSorting::Count => {
// Should never occur in this case, tree mode explicitly disables grouping.
}
}
}
/// A DFS traversal to correctly build the prefix lines (the pretty '├' and '─' lines) and
/// the correct order to the PID tree as a vector.
fn build_explored_pids(
current_pid: Pid, parent_child_mapping: &HashMap<Pid, IndexSet<Pid, FxBuildHasher>>,
prev_drawn_lines: &str, collapsed_set: &IndexSet<Pid, FxBuildHasher>,
) -> (Vec<Pid>, Vec<String>) {
let mut explored_pids: Vec<Pid> = vec![current_pid];
let mut lines: Vec<String> = vec![];
if collapsed_set.contains(&current_pid) {
return (explored_pids, lines);
} else if let Some(children) = parent_child_mapping.get(&current_pid) {
for (itx, child) in children.iter().rev().enumerate() {
let new_drawn_lines = if itx == children.len() - 1 {
format!("{} ", prev_drawn_lines)
} else {
format!("{}{} ", prev_drawn_lines, BRANCH_VERTICAL)
};
let (pid_res, branch_res) = build_explored_pids(
*child,
parent_child_mapping,
new_drawn_lines.as_str(),
collapsed_set,
);
if itx == children.len() - 1 {
lines.push(format!(
"{}{}",
prev_drawn_lines,
if !new_drawn_lines.is_empty() {
format!("{}{} ", BRANCH_ENDING, BRANCH_HORIZONTAL)
} else {
String::default()
}
));
} else {
lines.push(format!(
"{}{}",
prev_drawn_lines,
if !new_drawn_lines.is_empty() {
format!("{}{} ", BRANCH_SPLIT, BRANCH_HORIZONTAL)
} else {
String::default()
}
));
}
explored_pids.extend(pid_res);
lines.extend(branch_res);
}
}
(explored_pids, lines)
}
/// Returns the total sum of CPU, MEM%, MEM, R/s, W/s, Total Read, and Total Write via DFS traversal.
fn get_usage_of_all_children(
parent_pid: Pid, parent_child_mapping: &HashMap<Pid, IndexSet<Pid, FxBuildHasher>>,
pid_process_mapping: &HashMap<Pid, &ConvertedProcessData>,
) -> (f64, f64, u64, f64, f64, f64, f64) {
if let Some(&converted_process_data) = pid_process_mapping.get(&parent_pid) {
let (
mut cpu,
mut mem_percent,
mut mem,
mut rps,
mut wps,
mut total_read,
mut total_write,
) = (
(converted_process_data.cpu_percent_usage * 10.0).round() / 10.0,
(converted_process_data.mem_percent_usage * 10.0).round() / 10.0,
converted_process_data.mem_usage_bytes,
(converted_process_data.rps_f64 * 10.0).round() / 10.0,
(converted_process_data.wps_f64 * 10.0).round() / 10.0,
(converted_process_data.tr_f64 * 10.0).round() / 10.0,
(converted_process_data.tw_f64 * 10.0).round() / 10.0,
);
if let Some(children) = parent_child_mapping.get(&parent_pid) {
for &child_pid in children {
let (
child_cpu,
child_mem_percent,
child_mem,
child_rps,
child_wps,
child_total_read,
child_total_write,
) = get_usage_of_all_children(
child_pid,
parent_child_mapping,
pid_process_mapping,
);
cpu += child_cpu;
mem_percent += child_mem_percent;
mem += child_mem;
rps += child_rps;
wps += child_wps;
total_read += child_total_read;
total_write += child_total_write;
}
}
(cpu, mem_percent, mem, rps, wps, total_read, total_write)
} else {
(0.0_f64, 0.0_f64, 0, 0.0_f64, 0.0_f64, 0.0_f64, 0.0_f64)
}
}
let mut to_sort_vec = Vec::new();
for pid in pids_to_explore {
if let Some(process) = pid_process_mapping.get(&pid) {
to_sort_vec.push((pid, *process));
}
}
sort_vec(&mut to_sort_vec, sorting_type, is_sort_descending);
pids_to_explore = to_sort_vec.iter().map(|(pid, _proc)| *pid).collect();
while let Some(current_pid) = pids_to_explore.pop_front() {
if !prune_disabled_pids(current_pid, &mut parent_child_mapping, &pid_process_mapping) {
sort_remaining_pids(
current_pid,
sorting_type,
is_sort_descending,
&mut parent_child_mapping,
&pid_process_mapping,
);
let (pid_res, branch_res) =
build_explored_pids(current_pid, &parent_child_mapping, "", &collapsed_set);
lines.push(String::default());
lines.extend(branch_res);
explored_pids.extend(pid_res);
}
}
// Now let's "rearrange" our current list of converted process data into the correct
// order required... and we're done!
explored_pids
.iter()
.zip(lines)
.filter_map(|(pid, prefix)| match pid_process_mapping.get(pid) {
Some(process) => {
let mut p = (*process).clone();
p.process_description_prefix = Some(format!(
"{}{}{}",
prefix,
if p.is_collapsed_entry { "+ " } else { "" }, // I do the + sign thing here because I'm kinda too lazy to do it in the prefix, tbh.
if is_using_command {
&p.command
} else {
&p.name
}
));
// As part of https://github.com/ClementTsang/bottom/issues/424, also append their statistics to the parent if
// collapsed.
//
// Note that this will technically be "missing" entries, it collapses + sums based on what is visible
// since this runs *after* pruning steps.
if p.is_collapsed_entry {
if let Some(children) = parent_child_mapping.get(&p.pid) {
// Do some rounding.
p.cpu_percent_usage = (p.cpu_percent_usage * 10.0).round() / 10.0;
p.mem_percent_usage = (p.mem_percent_usage * 10.0).round() / 10.0;
p.rps_f64 = (p.rps_f64 * 10.0).round() / 10.0;
p.wps_f64 = (p.wps_f64 * 10.0).round() / 10.0;
p.tr_f64 = (p.tr_f64 * 10.0).round() / 10.0;
p.tw_f64 = (p.tw_f64 * 10.0).round() / 10.0;
for &child_pid in children {
// Let's just do a simple DFS traversal...
let (
child_cpu,
child_mem_percent,
child_mem,
child_rps,
child_wps,
child_total_read,
child_total_write,
) = get_usage_of_all_children(
child_pid,
&parent_child_mapping,
&pid_process_mapping,
);
p.cpu_percent_usage += child_cpu;
p.mem_percent_usage += child_mem_percent;
p.mem_usage_bytes += child_mem;
p.rps_f64 += child_rps;
p.wps_f64 += child_wps;
p.tr_f64 += child_total_read;
p.tw_f64 += child_total_write;
}
let disk_io_strings = get_disk_io_strings(
p.rps_f64 as u64,
p.wps_f64 as u64,
p.tr_f64 as u64,
p.tw_f64 as u64,
);
p.mem_usage_str = get_binary_bytes(p.mem_usage_bytes);
p.read_per_sec = disk_io_strings.0;
p.write_per_sec = disk_io_strings.1;
p.total_read = disk_io_strings.2;
p.total_write = disk_io_strings.3;
}
}
Some(p)
}
None => None,
})
.collect::<Vec<_>>()
}
#[cfg(feature = "battery")]
@ -1065,7 +598,12 @@ pub fn convert_battery_harvest(current_data: &DataCollection) -> Vec<ConvertedBa
num_seconds,
if num_seconds == 1 { "" } else { "s" },
),
short: format!("{}:{:02}:{:02}", time.whole_hours(), num_minutes, num_seconds),
short: format!(
"{}:{:02}:{:02}",
time.whole_hours(),
num_minutes,
num_seconds
),
}
} else if let Some(secs_till_full) = battery_harvest.secs_until_full {
let time = time::Duration::seconds(secs_till_full); // TODO: [Dependencies] Can I get rid of chrono?
@ -1081,7 +619,12 @@ pub fn convert_battery_harvest(current_data: &DataCollection) -> Vec<ConvertedBa
num_seconds,
if num_seconds == 1 { "" } else { "s" },
),
short: format!("{}:{:02}:{:02}", time.whole_hours(), num_minutes, num_seconds),
short: format!(
"{}:{:02}:{:02}",
time.whole_hours(),
num_minutes,
num_seconds
),
}
} else {
BatteryDuration::Neither

View File

@ -59,10 +59,6 @@ pub struct ConfigFlags {
pub hide_time: Option<bool>,
pub default_widget_type: Option<String>,
pub default_widget_count: Option<u64>,
pub use_old_network_legend: Option<bool>,
pub hide_table_gap: Option<bool>,

View File

@ -134,45 +134,3 @@ fn test_conflicting_temps() {
"cannot be used with one or more of the other specified arguments",
));
}
#[test]
fn test_invalid_default_widget_1() {
Command::new(get_binary_location())
.arg("-C")
.arg("./tests/empty_config.toml")
.arg("--default_widget_type")
.arg("fake_widget")
.assert()
.failure()
.stderr(predicate::str::contains("invalid widget name"));
}
#[test]
fn test_invalid_default_widget_2() {
Command::new(get_binary_location())
.arg("-C")
.arg("./tests/empty_config.toml")
.arg("--default_widget_type")
.arg("cpu")
.arg("--default_widget_count")
.arg("18446744073709551616")
.assert()
.failure()
.stderr(predicate::str::contains(
"set your widget count to be at most unsigned INT_MAX",
));
}
#[test]
fn test_missing_default_widget_type() {
Command::new(get_binary_location())
.arg("-C")
.arg("./tests/empty_config.toml")
.arg("--default_widget_count")
.arg("3")
.assert()
.failure()
.stderr(predicate::str::contains(
"The following required arguments were not provided",
));
}

View File

@ -25,7 +25,7 @@ fn test_empty_layout() {
.arg("./tests/invalid_configs/empty_layout.toml")
.assert()
.failure()
.stderr(predicate::str::contains("at least one widget"));
.stderr(predicate::str::contains("Configuration file error")); // FIXME: [Urgent] Use a const for the error pattern
}
#[test]
@ -123,23 +123,3 @@ fn test_invalid_colour_string() {
.failure()
.stderr(predicate::str::contains("invalid named colour"));
}
#[test]
fn test_lone_default_widget_count() {
Command::new(get_binary_location())
.arg("-C")
.arg("./tests/invalid_configs/lone_default_widget_count.toml")
.assert()
.failure()
.stderr(predicate::str::contains("it must be used with"));
}
#[test]
fn test_invalid_default_widget_count() {
Command::new(get_binary_location())
.arg("-C")
.arg("./tests/invalid_configs/invalid_default_widget_count.toml")
.assert()
.failure()
.stderr(predicate::str::contains("invalid number"));
}

View File

@ -1,3 +0,0 @@
[flags]
default_widget_type="CPU"
default_widget_count=18446744073709551616

View File

@ -1,2 +0,0 @@
[flags]
default_widget_count = 3