bug: fix grouping being broken after refactor (#201)

Fixes grouping not working properly after some refactoring done in 0.4.6.
Authored by Clement Tsang on 2020-08-25 03:36:29 -04:00, committed via GitHub.
parent e08eda8edc
commit 9158c5f6d5
Signed with GPG Key ID 4AEE18F83AFDEB23 (no known key found for this signature in the local database).
8 changed files with 179 additions and 141 deletions

View File

@ -254,6 +254,13 @@ impl App {
.unwrap()
.enabled = !(proc_widget_state.is_grouped);
proc_widget_state
.columns
.toggle(&processes::ProcessSorting::Count);
proc_widget_state
.columns
.toggle(&processes::ProcessSorting::Pid);
self.proc_state.force_update = Some(self.current_widget.widget_id);
}
}

View File

@ -24,6 +24,7 @@ pub enum ProcessSorting {
TotalRead,
TotalWrite,
State,
Count,
}
impl std::fmt::Display for ProcessSorting {
@ -44,6 +45,7 @@ impl std::fmt::Display for ProcessSorting {
ProcessName => "Name",
Command => "Command",
Pid => "PID",
Count => "Count",
}
)
}

View File

@ -459,8 +459,10 @@ impl Query {
Ok(())
}
pub fn check(&self, process: &ConvertedProcessData) -> bool {
self.query.iter().all(|ok| ok.check(process))
pub fn check(&self, process: &ConvertedProcessData, is_using_command: bool) -> bool {
self.query
.iter()
.all(|ok| ok.check(process, is_using_command))
}
}
@ -497,11 +499,11 @@ impl Or {
Ok(())
}
pub fn check(&self, process: &ConvertedProcessData) -> bool {
pub fn check(&self, process: &ConvertedProcessData, is_using_command: bool) -> bool {
if let Some(rhs) = &self.rhs {
self.lhs.check(process) || rhs.check(process)
self.lhs.check(process, is_using_command) || rhs.check(process, is_using_command)
} else {
self.lhs.check(process)
self.lhs.check(process, is_using_command)
}
}
}
@ -542,11 +544,11 @@ impl And {
Ok(())
}
pub fn check(&self, process: &ConvertedProcessData) -> bool {
pub fn check(&self, process: &ConvertedProcessData, is_using_command: bool) -> bool {
if let Some(rhs) = &self.rhs {
self.lhs.check(process) && rhs.check(process)
self.lhs.check(process, is_using_command) && rhs.check(process, is_using_command)
} else {
self.lhs.check(process)
self.lhs.check(process, is_using_command)
}
}
}
@ -651,7 +653,7 @@ impl Prefix {
Ok(())
}
pub fn check(&self, process: &ConvertedProcessData) -> bool {
pub fn check(&self, process: &ConvertedProcessData, is_using_command: bool) -> bool {
fn matches_condition(condition: &QueryComparison, lhs: f64, rhs: f64) -> bool {
match condition {
QueryComparison::Equal => (lhs - rhs).abs() < std::f64::EPSILON,
@ -663,7 +665,7 @@ impl Prefix {
}
if let Some(and) = &self.or {
and.check(process)
and.check(process, is_using_command)
} else if let Some((prefix_type, query_content)) = &self.regex_prefix {
if let StringQuery::Regex(r) = query_content {
match prefix_type {

View File

@ -161,6 +161,7 @@ impl Default for ProcColumn {
fn default() -> Self {
use ProcessSorting::*;
let ordered_columns = vec![
Count,
Pid,
ProcessName,
Command,
@ -233,6 +234,15 @@ impl Default for ProcColumn {
},
);
}
Count => {
column_mapping.insert(
column,
ColumnInfo {
enabled: false,
shortcut: None,
},
);
}
_ => {
column_mapping.insert(
column,
@ -369,6 +379,10 @@ impl ProcWidgetState {
// TODO: If we add customizable columns, this should pull from config
let mut columns = ProcColumn::default();
columns.set_to_sorted_index(&process_sorting_type);
if is_grouped {
columns.toggle(&ProcessSorting::Count);
columns.toggle(&ProcessSorting::Pid);
}
ProcWidgetState {
process_search_state,
@ -627,7 +641,6 @@ impl MemWidgetState {
}
}
}
pub struct MemState {
pub force_update: Option<u64>,
pub widget_states: HashMap<u64, MemWidgetState>,

View File

@ -38,10 +38,9 @@ pub struct DisplayableData {
pub network_data_tx: Vec<(f64, f64)>,
pub disk_data: Vec<Vec<String>>,
pub temp_sensor_data: Vec<Vec<String>>,
// Not the final value
pub process_data: Vec<ConvertedProcessData>,
// What's actually displayed
pub finalized_process_data_map: HashMap<u64, Vec<ConvertedProcessData>>,
pub single_process_data: Vec<ConvertedProcessData>, // Contains single process data
pub process_data: Vec<ConvertedProcessData>, // Not the final value, may be grouped or single
pub finalized_process_data_map: HashMap<u64, Vec<ConvertedProcessData>>, // What's actually displayed
pub mem_label_percent: String,
pub swap_label_percent: String,
pub mem_label_frac: String,

View File

@ -152,6 +152,7 @@ impl ProcessTableWidget for Painter {
// Draw!
let is_proc_widget_grouped = proc_widget_state.is_grouped;
let is_using_command = proc_widget_state.is_using_command;
let mem_enabled = proc_widget_state.columns.is_enabled(&ProcessSorting::Mem);
let process_rows = sliced_vec.iter().map(|process| {
Row::Data(
@ -161,7 +162,11 @@ impl ProcessTableWidget for Painter {
} else {
process.pid.to_string()
},
process.name.clone(),
if is_using_command {
process.command.clone()
} else {
process.name.clone()
},
format!("{:.1}%", process.cpu_percent_usage),
if mem_enabled {
format!("{:.0}{}", process.mem_usage_str.0, process.mem_usage_str.1)

View File

@ -34,6 +34,7 @@ pub struct ConvertedNetworkData {
pub struct ConvertedProcessData {
pub pid: u32,
pub name: String,
pub command: String,
pub cpu_percent_usage: f64,
pub mem_percent_usage: f64,
pub mem_usage_bytes: u64,
@ -372,116 +373,112 @@ pub enum ProcessNamingType {
}
pub fn convert_process_data(
current_data: &data_farmer::DataCollection, grouping_type: ProcessGroupingType,
name_type: ProcessNamingType,
current_data: &data_farmer::DataCollection,
) -> Vec<ConvertedProcessData> {
match grouping_type {
ProcessGroupingType::Ungrouped => current_data
.process_harvest
.iter()
.map(|process| {
let converted_rps = get_exact_byte_values(process.read_bytes_per_sec, false);
let converted_wps = get_exact_byte_values(process.write_bytes_per_sec, false);
let converted_total_read = get_exact_byte_values(process.total_read_bytes, false);
let converted_total_write = get_exact_byte_values(process.total_write_bytes, false);
current_data
.process_harvest
.iter()
.map(|process| {
let converted_rps = get_exact_byte_values(process.read_bytes_per_sec, false);
let converted_wps = get_exact_byte_values(process.write_bytes_per_sec, false);
let converted_total_read = get_exact_byte_values(process.total_read_bytes, false);
let converted_total_write = get_exact_byte_values(process.total_write_bytes, false);
let read_per_sec = format!("{:.*}{}/s", 0, converted_rps.0, converted_rps.1);
let write_per_sec = format!("{:.*}{}/s", 0, converted_wps.0, converted_wps.1);
let total_read =
format!("{:.*}{}", 0, converted_total_read.0, converted_total_read.1);
let total_write = format!(
"{:.*}{}",
0, converted_total_write.0, converted_total_write.1
);
let read_per_sec = format!("{:.*}{}/s", 0, converted_rps.0, converted_rps.1);
let write_per_sec = format!("{:.*}{}/s", 0, converted_wps.0, converted_wps.1);
let total_read = format!("{:.*}{}", 0, converted_total_read.0, converted_total_read.1);
let total_write = format!(
"{:.*}{}",
0, converted_total_write.0, converted_total_write.1
);
ConvertedProcessData {
pid: process.pid,
name: match name_type {
ProcessNamingType::Name => process.name.to_string(),
ProcessNamingType::Path => process.command.to_string(),
},
cpu_percent_usage: process.cpu_usage_percent,
mem_percent_usage: process.mem_usage_percent,
mem_usage_bytes: process.mem_usage_bytes,
mem_usage_str: get_exact_byte_values(process.mem_usage_bytes, false),
group_pids: vec![process.pid],
read_per_sec,
write_per_sec,
total_read,
total_write,
rps_f64: process.read_bytes_per_sec as f64,
wps_f64: process.write_bytes_per_sec as f64,
tr_f64: process.total_read_bytes as f64,
tw_f64: process.total_write_bytes as f64,
process_state: process.process_state.to_owned(),
}
ConvertedProcessData {
pid: process.pid,
name: process.name.to_string(),
command: process.command.to_string(),
cpu_percent_usage: process.cpu_usage_percent,
mem_percent_usage: process.mem_usage_percent,
mem_usage_bytes: process.mem_usage_bytes,
mem_usage_str: get_exact_byte_values(process.mem_usage_bytes, false),
group_pids: vec![process.pid],
read_per_sec,
write_per_sec,
total_read,
total_write,
rps_f64: process.read_bytes_per_sec as f64,
wps_f64: process.write_bytes_per_sec as f64,
tr_f64: process.total_read_bytes as f64,
tw_f64: process.total_write_bytes as f64,
process_state: process.process_state.to_owned(),
}
})
.collect::<Vec<_>>()
}
pub fn group_process_data(
single_process_data: &[ConvertedProcessData], is_using_command: ProcessNamingType,
) -> Vec<ConvertedProcessData> {
let mut grouped_hashmap: HashMap<String, SingleProcessData> = std::collections::HashMap::new();
single_process_data.iter().for_each(|process| {
let entry = grouped_hashmap
.entry(match is_using_command {
ProcessNamingType::Name => process.name.to_string(),
ProcessNamingType::Path => process.command.to_string(),
})
.collect::<Vec<_>>(),
ProcessGroupingType::Grouped => {
let mut grouped_hashmap: HashMap<String, SingleProcessData> =
std::collections::HashMap::new();
current_data.process_harvest.iter().for_each(|process| {
let entry = grouped_hashmap
.entry(match name_type {
ProcessNamingType::Name => process.name.to_string(),
ProcessNamingType::Path => process.command.to_string(),
})
.or_insert(SingleProcessData {
pid: process.pid,
..SingleProcessData::default()
});
(*entry).cpu_percent_usage += process.cpu_usage_percent;
(*entry).mem_percent_usage += process.mem_usage_percent;
(*entry).mem_usage_bytes += process.mem_usage_bytes;
(*entry).group_pids.push(process.pid);
(*entry).read_per_sec += process.read_bytes_per_sec;
(*entry).write_per_sec += process.write_bytes_per_sec;
(*entry).total_read += process.total_read_bytes;
(*entry).total_write += process.total_write_bytes;
.or_insert(SingleProcessData {
pid: process.pid,
..SingleProcessData::default()
});
grouped_hashmap
.iter()
.map(|(identifier, process_details)| {
let p = process_details.clone();
let converted_rps = get_exact_byte_values(p.read_per_sec, false);
let converted_wps = get_exact_byte_values(p.write_per_sec, false);
let converted_total_read = get_exact_byte_values(p.total_read, false);
let converted_total_write = get_exact_byte_values(p.total_write, false);
(*entry).cpu_percent_usage += process.cpu_percent_usage;
(*entry).mem_percent_usage += process.mem_percent_usage;
(*entry).mem_usage_bytes += process.mem_usage_bytes;
(*entry).group_pids.push(process.pid);
(*entry).read_per_sec += process.rps_f64 as u64;
(*entry).write_per_sec += process.wps_f64 as u64;
(*entry).total_read += process.tr_f64 as u64;
(*entry).total_write += process.tw_f64 as u64;
});
let read_per_sec = format!("{:.*}{}/s", 0, converted_rps.0, converted_rps.1);
let write_per_sec = format!("{:.*}{}/s", 0, converted_wps.0, converted_wps.1);
let total_read =
format!("{:.*}{}", 0, converted_total_read.0, converted_total_read.1);
let total_write = format!(
"{:.*}{}",
0, converted_total_write.0, converted_total_write.1
);
grouped_hashmap
.iter()
.map(|(identifier, process_details)| {
let p = process_details.clone();
let converted_rps = get_exact_byte_values(p.read_per_sec, false);
let converted_wps = get_exact_byte_values(p.write_per_sec, false);
let converted_total_read = get_exact_byte_values(p.total_read, false);
let converted_total_write = get_exact_byte_values(p.total_write, false);
ConvertedProcessData {
pid: p.pid,
name: identifier.to_string(),
cpu_percent_usage: p.cpu_percent_usage,
mem_percent_usage: p.mem_percent_usage,
mem_usage_bytes: p.mem_usage_bytes,
mem_usage_str: get_exact_byte_values(p.mem_usage_bytes, false),
group_pids: p.group_pids,
read_per_sec,
write_per_sec,
total_read,
total_write,
rps_f64: p.read_per_sec as f64,
wps_f64: p.write_per_sec as f64,
tr_f64: p.total_read as f64,
tw_f64: p.total_write as f64,
process_state: p.process_state,
}
})
.collect::<Vec<_>>()
}
}
let read_per_sec = format!("{:.*}{}/s", 0, converted_rps.0, converted_rps.1);
let write_per_sec = format!("{:.*}{}/s", 0, converted_wps.0, converted_wps.1);
let total_read = format!("{:.*}{}", 0, converted_total_read.0, converted_total_read.1);
let total_write = format!(
"{:.*}{}",
0, converted_total_write.0, converted_total_write.1
);
ConvertedProcessData {
pid: p.pid,
name: identifier.to_string(),
command: identifier.to_string(),
cpu_percent_usage: p.cpu_percent_usage,
mem_percent_usage: p.mem_percent_usage,
mem_usage_bytes: p.mem_usage_bytes,
mem_usage_str: get_exact_byte_values(p.mem_usage_bytes, false),
group_pids: p.group_pids,
read_per_sec,
write_per_sec,
total_read,
total_write,
rps_f64: p.read_per_sec as f64,
wps_f64: p.write_per_sec as f64,
tr_f64: p.total_read as f64,
tw_f64: p.total_write as f64,
process_state: p.process_state,
}
})
.collect::<Vec<_>>()
}
pub fn convert_battery_harvest(

View File

@ -434,31 +434,34 @@ pub fn update_all_process_lists(app: &mut App) {
}
pub fn update_final_process_list(app: &mut App, widget_id: u64) {
let is_invalid_or_blank = match app.proc_state.widget_states.get(&widget_id) {
Some(process_state) => process_state
.process_search_state
.search_state
.is_invalid_or_blank_search(),
None => false,
let (is_invalid_or_blank, is_using_command) = match app.proc_state.widget_states.get(&widget_id)
{
Some(process_state) => (
process_state
.process_search_state
.search_state
.is_invalid_or_blank_search(),
process_state.is_using_command,
),
None => (false, false),
};
let is_grouped = app.is_grouped(widget_id);
if !app.is_frozen {
if let Some(proc_widget_state) = app.proc_state.get_mut_widget_state(widget_id) {
app.canvas_data.process_data = convert_process_data(
&app.data_collection,
if is_grouped {
ProcessGroupingType::Grouped
} else {
ProcessGroupingType::Ungrouped
},
if proc_widget_state.is_using_command {
ProcessNamingType::Path
} else {
ProcessNamingType::Name
},
);
}
app.canvas_data.single_process_data = convert_process_data(&app.data_collection);
}
if is_grouped {
app.canvas_data.process_data = group_process_data(
&app.canvas_data.single_process_data,
if is_using_command {
ProcessNamingType::Path
} else {
ProcessNamingType::Name
},
);
} else {
app.canvas_data.process_data = app.canvas_data.single_process_data.clone();
}
let process_filter = app.get_process_filter(widget_id);
@ -469,7 +472,7 @@ pub fn update_final_process_list(app: &mut App, widget_id: u64) {
.filter(|process| {
if !is_invalid_or_blank {
if let Some(process_filter) = process_filter {
process_filter.check(&process)
process_filter.check(&process, is_using_command)
} else {
true
}
@ -543,7 +546,7 @@ pub fn sort_process_data(
)
});
}
ProcessSorting::ProcessName | ProcessSorting::Command => {
ProcessSorting::ProcessName => {
// Don't repeat if false... it sorts by name by default anyways.
if proc_widget_state.process_sorting_reverse {
to_sort_vec.sort_by(|a, b| {
@ -555,6 +558,13 @@ pub fn sort_process_data(
})
}
}
ProcessSorting::Command => to_sort_vec.sort_by(|a, b| {
utils::gen_util::get_ordering(
&a.command.to_lowercase(),
&b.command.to_lowercase(),
proc_widget_state.process_sorting_reverse,
)
}),
ProcessSorting::Pid => {
if !proc_widget_state.is_grouped {
to_sort_vec.sort_by(|a, b| {
@ -609,6 +619,9 @@ pub fn sort_process_data(
proc_widget_state.process_sorting_reverse,
)
}),
ProcessSorting::Count => {
// Nothing should happen here.
}
}
}