refactor: migrate memory collection to sysinfo for all platforms (#1036)

* cleanup

* refactor: remove heim memory code

* add missing updates, remove heim memory feature

* restrict export visibility

* some refactoring, remove smol

* gpu feature
Clement Tsang 2023-03-03 00:06:19 -05:00 committed by GitHub
parent c2d94900f3
commit 8cc763cc1b
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 115 additions and 475 deletions
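For context (this sketch is not part of the commit): the direction of the change is to read RAM and swap from an already-refreshed sysinfo::System on every platform instead of going through heim. A minimal standalone sketch of that pattern, assuming sysinfo 0.26 where memory values are reported in bytes (hence the `/ 1024` to KiB, matching the diff):

```rust
// Minimal sketch, not part of the commit; uses only sysinfo calls that
// also appear in the diff below (refresh_memory, total/used memory and swap).
use sysinfo::{System, SystemExt};

fn main() {
    let mut sys = System::new();
    sys.refresh_memory();

    let ram_total_kib = sys.total_memory() / 1024;
    let ram_used_kib = sys.used_memory() / 1024;
    let swap_total_kib = sys.total_swap() / 1024;
    let swap_used_kib = sys.used_swap() / 1024;

    println!("RAM:  {ram_used_kib}/{ram_total_kib} KiB");
    println!("swap: {swap_used_kib}/{swap_total_kib} KiB");
}
```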

Cargo.lock generated

@@ -240,7 +240,6 @@ dependencies = [
"regex",
"serde",
"serde_json",
"smol",
"starship-battery",
"sysctl",
"sysinfo",
@@ -821,7 +820,6 @@ checksum = "b8a653442b9bdd11a77d3753a60443c60c4437d3acac8e6c3d4a6a9acd7cceed"
dependencies = [
"heim-common",
"heim-disk",
"heim-memory",
"heim-net",
"heim-runtime",
"heim-sensors",
@@ -863,21 +861,6 @@ dependencies = [
"winapi",
]
[[package]]
name = "heim-memory"
version = "0.1.0-rc.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6fa81bccc5e81ab0c68f520ecba5cb42817bacabfc6120160de886754ad0e3e1"
dependencies = [
"cfg-if",
"heim-common",
"heim-runtime",
"lazy_static",
"libc",
"mach",
"winapi",
]
[[package]]
name = "heim-net"
version = "0.1.0-rc.1"
@@ -1656,9 +1639,9 @@ dependencies = [
[[package]]
name = "sysinfo"
version = "0.26.7"
version = "0.26.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c375d5fd899e32847b8566e10598d6e9f1d9b55ec6de3cdf9e7da4bdc51371bc"
checksum = "5c18a6156d1f27a9592ee18c1a846ca8dd5c258b7179fc193ae87c74ebb666f5"
dependencies = [
"cfg-if",
"core-foundation-sys 0.8.3",


@@ -108,16 +108,15 @@ unicode-width = "0.1.10"
libc = "0.2.124"
[target.'cfg(target_os = "linux")'.dependencies]
heim = { version = "0.1.0-rc.1", features = ["disk", "memory", "net", "sensors"] }
heim = { version = "0.1.0-rc.1", features = ["disk", "net", "sensors"] }
procfs = { version = "0.15.1", default-features = false }
smol = "1.2.5"
[target.'cfg(target_os = "macos")'.dependencies]
heim = { version = "0.1.0-rc.1", features = ["disk", "memory", "net"] }
heim = { version = "0.1.0-rc.1", features = ["disk", "net"] }
mach2 = "0.4.1"
[target.'cfg(target_os = "windows")'.dependencies]
heim = { version = "0.1.0-rc.1", features = ["disk", "memory"] }
heim = { version = "0.1.0-rc.1", features = ["disk"] }
windows = { version = "0.44.0", features = [
"Win32_System_Threading",
"Win32_Foundation",


@@ -12,6 +12,8 @@ use starship_battery::{Battery, Manager};
use sysinfo::{System, SystemExt};
use self::memory::MemCollect;
use super::DataFilters;
use crate::app::layout_manager::UsedWidgets;
@@ -155,15 +157,11 @@ impl DataCollector {
}
pub fn init(&mut self) {
#[cfg(target_os = "linux")]
{
futures::executor::block_on(self.initialize_memory_size());
}
self.sys.refresh_memory();
self.mem_total_kb = self.sys.total_memory();
#[cfg(not(target_os = "linux"))]
{
self.sys.refresh_memory();
self.mem_total_kb = self.sys.total_memory();
// TODO: Would be good to get this and network list running on a timer instead...?
// Refresh components list once...
if self.widgets_to_harvest.use_temp {
@@ -211,15 +209,6 @@ impl DataCollector {
self.data.cleanup();
}
#[cfg(target_os = "linux")]
async fn initialize_memory_size(&mut self) {
self.mem_total_kb = if let Ok(mem) = heim::memory::memory().await {
mem.total().get::<heim::units::information::kilobyte>()
} else {
1
};
}
pub fn set_data_collection(&mut self, used_widgets: UsedWidgets) {
self.widgets_to_harvest = used_widgets;
}
@@ -245,6 +234,10 @@ impl DataCollector {
self.sys.refresh_cpu();
}
if self.widgets_to_harvest.use_mem {
self.sys.refresh_memory();
}
#[cfg(not(target_os = "linux"))]
{
if self.widgets_to_harvest.use_proc {
@@ -266,9 +259,6 @@ impl DataCollector {
if self.widgets_to_harvest.use_disk {
self.sys.refresh_disks();
}
if self.widgets_to_harvest.use_mem {
self.sys.refresh_memory();
}
}
}
@@ -378,6 +368,30 @@ impl DataCollector {
}
}
if self.widgets_to_harvest.use_mem {
let MemCollect {
ram,
swap,
#[cfg(feature = "gpu")]
gpus,
#[cfg(feature = "zfs")]
arc,
} = memory::get_mem_data(&self.sys, self.widgets_to_harvest.use_gpu);
self.data.memory = ram;
self.data.swap = swap;
#[cfg(feature = "zfs")]
{
self.data.arc = arc;
}
#[cfg(feature = "gpu")]
{
self.data.gpu = gpus;
}
}
let network_data_fut = {
#[cfg(any(target_os = "windows", target_os = "freebsd"))]
{
@@ -403,23 +417,7 @@ impl DataCollector {
)
}
};
let mem_data_fut = {
#[cfg(not(target_os = "freebsd"))]
{
memory::get_mem_data(
self.widgets_to_harvest.use_mem,
self.widgets_to_harvest.use_gpu,
)
}
#[cfg(target_os = "freebsd")]
{
memory::get_mem_data(
&self.sys,
self.widgets_to_harvest.use_mem,
self.widgets_to_harvest.use_gpu,
)
}
};
let disk_data_fut = disks::get_disk_usage(
self.widgets_to_harvest.use_disk,
&self.filters.disk_filter,
@@ -427,12 +425,8 @@ impl DataCollector {
);
let disk_io_usage_fut = disks::get_io_usage(self.widgets_to_harvest.use_disk);
let (net_data, mem_res, disk_res, io_res) = join!(
network_data_fut,
mem_data_fut,
disk_data_fut,
disk_io_usage_fut,
);
let (net_data, disk_res, io_res) =
join!(network_data_fut, disk_data_fut, disk_io_usage_fut,);
if let Ok(net_data) = net_data {
if let Some(net_data) = &net_data {
@@ -442,24 +436,6 @@ impl DataCollector {
self.data.network = net_data;
}
if let Ok(memory) = mem_res.ram {
self.data.memory = memory;
}
if let Ok(swap) = mem_res.swap {
self.data.swap = swap;
}
#[cfg(feature = "zfs")]
if let Ok(arc) = mem_res.arc {
self.data.arc = arc;
}
#[cfg(feature = "gpu")]
if let Ok(gpu) = mem_res.gpus {
self.data.gpu = gpu;
}
if let Ok(disks) = disk_res {
self.data.disks = disks;
}


@@ -1,10 +1,21 @@
//! Data collection for memory.
//!
//! For Linux, macOS, and Windows, this is handled by Heim. On FreeBSD it is handled by sysinfo.
//! Memory data collection.
cfg_if::cfg_if! {
if #[cfg(any(target_os = "freebsd", target_os = "linux", target_os = "macos", target_os = "windows"))] {
pub mod general;
pub use self::general::*;
}
pub mod sysinfo;
pub(crate) use self::sysinfo::*;
#[derive(Debug, Clone, Default)]
pub struct MemHarvest {
pub total_kib: u64,
pub used_kib: u64,
pub use_percent: Option<f64>,
}
#[derive(Debug)]
pub struct MemCollect {
pub ram: Option<MemHarvest>,
pub swap: Option<MemHarvest>,
#[cfg(feature = "zfs")]
pub arc: Option<MemHarvest>,
#[cfg(feature = "gpu")]
pub gpus: Option<Vec<(String, MemHarvest)>>,
}
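As a hedged illustration of the convention these new types encode (the `harvest` helper below is hypothetical, not from the commit): both counters are stored in KiB, and use_percent is None whenever the total is zero so callers can skip the gauge instead of dividing by zero.

```rust
// Hedged sketch mirroring the struct added in this commit.
#[derive(Debug, Clone, Default)]
pub struct MemHarvest {
    pub total_kib: u64,
    pub used_kib: u64,
    pub use_percent: Option<f64>,
}

// Hypothetical helper showing the same use_percent logic used throughout the diff.
fn harvest(total_kib: u64, used_kib: u64) -> MemHarvest {
    MemHarvest {
        total_kib,
        used_kib,
        use_percent: if total_kib == 0 {
            None
        } else {
            Some(used_kib as f64 / total_kib as f64 * 100.0)
        },
    }
}

fn main() {
    // 4 GiB used out of 16 GiB -> 25%.
    println!("{:?}", harvest(16 * 1024 * 1024, 4 * 1024 * 1024));
    // A zero-sized swap device reports None rather than NaN.
    println!("{:?}", harvest(0, 0));
}
```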


@@ -1,26 +0,0 @@
cfg_if::cfg_if! {
if #[cfg(any(target_os = "linux", target_os = "macos", target_os = "windows"))] {
pub mod heim;
pub use self::heim::*;
} else if #[cfg(target_os = "freebsd")] {
pub mod sysinfo;
pub use self::sysinfo::*;
}
}
#[derive(Debug, Clone, Default)]
pub struct MemHarvest {
pub mem_total_in_kib: u64,
pub mem_used_in_kib: u64,
pub use_percent: Option<f64>,
}
#[derive(Debug)]
pub struct MemCollect {
pub ram: crate::utils::error::Result<Option<MemHarvest>>,
pub swap: crate::utils::error::Result<Option<MemHarvest>>,
#[cfg(feature = "zfs")]
pub arc: crate::utils::error::Result<Option<MemHarvest>>,
#[cfg(feature = "gpu")]
pub gpus: crate::utils::error::Result<Option<Vec<(String, MemHarvest)>>>,
}


@@ -1,296 +0,0 @@
//! Data collection for memory via heim.
use crate::data_harvester::memory::{MemCollect, MemHarvest};
pub async fn get_mem_data(actually_get: bool, _get_gpu: bool) -> MemCollect {
if !actually_get {
MemCollect {
ram: Ok(None),
swap: Ok(None),
#[cfg(feature = "zfs")]
arc: Ok(None),
#[cfg(feature = "gpu")]
gpus: Ok(None),
}
} else {
MemCollect {
ram: get_ram_data().await,
swap: get_swap_data().await,
#[cfg(feature = "zfs")]
arc: get_arc_data().await,
#[cfg(feature = "gpu")]
gpus: if _get_gpu {
get_gpu_data().await
} else {
Ok(None)
},
}
}
}
pub async fn get_ram_data() -> crate::utils::error::Result<Option<MemHarvest>> {
let (mem_total_in_kib, mem_used_in_kib) = {
#[cfg(target_os = "linux")]
{
// TODO: [OPT] is this efficient?
use smol::fs::read_to_string;
let meminfo = read_to_string("/proc/meminfo").await?;
// All values are in KiB by default.
let mut mem_total = 0;
let mut cached = 0;
let mut s_reclaimable = 0;
let mut shmem = 0;
let mut buffers = 0;
let mut mem_free = 0;
let mut keys_read: u8 = 0;
const TOTAL_KEYS_NEEDED: u8 = 6;
for line in meminfo.lines() {
if let Some((label, value)) = line.split_once(':') {
let to_write = match label {
"MemTotal" => &mut mem_total,
"MemFree" => &mut mem_free,
"Buffers" => &mut buffers,
"Cached" => &mut cached,
"Shmem" => &mut shmem,
"SReclaimable" => &mut s_reclaimable,
_ => {
continue;
}
};
if let Some((number, _unit)) = value.trim_start().split_once(' ') {
// Parse the value, remember it's in KiB!
if let Ok(number) = number.parse::<u64>() {
*to_write = number;
// We only need a few keys, so we can bail early.
keys_read += 1;
if keys_read == TOTAL_KEYS_NEEDED {
break;
}
}
}
}
}
// Let's preface this by saying that memory usage calculations are... not straightforward.
// There are conflicting implementations everywhere.
//
// Now that we've added this preface (mainly for future reference), the current implementation below for usage
// is based on htop's calculation formula. See
// https://github.com/htop-dev/htop/blob/976c6123f41492aaf613b9d172eef1842fb7b0a3/linux/LinuxProcessList.c#L1584
// for implementation details as of writing.
//
// Another implementation, commonly used in other things, is to skip the shmem part of the calculation,
// which matches gopsutil and stuff like free.
let total = mem_total;
let cached_mem = cached + s_reclaimable - shmem;
let used_diff = mem_free + cached_mem + buffers;
let used = if total >= used_diff {
total - used_diff
} else {
total - mem_free
};
(total, used)
}
#[cfg(target_os = "macos")]
{
let memory = heim::memory::memory().await?;
use heim::memory::os::macos::MemoryExt;
use heim::units::information::kibibyte;
(
memory.total().get::<kibibyte>(),
memory.active().get::<kibibyte>() + memory.wire().get::<kibibyte>(),
)
}
#[cfg(target_os = "windows")]
{
let memory = heim::memory::memory().await?;
use heim::units::information::kibibyte;
let mem_total_in_kib = memory.total().get::<kibibyte>();
(
mem_total_in_kib,
mem_total_in_kib - memory.available().get::<kibibyte>(),
)
}
#[cfg(target_os = "freebsd")]
{
let mut s = System::new();
s.refresh_memory();
(s.total_memory(), s.used_memory())
}
};
Ok(Some(MemHarvest {
mem_total_in_kib,
mem_used_in_kib,
use_percent: if mem_total_in_kib == 0 {
None
} else {
Some(mem_used_in_kib as f64 / mem_total_in_kib as f64 * 100.0)
},
}))
}
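To make the htop-style calculation in the removed Linux path concrete, here is a small worked example with made-up /proc/meminfo values (all in KiB); the names mirror the code above, the numbers are invented.

```rust
// Worked example of: used = total - (free + (Cached + SReclaimable - Shmem) + Buffers)
fn main() {
    let (mem_total, mem_free, buffers) = (16_384_000_u64, 8_192_000_u64, 256_000_u64);
    let (cached, s_reclaimable, shmem) = (4_096_000_u64, 512_000_u64, 128_000_u64);

    let cached_mem = cached + s_reclaimable - shmem; // 4_480_000
    let used_diff = mem_free + cached_mem + buffers; // 12_928_000
    let used = if mem_total >= used_diff {
        mem_total - used_diff // 3_456_000 KiB
    } else {
        mem_total - mem_free // fallback, as in the removed code
    };
    println!("used {used} KiB of {mem_total} KiB");
}
```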
pub async fn get_swap_data() -> crate::utils::error::Result<Option<MemHarvest>> {
#[cfg(any(target_os = "linux", target_os = "macos", target_os = "windows"))]
let memory = heim::memory::swap().await?;
#[cfg(target_os = "freebsd")]
let mut memory = System::new();
let (mem_total_in_kib, mem_used_in_kib) = {
#[cfg(target_os = "linux")]
{
// Similar story to above - heim parses this information incorrectly as far as I can tell, so kilobytes = kibibytes here.
use heim::units::information::kilobyte;
(
memory.total().get::<kilobyte>(),
memory.used().get::<kilobyte>(),
)
}
#[cfg(any(target_os = "windows", target_os = "macos"))]
{
use heim::units::information::kibibyte;
(
memory.total().get::<kibibyte>(),
memory.used().get::<kibibyte>(),
)
}
#[cfg(target_os = "freebsd")]
{
memory.refresh_memory();
(memory.total_swap(), memory.used_swap())
}
};
Ok(Some(MemHarvest {
mem_total_in_kib,
mem_used_in_kib,
use_percent: if mem_total_in_kib == 0 {
None
} else {
Some(mem_used_in_kib as f64 / mem_total_in_kib as f64 * 100.0)
},
}))
}
#[cfg(feature = "zfs")]
pub async fn get_arc_data() -> crate::utils::error::Result<Option<MemHarvest>> {
let (mem_total_in_kib, mem_used_in_kib) = {
#[cfg(target_os = "linux")]
{
let mut mem_arc = 0;
let mut mem_total = 0;
let mut zfs_keys_read: u8 = 0;
const ZFS_KEYS_NEEDED: u8 = 2;
use smol::fs::read_to_string;
let arcinfo = read_to_string("/proc/spl/kstat/zfs/arcstats").await?;
for line in arcinfo.lines() {
if let Some((label, value)) = line.split_once(' ') {
let to_write = match label {
"size" => &mut mem_arc,
"memory_all_bytes" => &mut mem_total,
_ => {
continue;
}
};
if let Some((_type, number)) = value.trim_start().rsplit_once(' ') {
// Parse the value, remember it's in bytes!
if let Ok(number) = number.parse::<u64>() {
*to_write = number;
// We only need a few keys, so we can bail early.
zfs_keys_read += 1;
if zfs_keys_read == ZFS_KEYS_NEEDED {
break;
}
}
}
}
}
(mem_total / 1024, mem_arc / 1024)
}
#[cfg(target_os = "freebsd")]
{
use sysctl::Sysctl;
if let (Ok(mem_arc_value), Ok(mem_sys_value)) = (
sysctl::Ctl::new("kstat.zfs.misc.arcstats.size"),
sysctl::Ctl::new("hw.physmem"),
) {
if let (Ok(sysctl::CtlValue::U64(arc)), Ok(sysctl::CtlValue::Ulong(mem))) =
(mem_arc_value.value(), mem_sys_value.value())
{
(mem / 1024, arc / 1024)
} else {
(0, 0)
}
} else {
(0, 0)
}
}
#[cfg(target_os = "macos")]
{
(0, 0)
}
#[cfg(target_os = "windows")]
{
(0, 0)
}
};
Ok(Some(MemHarvest {
mem_total_in_kib,
mem_used_in_kib,
use_percent: if mem_total_in_kib == 0 {
None
} else {
Some(mem_used_in_kib as f64 / mem_total_in_kib as f64 * 100.0)
},
}))
}
// FIXME: Can unify this with the sysinfo impl
#[cfg(feature = "nvidia")]
pub async fn get_gpu_data() -> crate::utils::error::Result<Option<Vec<(String, MemHarvest)>>> {
use crate::data_harvester::nvidia::NVML_DATA;
if let Ok(nvml) = &*NVML_DATA {
if let Ok(ngpu) = nvml.device_count() {
let mut results = Vec::with_capacity(ngpu as usize);
for i in 0..ngpu {
if let Ok(device) = nvml.device_by_index(i) {
if let (Ok(name), Ok(mem)) = (device.name(), device.memory_info()) {
// add device memory in bytes
let mem_total_in_kib = mem.total / 1024;
let mem_used_in_kib = mem.used / 1024;
results.push((
name,
MemHarvest {
mem_total_in_kib,
mem_used_in_kib,
use_percent: if mem_total_in_kib == 0 {
None
} else {
Some(mem_used_in_kib as f64 / mem_total_in_kib as f64 * 100.0)
},
},
));
}
}
}
Ok(Some(results))
} else {
Ok(None)
}
} else {
Ok(None)
}
}


@@ -1,65 +1,56 @@
//! Data collection for memory via sysinfo.
//! Collecting memory data using sysinfo.
use sysinfo::{System, SystemExt};
use crate::data_harvester::memory::{MemCollect, MemHarvest};
pub async fn get_mem_data(sys: &System, actually_get: bool, _get_gpu: bool) -> MemCollect {
if !actually_get {
MemCollect {
ram: Ok(None),
swap: Ok(None),
#[cfg(feature = "zfs")]
arc: Ok(None),
#[cfg(feature = "gpu")]
gpus: Ok(None),
}
} else {
MemCollect {
ram: get_ram_data(sys).await,
swap: get_swap_data(sys).await,
#[cfg(feature = "zfs")]
arc: get_arc_data().await,
#[cfg(feature = "gpu")]
gpus: if _get_gpu {
get_gpu_data().await
} else {
Ok(None)
},
}
/// Returns all memory data.
pub(crate) fn get_mem_data(sys: &System, _get_gpu: bool) -> MemCollect {
MemCollect {
ram: get_ram_data(sys),
swap: get_swap_data(sys),
#[cfg(feature = "zfs")]
arc: get_arc_data(),
#[cfg(feature = "gpu")]
gpus: if _get_gpu { get_gpu_data() } else { None },
}
}
pub async fn get_ram_data(sys: &System) -> crate::utils::error::Result<Option<MemHarvest>> {
let (mem_total_in_kib, mem_used_in_kib) = (sys.total_memory() / 1024, sys.used_memory() / 1024);
/// Returns RAM usage.
pub(crate) fn get_ram_data(sys: &System) -> Option<MemHarvest> {
let mem_used_in_kib = sys.used_memory() / 1024;
let mem_total_in_kib = sys.total_memory() / 1024;
Ok(Some(MemHarvest {
mem_total_in_kib,
mem_used_in_kib,
Some(MemHarvest {
total_kib: mem_total_in_kib,
used_kib: mem_used_in_kib,
use_percent: if mem_total_in_kib == 0 {
None
} else {
Some(mem_used_in_kib as f64 / mem_total_in_kib as f64 * 100.0)
},
}))
})
}
pub async fn get_swap_data(sys: &System) -> crate::utils::error::Result<Option<MemHarvest>> {
let (mem_total_in_kib, mem_used_in_kib) = (sys.total_swap() / 1024, sys.used_swap() / 1024);
/// Returns SWAP usage.
pub(crate) fn get_swap_data(sys: &System) -> Option<MemHarvest> {
let mem_used_in_kib = sys.used_swap() / 1024;
let mem_total_in_kib = sys.total_swap() / 1024;
Ok(Some(MemHarvest {
mem_total_in_kib,
mem_used_in_kib,
Some(MemHarvest {
total_kib: mem_total_in_kib,
used_kib: mem_used_in_kib,
use_percent: if mem_total_in_kib == 0 {
None
} else {
Some(mem_used_in_kib as f64 / mem_total_in_kib as f64 * 100.0)
},
}))
})
}
/// Return ARC usage.
#[cfg(feature = "zfs")]
pub async fn get_arc_data() -> crate::utils::error::Result<Option<MemHarvest>> {
pub(crate) fn get_arc_data() -> Option<MemHarvest> {
let (mem_total_in_kib, mem_used_in_kib) = {
#[cfg(target_os = "freebsd")]
{
@@ -80,7 +71,7 @@ pub async fn get_arc_data() -> crate::utils::error::Result<Option<MemHarvest>> {
}
}
};
Ok(Some(MemHarvest {
Some(MemHarvest {
mem_total_in_kib,
mem_used_in_kib,
use_percent: if mem_total_in_kib == 0 {
@@ -88,12 +79,14 @@ pub async fn get_arc_data() -> crate::utils::error::Result<Option<MemHarvest>> {
} else {
Some(mem_used_in_kib as f64 / mem_total_in_kib as f64 * 100.0)
},
}))
})
}
/// Return GPU data. Currently only supports NVIDIA cards.
#[cfg(feature = "nvidia")]
pub async fn get_gpu_data() -> crate::utils::error::Result<Option<Vec<(String, MemHarvest)>>> {
pub(crate) fn get_gpu_data() -> Option<Vec<(String, MemHarvest)>> {
use crate::data_harvester::nvidia::NVML_DATA;
if let Ok(nvml) = &*NVML_DATA {
if let Ok(ngpu) = nvml.device_count() {
let mut results = Vec::with_capacity(ngpu as usize);
@@ -106,8 +99,8 @@ pub async fn get_gpu_data() -> crate::utils::error::Result<Option<Vec<(String, M
results.push((
name,
MemHarvest {
mem_total_in_kib,
mem_used_in_kib,
total_kib: mem_total_in_kib,
used_kib: mem_used_in_kib,
use_percent: if mem_total_in_kib == 0 {
None
} else {
@@ -118,11 +111,11 @@ pub async fn get_gpu_data() -> crate::utils::error::Result<Option<Vec<(String, M
}
}
}
Ok(Some(results))
Some(results)
} else {
Ok(None)
None
}
} else {
Ok(None)
None
}
}


@@ -262,7 +262,7 @@ pub fn convert_mem_labels(
}
(
if current_data.memory_harvest.mem_total_in_kib > 0 {
if current_data.memory_harvest.total_kib > 0 {
Some((
format!(
"{:3.0}%",
@@ -270,14 +270,14 @@
),
{
let (unit, denominator) = return_unit_and_denominator_for_mem_kib(
current_data.memory_harvest.mem_total_in_kib,
current_data.memory_harvest.total_kib,
);
format!(
" {:.1}{}/{:.1}{}",
current_data.memory_harvest.mem_used_in_kib as f64 / denominator,
current_data.memory_harvest.used_kib as f64 / denominator,
unit,
(current_data.memory_harvest.mem_total_in_kib as f64 / denominator),
(current_data.memory_harvest.total_kib as f64 / denominator),
unit
)
},
@@ -285,7 +285,7 @@
} else {
None
},
if current_data.swap_harvest.mem_total_in_kib > 0 {
if current_data.swap_harvest.total_kib > 0 {
Some((
format!(
"{:3.0}%",
@@ -293,14 +293,14 @@
),
{
let (unit, denominator) = return_unit_and_denominator_for_mem_kib(
current_data.swap_harvest.mem_total_in_kib,
current_data.swap_harvest.total_kib,
);
format!(
" {:.1}{}/{:.1}{}",
current_data.swap_harvest.mem_used_in_kib as f64 / denominator,
current_data.swap_harvest.used_kib as f64 / denominator,
unit,
(current_data.swap_harvest.mem_total_in_kib as f64 / denominator),
(current_data.swap_harvest.total_kib as f64 / denominator),
unit
)
},
@@ -688,13 +688,13 @@ pub fn convert_gpu_data(
mem_percent: format!("{:3.0}%", gpu.1.use_percent.unwrap_or(0.0)),
mem_total: {
let (unit, denominator) =
return_unit_and_denominator_for_mem_kib(gpu.1.mem_total_in_kib);
return_unit_and_denominator_for_mem_kib(gpu.1.total_kib);
format!(
" {:.1}{}/{:.1}{}",
gpu.1.mem_used_in_kib as f64 / denominator,
gpu.1.used_kib as f64 / denominator,
unit,
(gpu.1.mem_total_in_kib as f64 / denominator),
(gpu.1.total_kib as f64 / denominator),
unit
)
},


@@ -9,18 +9,18 @@ pub const MEGA_LIMIT: u64 = 1_000_000;
pub const GIGA_LIMIT: u64 = 1_000_000_000;
pub const TERA_LIMIT: u64 = 1_000_000_000_000;
pub const KIBI_LIMIT: u64 = 1024;
pub const MEBI_LIMIT: u64 = 1_048_576;
pub const GIBI_LIMIT: u64 = 1_073_741_824;
pub const TEBI_LIMIT: u64 = 1_099_511_627_776;
pub const MEBI_LIMIT: u64 = 1024 * 1024;
pub const GIBI_LIMIT: u64 = 1024 * 1024 * 1024;
pub const TEBI_LIMIT: u64 = 1024 * 1024 * 1024 * 1024;
pub const KILO_LIMIT_F64: f64 = 1000.0;
pub const MEGA_LIMIT_F64: f64 = 1_000_000.0;
pub const GIGA_LIMIT_F64: f64 = 1_000_000_000.0;
pub const TERA_LIMIT_F64: f64 = 1_000_000_000_000.0;
pub const KIBI_LIMIT_F64: f64 = 1024.0;
pub const MEBI_LIMIT_F64: f64 = 1_048_576.0;
pub const GIBI_LIMIT_F64: f64 = 1_073_741_824.0;
pub const TEBI_LIMIT_F64: f64 = 1_099_511_627_776.0;
pub const MEBI_LIMIT_F64: f64 = 1024.0 * 1024.0;
pub const GIBI_LIMIT_F64: f64 = 1024.0 * 1024.0 * 1024.0;
pub const TEBI_LIMIT_F64: f64 = 1024.0 * 1024.0 * 1024.0 * 1024.0;
pub const LOG_KILO_LIMIT: f64 = 3.0;
pub const LOG_MEGA_LIMIT: f64 = 6.0;
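
A quick sanity check (not part of the commit) that the rewritten binary-prefix products equal the literals they replace:

```rust
// Each product matches the old hard-coded constant value.
fn main() {
    assert_eq!(1024_u64 * 1024, 1_048_576);                       // MEBI_LIMIT
    assert_eq!(1024_u64 * 1024 * 1024, 1_073_741_824);            // GIBI_LIMIT
    assert_eq!(1024_u64 * 1024 * 1024 * 1024, 1_099_511_627_776); // TEBI_LIMIT
}
```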