Add an additional NUM_CPUS atomic to store the actual size. CPU utilization is updated in a lock-free array of atomics. Another lock removed (and another unmeasurably small gain).

Herbert Wolverson 2023-02-14 21:27:37 +00:00
parent c0f83dbc51
commit dcb0ae8444
5 changed files with 37 additions and 16 deletions
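
Before the per-file hunks, here is a minimal, self-contained sketch of the lock-free pattern the commit describes: a fixed-size array of atomics that the sampler writes and the API reader drains, with no lock on either side. The statics and the helper mirror the diff below; main() and the sampled percentages are illustrative only, and the sketch needs the once_cell dependency this commit adds.

use once_cell::sync::Lazy;
use std::sync::atomic::{AtomicU32, AtomicUsize, Ordering};

const MAX_CPUS_COUNTED: usize = 128;

/// One slot per possible CPU; unused slots simply stay at zero.
pub static CPU_USAGE: Lazy<[AtomicU32; MAX_CPUS_COUNTED]> = Lazy::new(build_empty_cpu_list);

/// How many slots are actually populated.
pub static NUM_CPUS: AtomicUsize = AtomicUsize::new(0);

fn build_empty_cpu_list() -> [AtomicU32; MAX_CPUS_COUNTED] {
  let mut temp = Vec::with_capacity(MAX_CPUS_COUNTED);
  for _ in 0..MAX_CPUS_COUNTED {
    temp.push(AtomicU32::new(0));
  }
  temp.try_into().expect("sizes are constant")
}

fn main() {
  // Writer side (the tracker loop): one Relaxed store per detected CPU.
  let sampled = [12u32, 7, 99, 3]; // pretend the system reported four CPUs
  for (i, pct) in sampled.iter().enumerate() {
    CPU_USAGE[i].store(*pct, Ordering::Relaxed);
  }
  NUM_CPUS.store(sampled.len(), Ordering::Relaxed);

  // Reader side (the /api/cpu handler): expose only the populated slots.
  let usage: Vec<u32> = CPU_USAGE
    .iter()
    .take(NUM_CPUS.load(Ordering::Relaxed))
    .map(|cpu| cpu.load(Ordering::Relaxed))
    .collect();
  assert_eq!(usage, vec![12, 7, 99, 3]);
}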

src/rust/Cargo.lock (generated)

@@ -1359,6 +1359,7 @@ dependencies = [
  "lqos_config",
  "lqos_utils",
  "nix",
+ "once_cell",
  "parking_lot",
  "rocket",
  "rocket_async_compression",


@@ -19,6 +19,7 @@ anyhow = "1"
 sysinfo = "0"
 default-net = "0"
 nix = "0"
+once_cell = "1"
 # Support JemAlloc on supported platforms
 [target.'cfg(any(target_arch = "x86", target_arch = "x86_64"))'.dependencies]


@@ -1,12 +1,24 @@
-use std::sync::atomic::AtomicU64;
+use std::sync::atomic::{AtomicU64, AtomicU32, AtomicUsize};
+use once_cell::sync::Lazy;
 
-use lazy_static::*;
-use parking_lot::RwLock;
+const MAX_CPUS_COUNTED: usize = 128;
 
-lazy_static! {
-  /// Global storage of current CPU usage
-  pub static ref CPU_USAGE : RwLock<Vec<f32>> = RwLock::new(Vec::with_capacity(128));
-}
+/// Stores overall CPU usage
+pub static CPU_USAGE: Lazy<[AtomicU32; MAX_CPUS_COUNTED]> = Lazy::new(build_empty_cpu_list);
+
+/// Total number of CPUs detected
+pub static NUM_CPUS: AtomicUsize = AtomicUsize::new(0);
 
+/// Total RAM used (bytes)
 pub static RAM_USED: AtomicU64 = AtomicU64::new(0);
+
+/// Total RAM installed (bytes)
 pub static TOTAL_RAM: AtomicU64 = AtomicU64::new(0);
+
+fn build_empty_cpu_list() -> [AtomicU32; MAX_CPUS_COUNTED] {
+  let mut temp = Vec::with_capacity(MAX_CPUS_COUNTED);
+  for _ in 0..MAX_CPUS_COUNTED {
+    temp.push(AtomicU32::new(0));
+  }
+  temp.try_into().expect("This should never happen, sizes are constant.")
+}
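
The Vec-plus-try_into dance in build_empty_cpu_list() exists because AtomicU32 is not Copy, so the array-repeat form [AtomicU32::new(0); MAX_CPUS_COUNTED] does not compile; the expect() can never fire because both lengths are the same constant. As an aside (not part of the commit), on toolchains where std::array::from_fn is stable the same initialisation can be written without the intermediate Vec:

// Hypothetical alternative to the commit's helper; behaviour is identical.
fn build_empty_cpu_list() -> [AtomicU32; MAX_CPUS_COUNTED] {
  std::array::from_fn(|_| AtomicU32::new(0))
}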


@@ -53,12 +53,15 @@ pub async fn update_tracking() {
   sys.refresh_cpu();
   sys.refresh_memory();
-  let cpu_usage = sys
+  sys
     .cpus()
     .iter()
-    .map(|cpu| cpu.cpu_usage())
-    .collect::<Vec<f32>>();
-  *CPU_USAGE.write() = cpu_usage;
+    .enumerate()
+    .map(|(i, cpu)| (i, cpu.cpu_usage() as u32)) // Always rounds down
+    .for_each(|(i, cpu)| CPU_USAGE[i].store(cpu, std::sync::atomic::Ordering::Relaxed));
+  NUM_CPUS.store(sys.cpus().len(), std::sync::atomic::Ordering::Relaxed);
   RAM_USED.store(sys.used_memory(), std::sync::atomic::Ordering::Relaxed);
   TOTAL_RAM.store(sys.total_memory(), std::sync::atomic::Ordering::Relaxed);
   let error = get_data_from_server().await; // Ignoring errors to keep running
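
Two details of the rewritten sampler are worth noting: cpu_usage() reports a floating-point percentage, and the as u32 cast truncates toward zero, which is what the "Always rounds down" comment refers to; Relaxed ordering is sufficient because each slot is an independent gauge, so a reader that interleaves with the writer merely sees a mix of the previous and current sample. A tiny illustration of the truncation (the sample value is made up):

fn main() {
  let raw: f32 = 37.9;      // a per-CPU percentage as the sampler reports it
  let stored = raw as u32;  // `as` on a float truncates toward zero
  assert_eq!(stored, 37);
}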


@@ -2,7 +2,7 @@ mod cache;
 mod cache_manager;
 use self::cache::{
   CPU_USAGE, CURRENT_THROUGHPUT, HOST_COUNTS, RAM_USED, TOTAL_RAM, RTT_HISTOGRAM,
-  THROUGHPUT_BUFFER, TOP_10_DOWNLOADERS, WORST_10_RTT,
+  THROUGHPUT_BUFFER, TOP_10_DOWNLOADERS, WORST_10_RTT, NUM_CPUS,
 };
 use crate::{auth_guard::AuthGuard, tracker::cache::ThroughputPerSecond};
 pub use cache::{SHAPED_DEVICES, UNKNOWN_DEVICES};

@@ -70,10 +70,14 @@ pub fn throughput_ring(_auth: AuthGuard) -> Json<Vec<ThroughputPerSecond>> {
 }
 
 #[get("/api/cpu")]
-pub fn cpu_usage(_auth: AuthGuard) -> Json<Vec<f32>> {
-  let cpu_usage = CPU_USAGE.read().clone();
+pub fn cpu_usage(_auth: AuthGuard) -> Json<Vec<u32>> {
+  let usage: Vec<u32> = CPU_USAGE
+    .iter()
+    .take(NUM_CPUS.load(std::sync::atomic::Ordering::Relaxed))
+    .map(|cpu| cpu.load(std::sync::atomic::Ordering::Relaxed))
+    .collect();
 
-  Json(cpu_usage)
+  Json(usage)
 }
 
 #[get("/api/ram")]