Large batch of improvements:

* The JavaScript RingBuffer structure is now updated correctly.
* Replaced the funnel graph with text - easier to read.
* Discovered that the current "parking_lot" could become unstable
  under very heavy load, and only with "fat" LTO. Since it's
  no longer recommended (recent change), removed it.
* Replaced the "lazy_static" macro suite with the newly recommended
  "once_cell" system. Less code.
* Full source format.
* Update some dependency versions.
This commit is contained in:
Herbert Wolverson 2023-03-07 21:37:23 +00:00
parent 9fa1318350
commit 67cc8d8e99
37 changed files with 510 additions and 483 deletions

24
src/rust/Cargo.lock generated
View File

@ -587,9 +587,9 @@ dependencies = [
[[package]] [[package]]
name = "csv" name = "csv"
version = "1.2.0" version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af91f40b7355f82b0a891f50e70399475945bb0b0da4f1700ce60761c9d3e359" checksum = "0b015497079b9a9d69c02ad25de6c0a6edef051ea6360a327d0bd05802ef64ad"
dependencies = [ dependencies = [
"csv-core", "csv-core",
"itoa", "itoa",
@ -1116,9 +1116,9 @@ dependencies = [
[[package]] [[package]]
name = "io-lifetimes" name = "io-lifetimes"
version = "1.0.5" version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1abeb7a0dd0f8181267ff8adc397075586500b81b28a73e8a0208b00fc170fb3" checksum = "cfa919a82ea574332e2de6e74b4c36e74d41982b335080fa59d4ef31be20fdf3"
dependencies = [ dependencies = [
"libc", "libc",
"windows-sys 0.45.0", "windows-sys 0.45.0",
@ -1348,13 +1348,11 @@ dependencies = [
"anyhow", "anyhow",
"default-net", "default-net",
"jemallocator", "jemallocator",
"lazy_static",
"lqos_bus", "lqos_bus",
"lqos_config", "lqos_config",
"lqos_utils", "lqos_utils",
"nix", "nix",
"once_cell", "once_cell",
"parking_lot",
"rocket", "rocket",
"rocket_async_compression", "rocket_async_compression",
"sysinfo", "sysinfo",
@ -1378,15 +1376,13 @@ name = "lqos_queue_tracker"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"criterion", "criterion",
"lazy_static",
"log", "log",
"log-once", "log-once",
"lqos_bus", "lqos_bus",
"lqos_config", "lqos_config",
"lqos_sys", "lqos_sys",
"lqos_utils", "lqos_utils",
"parking_lot", "once_cell",
"rayon",
"serde", "serde",
"serde_json", "serde_json",
"thiserror", "thiserror",
@ -1445,8 +1441,6 @@ dependencies = [
"lqos_utils", "lqos_utils",
"nix", "nix",
"once_cell", "once_cell",
"parking_lot",
"rayon",
"serde", "serde",
"serde_json", "serde_json",
"signal-hook", "signal-hook",
@ -2200,18 +2194,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]] [[package]]
name = "serde" name = "serde"
version = "1.0.152" version = "1.0.153"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" checksum = "3a382c72b4ba118526e187430bb4963cd6d55051ebf13d9b25574d379cc98d20"
dependencies = [ dependencies = [
"serde_derive", "serde_derive",
] ]
[[package]] [[package]]
name = "serde_derive" name = "serde_derive"
version = "1.0.152" version = "1.0.153"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" checksum = "1ef476a5790f0f6decbc66726b6e5d63680ed518283e64c7df415989d880954f"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",

View File

@ -112,9 +112,9 @@ pub enum BusRequest {
ValidateShapedDevicesCsv, ValidateShapedDevicesCsv,
/// Request details of part of the network tree /// Request details of part of the network tree
GetNetworkMap{ GetNetworkMap {
/// The parent of the map to retrieve /// The parent of the map to retrieve
parent: usize parent: usize,
}, },
/// Retrieves the top N queues from the root level, and summarizes /// Retrieves the top N queues from the root level, and summarizes
@ -124,6 +124,12 @@ pub enum BusRequest {
/// Retrieve node names from network.json /// Retrieve node names from network.json
GetNodeNamesFromIds(Vec<usize>), GetNodeNamesFromIds(Vec<usize>),
/// Retrieve stats for all queues above a named circuit id
GetFunnel {
/// Circuit being analyzed, as the named circuit id
target: String,
},
/// If running on Equinix (the `equinix_test` feature is enabled), /// If running on Equinix (the `equinix_test` feature is enabled),
/// display a "run bandwidht test" link. /// display a "run bandwidht test" link.
#[cfg(feature = "equinix_tests")] #[cfg(feature = "equinix_tests")]

View File

@ -9,16 +9,16 @@
mod authentication; mod authentication;
mod etc; mod etc;
mod libre_qos_config; mod libre_qos_config;
mod network_json;
mod program_control; mod program_control;
mod shaped_devices; mod shaped_devices;
mod network_json;
pub use authentication::{UserRole, WebUsers}; pub use authentication::{UserRole, WebUsers};
pub use etc::{BridgeConfig, BridgeInterface, BridgeVlan, EtcLqos, Tunables}; pub use etc::{BridgeConfig, BridgeInterface, BridgeVlan, EtcLqos, Tunables};
pub use libre_qos_config::LibreQoSConfig; pub use libre_qos_config::LibreQoSConfig;
pub use network_json::{NetworkJson, NetworkJsonNode};
pub use program_control::load_libreqos; pub use program_control::load_libreqos;
pub use shaped_devices::{ConfigShapedDevices, ShapedDevice}; pub use shaped_devices::{ConfigShapedDevices, ShapedDevice};
pub use network_json::{NetworkJson, NetworkJsonNode};
/// Used as a constant in determining buffer preallocation /// Used as a constant in determining buffer preallocation
pub const SUPPORTED_CUSTOMERS: usize = 16_000_000; pub const SUPPORTED_CUSTOMERS: usize = 16_000_000;

View File

@ -122,12 +122,15 @@ impl NetworkJson {
/// Retrieve a cloned copy of all children with a parent containing a specific /// Retrieve a cloned copy of all children with a parent containing a specific
/// node index. /// node index.
pub fn get_cloned_children(&self, index: usize) -> Vec<(usize, NetworkJsonNode)> { pub fn get_cloned_children(
&self,
index: usize,
) -> Vec<(usize, NetworkJsonNode)> {
self self
.nodes .nodes
.iter() .iter()
.enumerate() .enumerate()
.filter(|(_i,n)| n.immediate_parent == Some(index)) .filter(|(_i, n)| n.immediate_parent == Some(index))
.map(|(i, n)| (i, n.clone())) .map(|(i, n)| (i, n.clone()))
.collect() .collect()
} }
@ -158,16 +161,24 @@ impl NetworkJson {
&mut self, &mut self,
targets: &[usize], targets: &[usize],
bytes: (u64, u64), bytes: (u64, u64),
median_rtt: f32,
) { ) {
for idx in targets { for idx in targets {
// Safety first: use "get" to ensure that the node exists // Safety first: use "get" to ensure that the node exists
if let Some(node) = self.nodes.get_mut(*idx) { if let Some(node) = self.nodes.get_mut(*idx) {
node.current_throughput.0 += bytes.0; node.current_throughput.0 += bytes.0;
node.current_throughput.1 += bytes.1; node.current_throughput.1 += bytes.1;
if median_rtt > 0.0 { } else {
node.rtts.push(median_rtt); warn!("No network tree entry for index {idx}");
} }
}
}
/// Record RTT time in the tree
pub fn add_rtt_cycle(&mut self, targets: &[usize], rtt: f32) {
for idx in targets {
// Safety first: use "get" to ensure that the node exists
if let Some(node) = self.nodes.get_mut(*idx) {
node.rtts.push(rtt);
} else { } else {
warn!("No network tree entry for index {idx}"); warn!("No network tree entry for index {idx}");
} }
@ -195,14 +206,13 @@ fn recurse_node(
immediate_parent: usize, immediate_parent: usize,
) { ) {
info!("Mapping {name} from network.json"); info!("Mapping {name} from network.json");
/*let my_id = if name != "children" { let mut parents = parents.to_vec();
let my_id = if name != "children" {
parents.push(nodes.len());
nodes.len() nodes.len()
} else { } else {
nodes.len()-1 nodes.len() - 1
};*/ };
let my_id = nodes.len();
let mut parents = parents.to_vec();
parents.push(my_id);
let node = NetworkJsonNode { let node = NetworkJsonNode {
parents: parents.to_vec(), parents: parents.to_vec(),
max_throughput: ( max_throughput: (
@ -215,9 +225,9 @@ fn recurse_node(
rtts: Vec::new(), rtts: Vec::new(),
}; };
//if node.name != "children" { if node.name != "children" {
nodes.push(node); nodes.push(node);
//} }
// Recurse children // Recurse children
for (key, value) in json.iter() { for (key, value) in json.iter() {

View File

@ -10,8 +10,6 @@ equinix_tests = []
[dependencies] [dependencies]
rocket = { version = "0.5.0-rc.2", features = [ "json", "msgpack", "uuid" ] } rocket = { version = "0.5.0-rc.2", features = [ "json", "msgpack", "uuid" ] }
rocket_async_compression = "0.2.0" rocket_async_compression = "0.2.0"
lazy_static = "1.4"
parking_lot = "0.12"
lqos_bus = { path = "../lqos_bus" } lqos_bus = { path = "../lqos_bus" }
lqos_config = { path = "../lqos_config" } lqos_config = { path = "../lqos_config" }
lqos_utils = { path = "../lqos_utils" } lqos_utils = { path = "../lqos_utils" }

View File

@ -1,7 +1,8 @@
use std::sync::Mutex;
use anyhow::Error; use anyhow::Error;
use lazy_static::*;
use lqos_config::{UserRole, WebUsers}; use lqos_config::{UserRole, WebUsers};
use parking_lot::Mutex; use once_cell::sync::Lazy;
use rocket::serde::{json::Json, Deserialize, Serialize}; use rocket::serde::{json::Json, Deserialize, Serialize};
use rocket::{ use rocket::{
http::{Cookie, CookieJar, Status}, http::{Cookie, CookieJar, Status},
@ -9,9 +10,8 @@ use rocket::{
Request, Request,
}; };
lazy_static! { static WEB_USERS: Lazy<Mutex<Option<WebUsers>>> =
static ref WEB_USERS: Mutex<Option<WebUsers>> = Mutex::new(None); Lazy::new(|| Mutex::new(None));
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)] #[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AuthGuard { pub enum AuthGuard {
@ -27,7 +27,7 @@ impl<'r> FromRequest<'r> for AuthGuard {
async fn from_request( async fn from_request(
request: &'r Request<'_>, request: &'r Request<'_>,
) -> Outcome<Self, Self::Error> { ) -> Outcome<Self, Self::Error> {
let mut lock = WEB_USERS.lock(); let mut lock = WEB_USERS.lock().unwrap();
if lock.is_none() { if lock.is_none() {
if WebUsers::does_users_file_exist().unwrap() { if WebUsers::does_users_file_exist().unwrap() {
*lock = Some(WebUsers::load_or_create().unwrap()); *lock = Some(WebUsers::load_or_create().unwrap());
@ -82,7 +82,7 @@ pub fn create_first_user(
if WebUsers::does_users_file_exist().unwrap() { if WebUsers::does_users_file_exist().unwrap() {
return Json("ERROR".to_string()); return Json("ERROR".to_string());
} }
let mut lock = WEB_USERS.lock(); let mut lock = WEB_USERS.lock().unwrap();
let mut users = WebUsers::load_or_create().unwrap(); let mut users = WebUsers::load_or_create().unwrap();
users.allow_anonymous(info.allow_anonymous).unwrap(); users.allow_anonymous(info.allow_anonymous).unwrap();
let token = users let token = users
@ -102,7 +102,7 @@ pub struct LoginAttempt {
#[post("/api/login", data = "<info>")] #[post("/api/login", data = "<info>")]
pub fn login(cookies: &CookieJar, info: Json<LoginAttempt>) -> Json<String> { pub fn login(cookies: &CookieJar, info: Json<LoginAttempt>) -> Json<String> {
let mut lock = WEB_USERS.lock(); let mut lock = WEB_USERS.lock().unwrap();
if lock.is_none() && WebUsers::does_users_file_exist().unwrap() { if lock.is_none() && WebUsers::does_users_file_exist().unwrap() {
*lock = Some(WebUsers::load_or_create().unwrap()); *lock = Some(WebUsers::load_or_create().unwrap());
} }
@ -126,7 +126,7 @@ pub fn admin_check(auth: AuthGuard) -> Json<bool> {
#[get("/api/username")] #[get("/api/username")]
pub fn username(_auth: AuthGuard, cookies: &CookieJar) -> Json<String> { pub fn username(_auth: AuthGuard, cookies: &CookieJar) -> Json<String> {
if let Some(token) = cookies.get("User-Token") { if let Some(token) = cookies.get("User-Token") {
let lock = WEB_USERS.lock(); let lock = WEB_USERS.lock().unwrap();
if let Some(users) = &*lock { if let Some(users) = &*lock {
return Json(users.get_username(token.value())); return Json(users.get_username(token.value()));
} }

View File

@ -9,8 +9,8 @@ mod unknown_devices;
use rocket_async_compression::Compression; use rocket_async_compression::Compression;
mod auth_guard; mod auth_guard;
mod config_control; mod config_control;
mod queue_info;
mod network_tree; mod network_tree;
mod queue_info;
// Use JemAllocator only on supported platforms // Use JemAllocator only on supported platforms
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
@ -81,6 +81,7 @@ fn rocket() -> _ {
network_tree::tree_clients, network_tree::tree_clients,
network_tree::network_tree_summary, network_tree::network_tree_summary,
network_tree::node_names, network_tree::node_names,
network_tree::funnel_for_queue,
// Supporting files // Supporting files
static_pages::bootsrap_css, static_pages::bootsrap_css,
static_pages::plotly_js, static_pages::plotly_js,

View File

@ -2,7 +2,10 @@ use std::net::IpAddr;
use lqos_bus::{bus_request, BusRequest, BusResponse}; use lqos_bus::{bus_request, BusRequest, BusResponse};
use lqos_config::NetworkJsonNode; use lqos_config::NetworkJsonNode;
use rocket::{fs::NamedFile, serde::{json::Json, Serialize}}; use rocket::{
fs::NamedFile,
serde::{json::Json, Serialize},
};
use crate::{cache_control::NoCache, tracker::SHAPED_DEVICES}; use crate::{cache_control::NoCache, tracker::SHAPED_DEVICES};
@ -28,7 +31,8 @@ pub async fn tree_entry(
} }
#[get("/api/network_tree_summary")] #[get("/api/network_tree_summary")]
pub async fn network_tree_summary() -> NoCache<Json<Vec<(usize, NetworkJsonNode)>>> { pub async fn network_tree_summary(
) -> NoCache<Json<Vec<(usize, NetworkJsonNode)>>> {
let responses = let responses =
bus_request(vec![BusRequest::TopMapQueues(4)]).await.unwrap(); bus_request(vec![BusRequest::TopMapQueues(4)]).await.unwrap();
let result = match &responses[0] { let result = match &responses[0] {
@ -55,7 +59,7 @@ pub async fn tree_clients(
for msg in for msg in
bus_request(vec![BusRequest::GetHostCounter]).await.unwrap().iter() bus_request(vec![BusRequest::GetHostCounter]).await.unwrap().iter()
{ {
let devices = SHAPED_DEVICES.read(); let devices = SHAPED_DEVICES.read().unwrap();
if let BusResponse::HostCounters(hosts) = msg { if let BusResponse::HostCounters(hosts) = msg {
for (ip, down, up) in hosts.iter() { for (ip, down, up) in hosts.iter() {
let lookup = match ip { let lookup = match ip {
@ -71,7 +75,7 @@ pub async fn tree_clients(
limit: ( limit: (
devices.devices[*c.1].download_max_mbps as u64, devices.devices[*c.1].download_max_mbps as u64,
devices.devices[*c.1].upload_max_mbps as u64, devices.devices[*c.1].upload_max_mbps as u64,
) ),
}); });
} }
} }
@ -81,11 +85,15 @@ pub async fn tree_clients(
NoCache::new(Json(result)) NoCache::new(Json(result))
} }
#[post("/api/node_names", data= "<nodes>")] #[post("/api/node_names", data = "<nodes>")]
pub async fn node_names(nodes: Json<Vec<usize>>) -> NoCache<Json<Vec<(usize, String)>>> { pub async fn node_names(
nodes: Json<Vec<usize>>,
) -> NoCache<Json<Vec<(usize, String)>>> {
let mut result = Vec::new(); let mut result = Vec::new();
for msg in for msg in bus_request(vec![BusRequest::GetNodeNamesFromIds(nodes.0)])
bus_request(vec![BusRequest::GetNodeNamesFromIds(nodes.0)]).await.unwrap().iter() .await
.unwrap()
.iter()
{ {
if let BusResponse::NodeNames(map) = msg { if let BusResponse::NodeNames(map) = msg {
result.extend_from_slice(map); result.extend_from_slice(map);
@ -93,4 +101,31 @@ pub async fn node_names(nodes: Json<Vec<usize>>) -> NoCache<Json<Vec<(usize, Str
} }
NoCache::new(Json(result)) NoCache::new(Json(result))
} }
/// Retrieve the queue "funnel" for a circuit: every network-tree node above
/// the circuit's parent node, with current throughput/RTT data, fetched over
/// the local bus.
///
/// Returns an empty list when the circuit id is unknown or the bus reply is
/// not a `NetworkMap` — previously an unknown `circuit_id` panicked the
/// handler via `unwrap()` on the `find` result.
#[get("/api/funnel_for_queue/<circuit_id>")]
pub async fn funnel_for_queue(
  circuit_id: String,
) -> NoCache<Json<Vec<(usize, NetworkJsonNode)>>> {
  let mut result = Vec::new();
  // Look up the circuit's parent node name. The read guard is a temporary
  // inside this statement, so the lock is released before we await below.
  // (`map` replaces the old redundant `.as_ref().unwrap()` chain.)
  let target = SHAPED_DEVICES
    .read()
    .unwrap()
    .devices
    .iter()
    .find(|d| d.circuit_id == circuit_id)
    .map(|d| d.parent_node.clone());
  if let Some(target) = target {
    for msg in
      bus_request(vec![BusRequest::GetFunnel { target }]).await.unwrap().iter()
    {
      if let BusResponse::NetworkMap(map) = msg {
        result.extend_from_slice(map);
      }
    }
  }
  NoCache::new(Json(result))
}

View File

@ -29,8 +29,12 @@ pub async fn circuit_info(
circuit_id: String, circuit_id: String,
_auth: AuthGuard, _auth: AuthGuard,
) -> NoCache<Json<CircuitInfo>> { ) -> NoCache<Json<CircuitInfo>> {
if let Some(device) = if let Some(device) = SHAPED_DEVICES
SHAPED_DEVICES.read().devices.iter().find(|d| d.circuit_id == circuit_id) .read()
.unwrap()
.devices
.iter()
.find(|d| d.circuit_id == circuit_id)
{ {
let result = CircuitInfo { let result = CircuitInfo {
name: device.circuit_name.clone(), name: device.circuit_name.clone(),
@ -63,7 +67,7 @@ pub async fn current_circuit_throughput(
bus_request(vec![BusRequest::GetHostCounter]).await.unwrap().iter() bus_request(vec![BusRequest::GetHostCounter]).await.unwrap().iter()
{ {
if let BusResponse::HostCounters(hosts) = msg { if let BusResponse::HostCounters(hosts) = msg {
let devices = SHAPED_DEVICES.read(); let devices = SHAPED_DEVICES.read().unwrap();
for (ip, down, up) in hosts.iter() { for (ip, down, up) in hosts.iter() {
let lookup = match ip { let lookup = match ip {
IpAddr::V4(ip) => ip.to_ipv6_mapped(), IpAddr::V4(ip) => ip.to_ipv6_mapped(),

View File

@ -13,12 +13,12 @@ static RELOAD_REQUIRED: AtomicBool = AtomicBool::new(false);
pub fn all_shaped_devices( pub fn all_shaped_devices(
_auth: AuthGuard, _auth: AuthGuard,
) -> NoCache<Json<Vec<ShapedDevice>>> { ) -> NoCache<Json<Vec<ShapedDevice>>> {
NoCache::new(Json(SHAPED_DEVICES.read().devices.clone())) NoCache::new(Json(SHAPED_DEVICES.read().unwrap().devices.clone()))
} }
#[get("/api/shaped_devices_count")] #[get("/api/shaped_devices_count")]
pub fn shaped_devices_count(_auth: AuthGuard) -> NoCache<Json<usize>> { pub fn shaped_devices_count(_auth: AuthGuard) -> NoCache<Json<usize>> {
NoCache::new(Json(SHAPED_DEVICES.read().devices.len())) NoCache::new(Json(SHAPED_DEVICES.read().unwrap().devices.len()))
} }
#[get("/api/shaped_devices_range/<start>/<end>")] #[get("/api/shaped_devices_range/<start>/<end>")]
@ -27,7 +27,7 @@ pub fn shaped_devices_range(
end: usize, end: usize,
_auth: AuthGuard, _auth: AuthGuard,
) -> NoCache<Json<Vec<ShapedDevice>>> { ) -> NoCache<Json<Vec<ShapedDevice>>> {
let reader = SHAPED_DEVICES.read(); let reader = SHAPED_DEVICES.read().unwrap();
let result: Vec<ShapedDevice> = let result: Vec<ShapedDevice> =
reader.devices.iter().skip(start).take(end).cloned().collect(); reader.devices.iter().skip(start).take(end).cloned().collect();
NoCache::new(Json(result)) NoCache::new(Json(result))
@ -39,7 +39,7 @@ pub fn shaped_devices_search(
_auth: AuthGuard, _auth: AuthGuard,
) -> NoCache<Json<Vec<ShapedDevice>>> { ) -> NoCache<Json<Vec<ShapedDevice>>> {
let term = term.trim().to_lowercase(); let term = term.trim().to_lowercase();
let reader = SHAPED_DEVICES.read(); let reader = SHAPED_DEVICES.read().unwrap();
let result: Vec<ShapedDevice> = reader let result: Vec<ShapedDevice> = reader
.devices .devices
.iter() .iter()

View File

@ -1,22 +1,15 @@
use lazy_static::*;
use lqos_bus::IpStats; use lqos_bus::IpStats;
use parking_lot::RwLock; use once_cell::sync::Lazy;
use std::sync::RwLock;
lazy_static! { pub static TOP_10_DOWNLOADERS: Lazy<RwLock<Vec<IpStats>>> =
pub static ref TOP_10_DOWNLOADERS: RwLock<Vec<IpStats>> = Lazy::new(|| RwLock::new(Vec::with_capacity(10)));
RwLock::new(Vec::with_capacity(10));
}
lazy_static! { pub static WORST_10_RTT: Lazy<RwLock<Vec<IpStats>>> =
pub static ref WORST_10_RTT: RwLock<Vec<IpStats>> = Lazy::new(|| RwLock::new(Vec::with_capacity(10)));
RwLock::new(Vec::with_capacity(10));
}
lazy_static! { pub static RTT_HISTOGRAM: Lazy<RwLock<Vec<u32>>> =
pub static ref RTT_HISTOGRAM: RwLock<Vec<u32>> = Lazy::new(|| RwLock::new(Vec::with_capacity(100)));
RwLock::new(Vec::with_capacity(100));
}
lazy_static! { pub static HOST_COUNTS: Lazy<RwLock<(u32, u32)>> =
pub static ref HOST_COUNTS: RwLock<(u32, u32)> = RwLock::new((0, 0)); Lazy::new(|| RwLock::new((0, 0)));
}

View File

@ -1,18 +1,16 @@
use lazy_static::*;
use lqos_bus::IpStats; use lqos_bus::IpStats;
use lqos_config::ConfigShapedDevices; use lqos_config::ConfigShapedDevices;
use parking_lot::RwLock; use once_cell::sync::Lazy;
use std::sync::RwLock;
lazy_static! { /// Global storage of the shaped devices csv data.
/// Global storage of the shaped devices csv data. /// Updated by the file system watcher whenever
/// Updated by the file system watcher whenever /// the underlying file changes.
/// the underlying file changes. pub static SHAPED_DEVICES: Lazy<RwLock<ConfigShapedDevices>> =
pub static ref SHAPED_DEVICES : RwLock<ConfigShapedDevices> = RwLock::new(ConfigShapedDevices::default()); Lazy::new(|| RwLock::new(ConfigShapedDevices::default()));
}
lazy_static! { /// Global storage of the shaped devices csv data.
/// Global storage of the shaped devices csv data. /// Updated by the file system watcher whenever
/// Updated by the file system watcher whenever /// the underlying file changes.
/// the underlying file changes. pub static UNKNOWN_DEVICES: Lazy<RwLock<Vec<IpStats>>> =
pub static ref UNKNOWN_DEVICES : RwLock<Vec<IpStats>> = RwLock::new(Vec::new()); Lazy::new(|| RwLock::new(Vec::new()));
}

View File

@ -95,10 +95,10 @@ fn load_shaped_devices() {
let shaped_devices = ConfigShapedDevices::load(); let shaped_devices = ConfigShapedDevices::load();
if let Ok(new_file) = shaped_devices { if let Ok(new_file) = shaped_devices {
info!("ShapedDevices.csv loaded"); info!("ShapedDevices.csv loaded");
*SHAPED_DEVICES.write() = new_file; *SHAPED_DEVICES.write().unwrap() = new_file;
} else { } else {
warn!("ShapedDevices.csv failed to load, see previous error messages. Reverting to empty set."); warn!("ShapedDevices.csv failed to load, see previous error messages. Reverting to empty set.");
*SHAPED_DEVICES.write() = ConfigShapedDevices::default(); *SHAPED_DEVICES.write().unwrap() = ConfigShapedDevices::default();
} }
} }
@ -136,19 +136,19 @@ async fn get_data_from_server() -> Result<()> {
]; ];
for r in bus_request(requests).await?.iter() { for r in bus_request(requests).await?.iter() {
match r { match r {
BusResponse::TopDownloaders(stats) => { BusResponse::TopDownloaders(stats) => {
*TOP_10_DOWNLOADERS.write() = stats.clone(); *TOP_10_DOWNLOADERS.write().unwrap() = stats.clone();
} }
BusResponse::WorstRtt(stats) => { BusResponse::WorstRtt(stats) => {
*WORST_10_RTT.write() = stats.clone(); *WORST_10_RTT.write().unwrap() = stats.clone();
} }
BusResponse::RttHistogram(stats) => { BusResponse::RttHistogram(stats) => {
*RTT_HISTOGRAM.write() = stats.clone(); *RTT_HISTOGRAM.write().unwrap() = stats.clone();
} }
BusResponse::AllUnknownIps(unknowns) => { BusResponse::AllUnknownIps(unknowns) => {
*HOST_COUNTS.write() = (unknowns.len() as u32, 0); *HOST_COUNTS.write().unwrap() = (unknowns.len() as u32, 0);
let cfg = SHAPED_DEVICES.read(); let cfg = SHAPED_DEVICES.read().unwrap();
let really_unknown: Vec<IpStats> = unknowns let really_unknown: Vec<IpStats> = unknowns
.iter() .iter()
.filter(|ip| { .filter(|ip| {
@ -164,8 +164,8 @@ async fn get_data_from_server() -> Result<()> {
}) })
.cloned() .cloned()
.collect(); .collect();
*HOST_COUNTS.write() = (really_unknown.len() as u32, 0); *HOST_COUNTS.write().unwrap() = (really_unknown.len() as u32, 0);
*UNKNOWN_DEVICES.write() = really_unknown; *UNKNOWN_DEVICES.write().unwrap() = really_unknown;
} }
BusResponse::NotReadyYet => { BusResponse::NotReadyYet => {
warn!("Host system isn't ready to answer all queries yet."); warn!("Host system isn't ready to answer all queries yet.");

View File

@ -1,17 +1,13 @@
mod cache; mod cache;
mod cache_manager; mod cache_manager;
use self::cache::{ use self::cache::{
CPU_USAGE, HOST_COUNTS, NUM_CPUS, RAM_USED, CPU_USAGE, HOST_COUNTS, NUM_CPUS, RAM_USED, RTT_HISTOGRAM,
RTT_HISTOGRAM, TOP_10_DOWNLOADERS, TOTAL_RAM, TOP_10_DOWNLOADERS, TOTAL_RAM, WORST_10_RTT,
WORST_10_RTT,
}; };
use crate::auth_guard::AuthGuard; use crate::auth_guard::AuthGuard;
pub use cache::{SHAPED_DEVICES, UNKNOWN_DEVICES}; pub use cache::{SHAPED_DEVICES, UNKNOWN_DEVICES};
pub use cache_manager::update_tracking; pub use cache_manager::update_tracking;
use lazy_static::lazy_static; use lqos_bus::{bus_request, BusRequest, BusResponse, IpStats, TcHandle};
use lqos_bus::{IpStats, TcHandle, bus_request, BusRequest, BusResponse};
use lqos_config::LibreQoSConfig;
use parking_lot::Mutex;
use rocket::serde::{json::Json, Deserialize, Serialize}; use rocket::serde::{json::Json, Deserialize, Serialize};
#[derive(Serialize, Deserialize, Clone, Debug)] #[derive(Serialize, Deserialize, Clone, Debug)]
@ -41,6 +37,7 @@ impl From<&IpStats> for IpStatsWithPlan {
if !result.circuit_id.is_empty() { if !result.circuit_id.is_empty() {
if let Some(circuit) = SHAPED_DEVICES if let Some(circuit) = SHAPED_DEVICES
.read() .read()
.unwrap()
.devices .devices
.iter() .iter()
.find(|sd| sd.circuit_id == result.circuit_id) .find(|sd| sd.circuit_id == result.circuit_id)
@ -50,8 +47,7 @@ impl From<&IpStats> for IpStatsWithPlan {
} else { } else {
&circuit.circuit_name &circuit.circuit_name
}; };
result.ip_address = result.ip_address = format!("{} ({})", name, result.ip_address);
format!("{} ({})", name, result.ip_address);
result.plan = (circuit.download_max_mbps, circuit.download_min_mbps); result.plan = (circuit.download_max_mbps, circuit.download_min_mbps);
} }
} }
@ -70,15 +66,20 @@ pub struct ThroughputPerSecond {
} }
#[get("/api/current_throughput")] #[get("/api/current_throughput")]
pub async fn current_throughput(_auth: AuthGuard) -> Json<ThroughputPerSecond> { pub async fn current_throughput(
_auth: AuthGuard,
) -> Json<ThroughputPerSecond> {
let mut result = ThroughputPerSecond::default(); let mut result = ThroughputPerSecond::default();
if let Ok(messages) = bus_request(vec![BusRequest::GetCurrentThroughput]).await { if let Ok(messages) =
bus_request(vec![BusRequest::GetCurrentThroughput]).await
{
for msg in messages { for msg in messages {
if let BusResponse::CurrentThroughput { if let BusResponse::CurrentThroughput {
bits_per_second, bits_per_second,
packets_per_second, packets_per_second,
shaped_bits_per_second, shaped_bits_per_second,
} = msg { } = msg
{
result.bits_per_second = bits_per_second; result.bits_per_second = bits_per_second;
result.packets_per_second = packets_per_second; result.packets_per_second = packets_per_second;
result.shaped_bits_per_second = shaped_bits_per_second; result.shaped_bits_per_second = shaped_bits_per_second;
@ -109,33 +110,30 @@ pub fn ram_usage(_auth: AuthGuard) -> Json<Vec<u64>> {
#[get("/api/top_10_downloaders")] #[get("/api/top_10_downloaders")]
pub fn top_10_downloaders(_auth: AuthGuard) -> Json<Vec<IpStatsWithPlan>> { pub fn top_10_downloaders(_auth: AuthGuard) -> Json<Vec<IpStatsWithPlan>> {
let tt: Vec<IpStatsWithPlan> = let tt: Vec<IpStatsWithPlan> =
TOP_10_DOWNLOADERS.read().iter().map(|tt| tt.into()).collect(); TOP_10_DOWNLOADERS.read().unwrap().iter().map(|tt| tt.into()).collect();
Json(tt) Json(tt)
} }
#[get("/api/worst_10_rtt")] #[get("/api/worst_10_rtt")]
pub fn worst_10_rtt(_auth: AuthGuard) -> Json<Vec<IpStatsWithPlan>> { pub fn worst_10_rtt(_auth: AuthGuard) -> Json<Vec<IpStatsWithPlan>> {
let tt: Vec<IpStatsWithPlan> = let tt: Vec<IpStatsWithPlan> =
WORST_10_RTT.read().iter().map(|tt| tt.into()).collect(); WORST_10_RTT.read().unwrap().iter().map(|tt| tt.into()).collect();
Json(tt) Json(tt)
} }
#[get("/api/rtt_histogram")] #[get("/api/rtt_histogram")]
pub fn rtt_histogram(_auth: AuthGuard) -> Json<Vec<u32>> { pub fn rtt_histogram(_auth: AuthGuard) -> Json<Vec<u32>> {
Json(RTT_HISTOGRAM.read().clone()) Json(RTT_HISTOGRAM.read().unwrap().clone())
} }
#[get("/api/host_counts")] #[get("/api/host_counts")]
pub fn host_counts(_auth: AuthGuard) -> Json<(u32, u32)> { pub fn host_counts(_auth: AuthGuard) -> Json<(u32, u32)> {
let shaped_reader = SHAPED_DEVICES.read(); let shaped_reader = SHAPED_DEVICES.read().unwrap();
let n_devices = shaped_reader.devices.len(); let n_devices = shaped_reader.devices.len();
let host_counts = HOST_COUNTS.read(); let host_counts = HOST_COUNTS.read().unwrap();
let unknown = host_counts.0 - host_counts.1; let unknown = host_counts.0 - host_counts.1;
Json((n_devices as u32, unknown)) Json((n_devices as u32, unknown))
} }
lazy_static! { //static CONFIG: Lazy<Mutex<LibreQoSConfig>> =
static ref CONFIG: Mutex<LibreQoSConfig> = // Lazy::new(|| Mutex::new(lqos_config::LibreQoSConfig::load().unwrap()));
Mutex::new(lqos_config::LibreQoSConfig::load().unwrap());
}

View File

@ -6,12 +6,12 @@ use rocket::serde::json::Json;
#[get("/api/all_unknown_devices")] #[get("/api/all_unknown_devices")]
pub fn all_unknown_devices(_auth: AuthGuard) -> NoCache<Json<Vec<IpStats>>> { pub fn all_unknown_devices(_auth: AuthGuard) -> NoCache<Json<Vec<IpStats>>> {
NoCache::new(Json(UNKNOWN_DEVICES.read().clone())) NoCache::new(Json(UNKNOWN_DEVICES.read().unwrap().clone()))
} }
#[get("/api/unknown_devices_count")] #[get("/api/unknown_devices_count")]
pub fn unknown_devices_count(_auth: AuthGuard) -> NoCache<Json<usize>> { pub fn unknown_devices_count(_auth: AuthGuard) -> NoCache<Json<usize>> {
NoCache::new(Json(UNKNOWN_DEVICES.read().len())) NoCache::new(Json(UNKNOWN_DEVICES.read().unwrap().len()))
} }
#[get("/api/unknown_devices_range/<start>/<end>")] #[get("/api/unknown_devices_range/<start>/<end>")]
@ -20,7 +20,7 @@ pub fn unknown_devices_range(
end: usize, end: usize,
_auth: AuthGuard, _auth: AuthGuard,
) -> NoCache<Json<Vec<IpStats>>> { ) -> NoCache<Json<Vec<IpStats>>> {
let reader = UNKNOWN_DEVICES.read(); let reader = UNKNOWN_DEVICES.read().unwrap();
let result: Vec<IpStats> = let result: Vec<IpStats> =
reader.iter().skip(start).take(end).cloned().collect(); reader.iter().skip(start).take(end).cloned().collect();
NoCache::new(Json(result)) NoCache::new(Json(result))
@ -29,7 +29,7 @@ pub fn unknown_devices_range(
#[get("/api/unknown_devices_csv")] #[get("/api/unknown_devices_csv")]
pub fn unknown_devices_csv(_auth: AuthGuard) -> NoCache<String> { pub fn unknown_devices_csv(_auth: AuthGuard) -> NoCache<String> {
let mut result = String::new(); let mut result = String::new();
let reader = UNKNOWN_DEVICES.read(); let reader = UNKNOWN_DEVICES.read().unwrap();
for unknown in reader.iter() { for unknown in reader.iter() {
result += &format!("{}\n", unknown.ip_address); result += &format!("{}\n", unknown.ip_address);

View File

@ -68,6 +68,9 @@
<li class="nav-item" role="presentation"> <li class="nav-item" role="presentation">
<button class="nav-link" id="pills-tins-tab" data-bs-toggle="pill" data-bs-target="#pills-tins" type="button" role="tab" aria-controls="pills-profile" aria-selected="false">All Tins</button> <button class="nav-link" id="pills-tins-tab" data-bs-toggle="pill" data-bs-target="#pills-tins" type="button" role="tab" aria-controls="pills-profile" aria-selected="false">All Tins</button>
</li> </li>
<li class="nav-item" role="presentation">
<button class="nav-link" id="pills-funnel-tab" data-bs-toggle="pill" data-bs-target="#pills-funnel" type="button" role="tab" aria-controls="pills-funnel" aria-selected="false">Queue Funnel</button>
</li>
</ul> </ul>
</div> </div>
<div class="col-sm-2"> <div class="col-sm-2">
@ -139,7 +142,7 @@
</div> </div>
</div> </div>
<div class="tab-pane fade" id="pills-tins" role="tabpanel" aria-labelledby="pills-tins-tab" tabindex="0"> <div class="tab-pane fade" id="pills-tins" role="tabpanel" aria-labelledby="pills-tins-tab" tabindex="1">
<div class="row" class="mtop4"> <div class="row" class="mtop4">
<div class="col-sm-6"> <div class="col-sm-6">
<div class="card bg-light"> <div class="card bg-light">
@ -181,6 +184,9 @@
</div> </div>
</div> </div>
</div> </div>
<div class="tab-pane fade" id="pills-funnel" role="tabpanel" aria-labelledby="pills-funnel-tab" tabindex="2">
</div>
</div> </div>
</div> </div>
@ -450,6 +456,63 @@
setTimeout(getThroughput, 1000); setTimeout(getThroughput, 1000);
} }
let funnels = new MultiRingBuffer(300);
let rtts = {};
let circuitId = "";
function getFunnel(c) {
circuitId = encodeURI(c);
$.get("/api/funnel_for_queue/" + circuitId, (data) => {
let html = "";
for (let i=0; i<data.length; ++i) {
funnels.push(data[i][0], data[i][1].current_throughput[0]*8, data[i][1].current_throughput[1]*8);
rtts[data[i][0]] = new RttHistogram();
let row = "<div class='row row220'>";
row += "<div class='col-sm-6'>";
row += "<div class='card bg-light'>";
row += "<h5 class='card-title'><i class='fa fa-hourglass'></i> <a href='/tree?parent=" + data[i][0] + "'>" + data[i][1].name + " Throughput</a></h5>";
row += "<div id='tp" + data[i][0] + "' class='graph98 graph150'></div>";
row += "</div>";
row += "</div>";
row += "<div class='col-sm-6'>";
row += "<div class='card bg-light'>";
row += "<h5 class='card-title'><i class='fa fa-bar-chart'></i> " + data[i][1].name + " TCP RTT</h5>";
row += "<div id='rtt" + data[i][0] + "' class='graph98 graph150'></div>";
row += "</div>";
row += "</div>";
row += "</div>";
html += row;
}
$("#pills-funnel").html(html);
setTimeout(plotFunnels, 1000);
});
}
function plotFunnels() {
$.get("/api/funnel_for_queue/" + encodeURI(circuitId), (data) => {
for (let i=0; i<data.length; ++i) {
funnels.push(data[i][0], data[i][1].current_throughput[0]*8, data[i][1].current_throughput[1]*8);
for (const [k, v] of Object.entries(funnels.data)) {
let target_div = "tp" + k;
let graphData = v.toScatterGraphData();
let graph = document.getElementById(target_div);
Plotly.newPlot(graph, graphData, { margin: { l:0,r:0,b:0,t:0,pad:4 }, yaxis: { automargin: true }, xaxis: {automargin: true, title: "Time since now (seconds)"} }, { responsive: true });
}
rtts[data[i][0]].clear();
for (let j=0; j<data[i][1].rtts.length; j++) {
rtts[data[i][0]].push(data[i][1].rtts[j]);
}
rtts[data[i][0]].plot("rtt" + data[i][0]);
}
});
setTimeout(plotFunnels, 1000);
}
function start() { function start() {
colorReloadButton(); colorReloadButton();
updateHostCounts(); updateHostCounts();
@ -459,6 +522,7 @@
$.get("/api/watch_circuit/" + params.id, () => { $.get("/api/watch_circuit/" + params.id, () => {
pollQueue(); pollQueue();
getThroughput(); getThroughput();
getFunnel(params.id);
}); });
} }

View File

@ -190,16 +190,6 @@ const reloadModal = `
</div> </div>
</div>`; </div>`;
function yValsRingSort(y, head, capacity) {
let result = [];
for (let i=0; i<head; ++i)
result.push(y[i]);
for (let i=head; i<capacity; ++i) {
result.push(y[i])
}
return result;
}
// MultiRingBuffer provides an interface for storing multiple ring-buffers // MultiRingBuffer provides an interface for storing multiple ring-buffers
// of performance data, with a view to them ending up on the same graph. // of performance data, with a view to them ending up on the same graph.
class MultiRingBuffer { class MultiRingBuffer {
@ -219,14 +209,11 @@ class MultiRingBuffer {
let graphData = []; let graphData = [];
for (const [k, v] of Object.entries(this.data)) { for (const [k, v] of Object.entries(this.data)) {
if (k != rootName) { if (k != rootName) {
let total = v.download.reduce((a, b) => a + b) + let y = v.sortedY;
v.upload.reduce((a, b) => a + b); let dn = { x: v.x_axis, y: y.down, name: k + "_DL", type: 'scatter', stackgroup: 'dn' };
if (total > 0) { let up = { x: v.x_axis, y: y.up, name: k + "_UL", type: 'scatter', stackgroup: 'up' };
let dn = { x: v.x_axis, y: yValsRingSort(v.download, v.head, v.capacity), name: k + "_DL", type: 'scatter', stackgroup: 'dn' }; graphData.push(dn);
let up = { x: v.x_axis, y: yValsRingSort(v.upload, v.head, v.capacity), name: k + "_UL", type: 'scatter', stackgroup: 'up' }; graphData.push(up);
graphData.push(dn);
graphData.push(up);
}
} }
} }
@ -246,17 +233,16 @@ class MultiRingBuffer {
plotTotalThroughput(target_div) { plotTotalThroughput(target_div) {
let graph = document.getElementById(target_div); let graph = document.getElementById(target_div);
let totalDown = yValsRingSort(this.data['total'].download, this.data['total'].head, this.data['total'].capacity); let total = this.data['total'].sortedY();
let totalUp = yValsRingSort(this.data['total'].upload, this.data['total'].head, this.data['total'].capacity); let shaped = this.data['shaped'].sortedY();
let shapedDown = yValsRingSort(this.data['shaped'].download, this.data['shaped'].head, this.data['shaped'].capacity);
let shapedUp = yValsRingSort(this.data['shaped'].upload, this.data['shaped'].head, this.data['shaped'].capacity);
let x = this.data['total'].x_axis; let x = this.data['total'].x_axis;
let data = [ let data = [
{x: x, y:totalDown, name: 'Download', type: 'scatter', marker: {color: 'rgb(255,160,122)'}}, {x: x, y:total.down, name: 'Download', type: 'scatter', marker: {color: 'rgb(255,160,122)'}},
{x: x, y:totalUp, name: 'Upload', type: 'scatter', marker: {color: 'rgb(255,160,122)'}}, {x: x, y:total.up, name: 'Upload', type: 'scatter', marker: {color: 'rgb(255,160,122)'}},
{x: x, y:shapedDown, name: 'Shaped Download', type: 'scatter', fill: 'tozeroy', marker: {color: 'rgb(124,252,0)'}}, {x: x, y:shaped.down, name: 'Shaped Download', type: 'scatter', fill: 'tozeroy', marker: {color: 'rgb(124,252,0)'}},
{x: x, y:shapedUp, name: 'Shaped Upload', type: 'scatter', fill: 'tozeroy', marker: {color: 'rgb(124,252,0)'}}, {x: x, y:shaped.up, name: 'Shaped Upload', type: 'scatter', fill: 'tozeroy', marker: {color: 'rgb(124,252,0)'}},
]; ];
Plotly.newPlot(graph, data, { margin: { l:0,r:0,b:0,t:0,pad:4 }, yaxis: { automargin: true }, xaxis: {automargin: true, title: "Time since now (seconds)"} }, { responsive: true }); Plotly.newPlot(graph, data, { margin: { l:0,r:0,b:0,t:0,pad:4 }, yaxis: { automargin: true }, xaxis: {automargin: true, title: "Time since now (seconds)"} }, { responsive: true });
} }
@ -272,7 +258,7 @@ class RingBuffer {
for (var i = 0; i < capacity; ++i) { for (var i = 0; i < capacity; ++i) {
this.download.push(0.0); this.download.push(0.0);
this.upload.push(0.0); this.upload.push(0.0);
this.x_axis.push(0-i); this.x_axis.push(i);
} }
} }
@ -283,10 +269,27 @@ class RingBuffer {
this.head %= this.capacity; this.head %= this.capacity;
} }
sortedY() {
let result = {
down: [],
up: [],
};
for (let i=this.head; i<this.capacity; i++) {
result.down.push(this.download[i]);
result.up.push(this.upload[i]);
}
for (let i=0; i < this.head; i++) {
result.down.push(this.download[i]);
result.up.push(this.upload[i]);
}
return result;
}
toScatterGraphData() { toScatterGraphData() {
let y = this.sortedY();
let GraphData = [ let GraphData = [
{ x: this.x_axis, y: this.download, name: 'Download', type: 'scatter' }, { x: this.x_axis, y: y.down, name: 'Download', type: 'scatter' },
{ x: this.x_axis, y: this.upload, name: 'Upload', type: 'scatter' }, { x: this.x_axis, y: y.up, name: 'Upload', type: 'scatter' },
]; ];
return GraphData; return GraphData;
} }
@ -316,6 +319,10 @@ class RttHistogram {
this.entries[band] += 1; this.entries[band] += 1;
} }
pushBand(band, n) {
this.entries[band] += n;
}
plot(target_div) { plot(target_div) {
let gData = [ let gData = [
{ x: this.x, y: this.entries, type: 'bar', marker: { color: this.x, colorscale: 'RdBu' } } { x: this.x, y: this.entries, type: 'bar', marker: { color: this.x, colorscale: 'RdBu' } }

View File

@ -168,7 +168,7 @@
<footer>&copy; 2022-2023, LibreQoE LLC</footer> <footer>&copy; 2022-2023, LibreQoE LLC</footer>
<script> <script>
let throughput = new MultiRingBuffer(300); var throughput = new MultiRingBuffer(300);
function updateCurrentThroughput() { function updateCurrentThroughput() {
$.get("/api/current_throughput", (tp) => { $.get("/api/current_throughput", (tp) => {
@ -181,21 +181,28 @@
throughput.push("total", tp.bits_per_second[0], tp.bits_per_second[1]); throughput.push("total", tp.bits_per_second[0], tp.bits_per_second[1]);
throughput.push("shaped", tp.shaped_bits_per_second[0], tp.shaped_bits_per_second[1]); throughput.push("shaped", tp.shaped_bits_per_second[0], tp.shaped_bits_per_second[1]);
throughput.plotTotalThroughput("tpGraph"); throughput.plotTotalThroughput("tpGraph");
setTimeout(updateCurrentThroughput, 1000);
}); });
} }
let funnelData = new MultiRingBuffer(300); var funnelData = new MultiRingBuffer(300);
function updateSiteFunnel() { function updateSiteFunnel() {
$.get("/api/network_tree_summary/", (data) => { $.get("/api/network_tree_summary/", (data) => {
let table = "<table class='table' style='font-size: 8pt;'>";
for (let i = 0; i < data.length; ++i) { for (let i = 0; i < data.length; ++i) {
funnelData.push(data[i][1].name, data[i][1].current_throughput[0] * 8, data[i][1].current_throughput[1] * 8); let name = data[i][1].name;
if (name.length > 20) {
name = name.substring(0, 20) + "...";
}
table += "<tr>";
table += "<td class='redact'>" + redactText(name) + "</td>";
table += "<td>" + scaleNumber(data[i][1].current_throughput[0] * 8) + "</td>";
table += "<td>" + scaleNumber(data[i][1].current_throughput[1] * 8) + "</td>";
table += "</tr>";
} }
funnelData.plotStackedBars("siteFunnel", ""); table += "</table>";
$("#siteFunnel").html(table);
}); });
setTimeout(updateSiteFunnel, 1000);
} }
function updateCpu() { function updateCpu() {
@ -216,7 +223,6 @@
yaxis: { automargin: true, autorange: false, range: [0.0, 100.0] }, yaxis: { automargin: true, autorange: false, range: [0.0, 100.0] },
}, },
{ responsive: true }); { responsive: true });
setTimeout(updateCpu, 2000);
}); });
} }
@ -229,7 +235,6 @@
type: 'pie' type: 'pie'
}]; }];
Plotly.newPlot(graph, data, { margin: { l: 0, r: 0, b: 0, t: 12 }, showlegend: false }, { responsive: true }); Plotly.newPlot(graph, data, { margin: { l: 0, r: 0, b: 0, t: 12 }, showlegend: false }, { responsive: true });
setTimeout(updateRam, 30000);
}); });
} }
@ -262,30 +267,48 @@
function updateTop10() { function updateTop10() {
$.get("/api/top_10_downloaders", (tt) => { $.get("/api/top_10_downloaders", (tt) => {
updateNTable('#top10dl', tt); updateNTable('#top10dl', tt);
setTimeout(updateTop10, 5000);
}); });
} }
function updateWorst10() { function updateWorst10() {
$.get("/api/worst_10_rtt", (tt) => { $.get("/api/worst_10_rtt", (tt) => {
updateNTable('#worstRtt', tt); updateNTable('#worstRtt', tt);
setTimeout(updateWorst10, 5000);
}); });
} }
let rttGraph = new RttHistogram(); var rttGraph = new RttHistogram();
function updateHistogram() { function updateHistogram() {
$.get("/api/rtt_histogram", (rtt) => { $.get("/api/rtt_histogram", (rtt) => {
rttGraph.clear(); rttGraph.clear();
for (let i = 0; i < rtt.length; i++) { for (let i = 0; i < rtt.length; i++) {
rttGraph.push(rtt[i]); rttGraph.pushBand(i, rtt[i]);
} }
rttGraph.plot("rttHistogram"); rttGraph.plot("rttHistogram");
setTimeout(updateHistogram, 5000);
}); });
} }
var tickCount = 0;
function OneSecondCadence() {
updateCurrentThroughput();
updateSiteFunnel();
if (tickCount % 5 == 0) {
updateHistogram();
updateWorst10();
updateTop10();
}
if (tickCount % 10 == 0) {
updateCpu();
updateRam();
}
tickCount++;
setTimeout(OneSecondCadence, 1000);
}
function start() { function start() {
if (isRedacted()) { if (isRedacted()) {
//console.log("Redacting"); //console.log("Redacting");
@ -302,6 +325,7 @@
updateHistogram(); updateHistogram();
updateHostCounts(); updateHostCounts();
updateSiteFunnel(); updateSiteFunnel();
OneSecondCadence();
} }
$(document).ready(start); $(document).ready(start);

View File

@ -65,6 +65,7 @@
<div class="row mbot8 row220"> <div class="row mbot8 row220">
<!-- 5 minutes of throughput --> <!-- 5 minutes of throughput -->
<!--
<div class="col-sm-4"> <div class="col-sm-4">
<div class="card bg-light"> <div class="card bg-light">
<div class="card-body"> <div class="card-body">
@ -73,6 +74,7 @@
</div> </div>
</div> </div>
</div> </div>
-->
<!-- RTT Histogram --> <!-- RTT Histogram -->
<div class="col-sm-4"> <div class="col-sm-4">
@ -258,7 +260,7 @@
$("#treeList").html(tbl); $("#treeList").html(tbl);
// Build the stacked chart // Build the stacked chart
buffers.plotStackedBars("tpGraph", rootName); //buffers.plotStackedBars("tpGraph", rootName);
// Build the RTT histo // Build the RTT histo
rtt_histo.plot("rttHistogram"); rtt_histo.plot("rttHistogram");

View File

@ -2,8 +2,8 @@ use lqos_bus::{BusRequest, BusResponse, TcHandle};
use lqos_utils::hex_string::read_hex_string; use lqos_utils::hex_string::read_hex_string;
use nix::libc::getpid; use nix::libc::getpid;
use pyo3::{ use pyo3::{
exceptions::PyOSError, pyclass, pyfunction, pymodule, types::PyModule, exceptions::PyOSError, pyclass, pyfunction, pymethods, pymodule,
wrap_pyfunction, PyResult, Python, pymethods, types::PyModule, wrap_pyfunction, PyResult, Python,
}; };
use std::{ use std::{
fs::{remove_file, File}, fs::{remove_file, File},
@ -158,7 +158,13 @@ impl BatchedCommands {
Ok(Self { batch: Vec::new() }) Ok(Self { batch: Vec::new() })
} }
pub fn add_ip_mapping(&mut self, ip: String, classid: String, cpu: String, upload: bool) -> PyResult<()> { pub fn add_ip_mapping(
&mut self,
ip: String,
classid: String,
cpu: String,
upload: bool,
) -> PyResult<()> {
let request = parse_add_ip(&ip, &classid, &cpu, upload); let request = parse_add_ip(&ip, &classid, &cpu, upload);
if let Ok(request) = request { if let Ok(request) = request {
self.batch.push(request); self.batch.push(request);

View File

@ -13,10 +13,8 @@ lqos_sys = { path = "../lqos_sys" }
lqos_utils = { path = "../lqos_utils" } lqos_utils = { path = "../lqos_utils" }
log = "0" log = "0"
log-once = "0.4.0" log-once = "0.4.0"
lazy_static = "1.4"
parking_lot = "0"
tokio = { version = "1", features = [ "full", "parking_lot" ] } tokio = { version = "1", features = [ "full", "parking_lot" ] }
rayon = "1" once_cell = "1"
[dev-dependencies] [dev-dependencies]
criterion = { version = "0", features = [ "html_reports"] } criterion = { version = "0", features = [ "html_reports"] }

View File

@ -3,7 +3,7 @@ use lqos_bus::BusResponse;
pub fn get_raw_circuit_data(circuit_id: &str) -> BusResponse { pub fn get_raw_circuit_data(circuit_id: &str) -> BusResponse {
still_watching(circuit_id); still_watching(circuit_id);
let reader = CIRCUIT_TO_QUEUE.read(); let reader = CIRCUIT_TO_QUEUE.read().unwrap();
if let Some(circuit) = reader.get(circuit_id) { if let Some(circuit) = reader.get(circuit_id) {
if let Ok(json) = serde_json::to_string(circuit) { if let Ok(json) = serde_json::to_string(circuit) {
BusResponse::RawQueueData(json) BusResponse::RawQueueData(json)

View File

@ -1,9 +1,8 @@
use crate::queue_store::QueueStore; use once_cell::sync::Lazy;
use lazy_static::*;
use parking_lot::RwLock;
use std::collections::HashMap;
lazy_static! { use crate::queue_store::QueueStore;
pub(crate) static ref CIRCUIT_TO_QUEUE: RwLock<HashMap<String, QueueStore>> = use std::collections::HashMap;
RwLock::new(HashMap::new()); use std::sync::RwLock;
}
pub(crate) static CIRCUIT_TO_QUEUE: Lazy<RwLock<HashMap<String, QueueStore>>> =
Lazy::new(|| RwLock::new(HashMap::new()));

View File

@ -1,10 +1,6 @@
use lazy_static::*;
use std::sync::atomic::AtomicU64; use std::sync::atomic::AtomicU64;
lazy_static! { pub(crate) static QUEUE_MONITOR_INTERVAL: AtomicU64 = AtomicU64::new(1000);
pub(crate) static ref QUEUE_MONITOR_INTERVAL: AtomicU64 =
AtomicU64::new(1000);
}
pub fn set_queue_refresh_interval(interval_ms: u64) { pub fn set_queue_refresh_interval(interval_ms: u64) {
QUEUE_MONITOR_INTERVAL QUEUE_MONITOR_INTERVAL

View File

@ -1,19 +1,16 @@
use std::sync::RwLock;
use crate::queue_structure::{ use crate::queue_structure::{
queue_network::QueueNetwork, queue_node::QueueNode, read_queueing_structure, queue_network::QueueNetwork, queue_node::QueueNode, read_queueing_structure,
}; };
use lazy_static::*;
use log::{error, info}; use log::{error, info};
use lqos_utils::file_watcher::FileWatcher; use lqos_utils::file_watcher::FileWatcher;
use parking_lot::RwLock; use once_cell::sync::Lazy;
use thiserror::Error; use thiserror::Error;
use tokio::task::spawn_blocking; use tokio::task::spawn_blocking;
lazy_static! { pub(crate) static QUEUE_STRUCTURE: Lazy<RwLock<QueueStructure>> =
/// Global storage of the shaped devices csv data. Lazy::new(|| RwLock::new(QueueStructure::new()));
/// Updated by the file system watcher whenever
/// the underlying file changes.
pub(crate) static ref QUEUE_STRUCTURE : RwLock<QueueStructure> = RwLock::new(QueueStructure::new());
}
#[derive(Clone)] #[derive(Clone)]
pub(crate) struct QueueStructure { pub(crate) struct QueueStructure {
@ -48,7 +45,7 @@ pub async fn spawn_queue_structure_monitor() {
fn update_queue_structure() { fn update_queue_structure() {
info!("queueingStructure.json reloaded"); info!("queueingStructure.json reloaded");
QUEUE_STRUCTURE.write().update(); QUEUE_STRUCTURE.write().unwrap().update();
} }
/// Fires up a Linux file system watcher than notifies /// Fires up a Linux file system watcher than notifies

View File

@ -1,7 +1,7 @@
use lqos_utils::hex_string::read_hex_string;
use super::QueueStructureError; use super::QueueStructureError;
use log::error; use log::error;
use lqos_bus::TcHandle; use lqos_bus::TcHandle;
use lqos_utils::hex_string::read_hex_string;
use serde_json::Value; use serde_json::Value;
#[derive(Default, Clone, Debug)] #[derive(Default, Clone, Debug)]

View File

@ -5,7 +5,6 @@ use crate::{
use log::{info, warn}; use log::{info, warn};
use lqos_config::LibreQoSConfig; use lqos_config::LibreQoSConfig;
use lqos_utils::fdtimer::periodic; use lqos_utils::fdtimer::periodic;
use rayon::prelude::{IntoParallelRefMutIterator, ParallelIterator};
mod reader; mod reader;
mod watched_queues; mod watched_queues;
use self::watched_queues::expire_watched_queues; use self::watched_queues::expire_watched_queues;
@ -13,7 +12,7 @@ use watched_queues::WATCHED_QUEUES;
pub use watched_queues::{add_watched_queue, still_watching}; pub use watched_queues::{add_watched_queue, still_watching};
fn track_queues() { fn track_queues() {
let mut watching = WATCHED_QUEUES.write(); let mut watching = WATCHED_QUEUES.write().unwrap();
if watching.is_empty() { if watching.is_empty() {
//info!("No queues marked for read."); //info!("No queues marked for read.");
return; // There's nothing to do - bail out fast return; // There's nothing to do - bail out fast
@ -24,7 +23,7 @@ fn track_queues() {
return; return;
} }
let config = config.unwrap(); let config = config.unwrap();
watching.par_iter_mut().for_each(|q| { watching.iter_mut().for_each(|q| {
let (circuit_id, download_class, upload_class) = q.get(); let (circuit_id, download_class, upload_class) = q.get();
let (download, upload) = if config.on_a_stick_mode { let (download, upload) = if config.on_a_stick_mode {
@ -50,7 +49,7 @@ fn track_queues() {
if let Ok(download) = download { if let Ok(download) = download {
if let Ok(upload) = upload { if let Ok(upload) = upload {
let mut mapping = CIRCUIT_TO_QUEUE.write(); let mut mapping = CIRCUIT_TO_QUEUE.write().unwrap();
if let Some(circuit) = mapping.get_mut(circuit_id) { if let Some(circuit) = mapping.get_mut(circuit_id) {
circuit.update(&download[0], &upload[0]); circuit.update(&download[0], &upload[0]);
} else { } else {

View File

@ -1,14 +1,12 @@
use crate::queue_structure::QUEUE_STRUCTURE; use crate::queue_structure::QUEUE_STRUCTURE;
use lazy_static::*;
use log::{info, warn}; use log::{info, warn};
use lqos_bus::TcHandle; use lqos_bus::TcHandle;
use lqos_utils::unix_time::unix_now; use lqos_utils::unix_time::unix_now;
use parking_lot::RwLock; use once_cell::sync::Lazy;
use std::sync::RwLock;
lazy_static! { pub(crate) static WATCHED_QUEUES: Lazy<RwLock<Vec<WatchedQueue>>> =
pub(crate) static ref WATCHED_QUEUES: RwLock<Vec<WatchedQueue>> = Lazy::new(|| RwLock::new(Vec::new()));
RwLock::new(Vec::new());
}
pub(crate) struct WatchedQueue { pub(crate) struct WatchedQueue {
circuit_id: String, circuit_id: String,
@ -35,7 +33,7 @@ pub fn add_watched_queue(circuit_id: &str) {
//info!("Watching queue {circuit_id}"); //info!("Watching queue {circuit_id}");
let max = unsafe { lqos_sys::libbpf_num_possible_cpus() } * 2; let max = unsafe { lqos_sys::libbpf_num_possible_cpus() } * 2;
{ {
let read_lock = WATCHED_QUEUES.read(); let read_lock = WATCHED_QUEUES.read().unwrap();
if read_lock.iter().any(|q| q.circuit_id == circuit_id) { if read_lock.iter().any(|q| q.circuit_id == circuit_id) {
warn!("Queue {circuit_id} is already being watched. Duplicate ignored."); warn!("Queue {circuit_id} is already being watched. Duplicate ignored.");
return; // No duplicates, please return; // No duplicates, please
@ -49,7 +47,7 @@ pub fn add_watched_queue(circuit_id: &str) {
} }
} }
if let Some(queues) = &QUEUE_STRUCTURE.read().maybe_queues { if let Some(queues) = &QUEUE_STRUCTURE.read().unwrap().maybe_queues {
if let Some(circuit) = queues.iter().find(|c| { if let Some(circuit) = queues.iter().find(|c| {
c.circuit_id.is_some() && c.circuit_id.as_ref().unwrap() == circuit_id c.circuit_id.is_some() && c.circuit_id.as_ref().unwrap() == circuit_id
}) { }) {
@ -60,7 +58,7 @@ pub fn add_watched_queue(circuit_id: &str) {
upload_class: circuit.up_class_id, upload_class: circuit.up_class_id,
}; };
WATCHED_QUEUES.write().push(new_watch); WATCHED_QUEUES.write().unwrap().push(new_watch);
//info!("Added {circuit_id} to watched queues. Now watching {} queues.", WATCHED_QUEUES.read().len()); //info!("Added {circuit_id} to watched queues. Now watching {} queues.", WATCHED_QUEUES.read().len());
} else { } else {
warn!("No circuit ID of {circuit_id}"); warn!("No circuit ID of {circuit_id}");
@ -71,13 +69,13 @@ pub fn add_watched_queue(circuit_id: &str) {
} }
pub(crate) fn expire_watched_queues() { pub(crate) fn expire_watched_queues() {
let mut lock = WATCHED_QUEUES.write(); let mut lock = WATCHED_QUEUES.write().unwrap();
let now = unix_now().unwrap_or(0); let now = unix_now().unwrap_or(0);
lock.retain(|w| w.expires_unix_time > now); lock.retain(|w| w.expires_unix_time > now);
} }
pub fn still_watching(circuit_id: &str) { pub fn still_watching(circuit_id: &str) {
let mut lock = WATCHED_QUEUES.write(); let mut lock = WATCHED_QUEUES.write().unwrap();
if let Some(q) = lock.iter_mut().find(|q| q.circuit_id == circuit_id) { if let Some(q) = lock.iter_mut().find(|q| q.circuit_id == circuit_id) {
//info!("Still watching circuit: {circuit_id}"); //info!("Still watching circuit: {circuit_id}");
q.refresh_timer(); q.refresh_timer();

View File

@ -59,9 +59,9 @@ impl XdpIpAddress {
/// Convers an `XdpIpAddress` type to a Rust `IpAddr` type, using /// Convers an `XdpIpAddress` type to a Rust `IpAddr` type, using
/// the in-build mapped function for squishing IPv4 into IPv6 /// the in-build mapped function for squishing IPv4 into IPv6
pub fn as_ipv6(&self) -> Ipv6Addr { pub fn as_ipv6(&self) -> Ipv6Addr {
if self.is_v4() if self.is_v4() {
{ Ipv4Addr::new(self.0[12], self.0[13], self.0[14], self.0[15])
Ipv4Addr::new(self.0[12], self.0[13], self.0[14], self.0[15]).to_ipv6_mapped() .to_ipv6_mapped()
} else { } else {
Ipv6Addr::new( Ipv6Addr::new(
BigEndian::read_u16(&self.0[0..2]), BigEndian::read_u16(&self.0[0..2]),
@ -78,8 +78,7 @@ impl XdpIpAddress {
/// Converts an `XdpIpAddress` type to a Rust `IpAddr` type /// Converts an `XdpIpAddress` type to a Rust `IpAddr` type
pub fn as_ip(&self) -> IpAddr { pub fn as_ip(&self) -> IpAddr {
if self.is_v4() if self.is_v4() {
{
// It's an IPv4 Address // It's an IPv4 Address
IpAddr::V4(Ipv4Addr::new(self.0[12], self.0[13], self.0[14], self.0[15])) IpAddr::V4(Ipv4Addr::new(self.0[12], self.0[13], self.0[14], self.0[15]))
} else { } else {

View File

@ -1,38 +1,38 @@
use log::error; use log::error;
use thiserror::Error; use thiserror::Error;
/// `read_hex_string` converts a string from C-friendly Hex format /// `read_hex_string` converts a string from C-friendly Hex format
/// (e.g. `0xC12`) into a hexadecimal `u32`. /// (e.g. `0xC12`) into a hexadecimal `u32`.
/// ///
/// ## Parameters /// ## Parameters
/// ///
/// * `s`: the string to attempt to parse. /// * `s`: the string to attempt to parse.
/// ///
/// ## Returns /// ## Returns
/// ///
/// Either a converted `u32` or a `HexParseError`. /// Either a converted `u32` or a `HexParseError`.
/// ///
/// ## Example /// ## Example
/// ///
/// ```rust /// ```rust
/// use lqos_utils::hex_string::read_hex_string; /// use lqos_utils::hex_string::read_hex_string;
/// assert_eq!(read_hex_string("0x12AD").unwrap(), 4781); /// assert_eq!(read_hex_string("0x12AD").unwrap(), 4781);
/// ``` /// ```
pub fn read_hex_string(s: &str) -> Result<u32, HexParseError> { pub fn read_hex_string(s: &str) -> Result<u32, HexParseError> {
let result = u32::from_str_radix(&s.replace("0x", ""), 16); let result = u32::from_str_radix(&s.replace("0x", ""), 16);
match result { match result {
Ok(data) => Ok(data), Ok(data) => Ok(data),
Err(e) => { Err(e) => {
error!("Unable to convert {s} to a u32"); error!("Unable to convert {s} to a u32");
error!("{:?}", e); error!("{:?}", e);
Err(HexParseError::ParseError) Err(HexParseError::ParseError)
}
} }
} }
}
/// `HexParseError` is an error type defining what can go wrong /// `HexParseError` is an error type defining what can go wrong
/// parsing a string into a `u32` hex number. /// parsing a string into a `u32` hex number.
#[derive(Error, Debug)] #[derive(Error, Debug)]
pub enum HexParseError { pub enum HexParseError {
#[error("Unable to decode string into valid hex")] #[error("Unable to decode string into valid hex")]
ParseError, ParseError,
@ -55,4 +55,4 @@ mod tests {
assert!(read_hex_string("0xG00F").is_err()); assert!(read_hex_string("0xG00F").is_err());
assert!(read_hex_string("G00F").is_err()); assert!(read_hex_string("G00F").is_err());
} }
} }

View File

@ -1,7 +1,7 @@
mod commands; mod commands;
pub mod fdtimer; pub mod fdtimer;
pub mod file_watcher; pub mod file_watcher;
pub mod hex_string;
pub mod packet_scale; pub mod packet_scale;
mod string_table_enum; mod string_table_enum;
pub mod unix_time; pub mod unix_time;
pub mod hex_string;

View File

@ -15,7 +15,6 @@ lqos_queue_tracker = { path = "../lqos_queue_tracker" }
lqos_utils = { path = "../lqos_utils" } lqos_utils = { path = "../lqos_utils" }
tokio = { version = "1", features = [ "full", "parking_lot" ] } tokio = { version = "1", features = [ "full", "parking_lot" ] }
once_cell = "1.17.1" once_cell = "1.17.1"
parking_lot = "0.12"
lqos_bus = { path = "../lqos_bus" } lqos_bus = { path = "../lqos_bus" }
signal-hook = "0.3" signal-hook = "0.3"
serde_json = "1" serde_json = "1"
@ -23,7 +22,6 @@ serde = { version = "1.0", features = ["derive"] }
env_logger = "0" env_logger = "0"
log = "0" log = "0"
nix = "0" nix = "0"
rayon = "1"
sysinfo = "0" sysinfo = "0"
# Support JemAlloc on supported platforms # Support JemAlloc on supported platforms

View File

@ -164,12 +164,15 @@ fn handle_bus_requests(
BusRequest::GetNetworkMap { parent } => { BusRequest::GetNetworkMap { parent } => {
shaped_devices_tracker::get_one_network_map_layer(*parent) shaped_devices_tracker::get_one_network_map_layer(*parent)
} }
BusRequest::TopMapQueues( n_queues ) => { BusRequest::TopMapQueues(n_queues) => {
shaped_devices_tracker::get_top_n_root_queues(*n_queues) shaped_devices_tracker::get_top_n_root_queues(*n_queues)
} }
BusRequest::GetNodeNamesFromIds(nodes) => { BusRequest::GetNodeNamesFromIds(nodes) => {
shaped_devices_tracker::map_node_names(nodes) shaped_devices_tracker::map_node_names(nodes)
} }
BusRequest::GetFunnel { target: parent } => {
shaped_devices_tracker::get_funnel(parent)
}
}); });
} }
} }

View File

@ -4,7 +4,7 @@ use lqos_bus::BusResponse;
use lqos_config::{ConfigShapedDevices, NetworkJsonNode}; use lqos_config::{ConfigShapedDevices, NetworkJsonNode};
use lqos_utils::file_watcher::FileWatcher; use lqos_utils::file_watcher::FileWatcher;
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use parking_lot::RwLock; use std::sync::RwLock;
use tokio::task::spawn_blocking; use tokio::task::spawn_blocking;
mod netjson; mod netjson;
pub use netjson::*; pub use netjson::*;
@ -17,11 +17,14 @@ fn load_shaped_devices() {
let shaped_devices = ConfigShapedDevices::load(); let shaped_devices = ConfigShapedDevices::load();
if let Ok(new_file) = shaped_devices { if let Ok(new_file) = shaped_devices {
info!("ShapedDevices.csv loaded"); info!("ShapedDevices.csv loaded");
*SHAPED_DEVICES.write() = new_file; *SHAPED_DEVICES.write().unwrap() = new_file;
crate::throughput_tracker::THROUGHPUT_TRACKER.write().refresh_circuit_ids(); crate::throughput_tracker::THROUGHPUT_TRACKER
.write()
.unwrap()
.refresh_circuit_ids();
} else { } else {
warn!("ShapedDevices.csv failed to load, see previous error messages. Reverting to empty set."); warn!("ShapedDevices.csv failed to load, see previous error messages. Reverting to empty set.");
*SHAPED_DEVICES.write() = ConfigShapedDevices::default(); *SHAPED_DEVICES.write().unwrap() = ConfigShapedDevices::default();
} }
} }
@ -55,7 +58,7 @@ fn watch_for_shaped_devices_changing() -> Result<()> {
} }
pub fn get_one_network_map_layer(parent_idx: usize) -> BusResponse { pub fn get_one_network_map_layer(parent_idx: usize) -> BusResponse {
let net_json = NETWORK_JSON.read(); let net_json = NETWORK_JSON.read().unwrap();
if let Some(parent) = net_json.get_cloned_entry_by_index(parent_idx) { if let Some(parent) = net_json.get_cloned_entry_by_index(parent_idx) {
let mut nodes = vec![(parent_idx, parent)]; let mut nodes = vec![(parent_idx, parent)];
nodes.extend_from_slice(&net_json.get_cloned_children(parent_idx)); nodes.extend_from_slice(&net_json.get_cloned_children(parent_idx));
@ -66,14 +69,14 @@ pub fn get_one_network_map_layer(parent_idx: usize) -> BusResponse {
} }
pub fn get_top_n_root_queues(n_queues: usize) -> BusResponse { pub fn get_top_n_root_queues(n_queues: usize) -> BusResponse {
let net_json = NETWORK_JSON.read(); let net_json = NETWORK_JSON.read().unwrap();
if let Some(parent) = net_json.get_cloned_entry_by_index(0) { if let Some(parent) = net_json.get_cloned_entry_by_index(0) {
let mut nodes = vec![(0, parent)]; let mut nodes = vec![(0, parent)];
nodes.extend_from_slice(&net_json.get_cloned_children(0)); nodes.extend_from_slice(&net_json.get_cloned_children(0));
// Remove the top-level entry for root // Remove the top-level entry for root
nodes.remove(0); nodes.remove(0);
// Sort by total bandwidth (up + down) descending // Sort by total bandwidth (up + down) descending
nodes.sort_by(|a,b| { nodes.sort_by(|a, b| {
let total_a = a.1.current_throughput.0 + a.1.current_throughput.1; let total_a = a.1.current_throughput.0 + a.1.current_throughput.1;
let total_b = b.1.current_throughput.0 + b.1.current_throughput.1; let total_b = b.1.current_throughput.0 + b.1.current_throughput.1;
total_b.cmp(&total_a) total_b.cmp(&total_a)
@ -81,19 +84,22 @@ pub fn get_top_n_root_queues(n_queues: usize) -> BusResponse {
// Summarize everything after n_queues // Summarize everything after n_queues
if nodes.len() > n_queues { if nodes.len() > n_queues {
let mut other_bw = (0, 0); let mut other_bw = (0, 0);
nodes.drain(n_queues ..).for_each(|n| { nodes.drain(n_queues..).for_each(|n| {
other_bw.0 += n.1.current_throughput.0; other_bw.0 += n.1.current_throughput.0;
other_bw.1 += n.1.current_throughput.1; other_bw.1 += n.1.current_throughput.1;
}); });
nodes.push((0, NetworkJsonNode{ nodes.push((
name: "Others".into(), 0,
max_throughput: (0,0), NetworkJsonNode {
current_throughput: other_bw, name: "Others".into(),
rtts: Vec::new(), max_throughput: (0, 0),
parents: Vec::new(), current_throughput: other_bw,
immediate_parent: None, rtts: Vec::new(),
})); parents: Vec::new(),
immediate_parent: None,
},
));
} }
BusResponse::NetworkMap(nodes) BusResponse::NetworkMap(nodes)
} else { } else {
@ -103,14 +109,25 @@ pub fn get_top_n_root_queues(n_queues: usize) -> BusResponse {
pub fn map_node_names(nodes: &[usize]) -> BusResponse { pub fn map_node_names(nodes: &[usize]) -> BusResponse {
let mut result = Vec::new(); let mut result = Vec::new();
let reader = NETWORK_JSON.read(); let reader = NETWORK_JSON.read().unwrap();
nodes.iter().for_each(|id| { nodes.iter().for_each(|id| {
if let Some(node) = reader.nodes.get(*id) { if let Some(node) = reader.nodes.get(*id) {
result.push(( result.push((*id, node.name.clone()));
*id,
node.name.clone(),
));
} }
}); });
BusResponse::NodeNames(result) BusResponse::NodeNames(result)
} }
pub fn get_funnel(circuit_id: &str) -> BusResponse {
let reader = NETWORK_JSON.read().unwrap();
if let Some(index) = reader.get_index_for_name(circuit_id) {
// Reverse the scanning order and skip the last entry (the parent)
let mut result = Vec::new();
for idx in reader.nodes[index].parents.iter().rev().skip(1) {
result.push((*idx, reader.nodes[*idx].clone()));
}
return BusResponse::NetworkMap(result);
}
BusResponse::Fail("Unknown Node".into())
}

View File

@ -1,12 +1,13 @@
use log::{info, error, warn}; use anyhow::Result;
use log::{error, info, warn};
use lqos_config::NetworkJson; use lqos_config::NetworkJson;
use lqos_utils::file_watcher::FileWatcher; use lqos_utils::file_watcher::FileWatcher;
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use parking_lot::RwLock; use std::sync::RwLock;
use tokio::task::spawn_blocking; use tokio::task::spawn_blocking;
use anyhow::Result;
pub static NETWORK_JSON: Lazy<RwLock<NetworkJson>> = Lazy::new(|| RwLock::new(NetworkJson::default())); pub static NETWORK_JSON: Lazy<RwLock<NetworkJson>> =
Lazy::new(|| RwLock::new(NetworkJson::default()));
pub async fn network_json_watcher() { pub async fn network_json_watcher() {
spawn_blocking(|| { spawn_blocking(|| {
@ -18,30 +19,28 @@ pub async fn network_json_watcher() {
/// Fires up a Linux file system watcher than notifies /// Fires up a Linux file system watcher than notifies
/// when `network.json` changes, and triggers a reload. /// when `network.json` changes, and triggers a reload.
fn watch_for_network_json_changing() -> Result<()> { fn watch_for_network_json_changing() -> Result<()> {
let watch_path = NetworkJson::path(); let watch_path = NetworkJson::path();
if watch_path.is_err() { if watch_path.is_err() {
error!("Unable to generate path for network.json"); error!("Unable to generate path for network.json");
return Err(anyhow::Error::msg( return Err(anyhow::Error::msg("Unable to create path for network.json"));
"Unable to create path for network.json",
));
}
let watch_path = watch_path.unwrap();
let mut watcher = FileWatcher::new("network.json", watch_path);
watcher.set_file_exists_callback(load_network_json);
watcher.set_file_created_callback(load_network_json);
watcher.set_file_changed_callback(load_network_json);
loop {
let result = watcher.watch();
info!("network.json watcher returned: {result:?}");
}
} }
let watch_path = watch_path.unwrap();
fn load_network_json() {
let njs = NetworkJson::load(); let mut watcher = FileWatcher::new("network.json", watch_path);
if let Ok(njs) = njs { watcher.set_file_exists_callback(load_network_json);
*NETWORK_JSON.write() = njs; watcher.set_file_created_callback(load_network_json);
} else { watcher.set_file_changed_callback(load_network_json);
warn!("Unable to load network.json"); loop {
} let result = watcher.watch();
} info!("network.json watcher returned: {result:?}");
}
}
fn load_network_json() {
let njs = NetworkJson::load();
if let Ok(njs) = njs {
*NETWORK_JSON.write().unwrap() = njs;
} else {
warn!("Unable to load network.json");
}
}

View File

@ -1,17 +1,21 @@
mod throughput_entry; mod throughput_entry;
mod tracking_data; mod tracking_data;
use crate::{throughput_tracker::tracking_data::ThroughputTracker, shaped_devices_tracker::NETWORK_JSON}; use crate::{
shaped_devices_tracker::NETWORK_JSON,
throughput_tracker::tracking_data::ThroughputTracker,
};
use log::{info, warn}; use log::{info, warn};
use lqos_bus::{BusResponse, IpStats, TcHandle, XdpPpingResult}; use lqos_bus::{BusResponse, IpStats, TcHandle, XdpPpingResult};
use lqos_sys::XdpIpAddress; use lqos_sys::XdpIpAddress;
use lqos_utils::{fdtimer::periodic, unix_time::time_since_boot}; use lqos_utils::{fdtimer::periodic, unix_time::time_since_boot};
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use parking_lot::RwLock; use std::sync::RwLock;
use std::time::Duration; use std::time::Duration;
const RETIRE_AFTER_SECONDS: u64 = 30; const RETIRE_AFTER_SECONDS: u64 = 30;
pub static THROUGHPUT_TRACKER: Lazy<RwLock<ThroughputTracker>> = Lazy::new(|| RwLock::new(ThroughputTracker::new())); pub static THROUGHPUT_TRACKER: Lazy<RwLock<ThroughputTracker>> =
Lazy::new(|| RwLock::new(ThroughputTracker::new()));
pub fn spawn_throughput_monitor() { pub fn spawn_throughput_monitor() {
info!("Starting the bandwidth monitor thread."); info!("Starting the bandwidth monitor thread.");
@ -20,12 +24,13 @@ pub fn spawn_throughput_monitor() {
std::thread::spawn(move || { std::thread::spawn(move || {
periodic(interval_ms, "Throughput Monitor", &mut || { periodic(interval_ms, "Throughput Monitor", &mut || {
let mut throughput = THROUGHPUT_TRACKER.write(); let mut throughput = THROUGHPUT_TRACKER.write().unwrap();
let mut net_json = NETWORK_JSON.write(); let mut net_json = NETWORK_JSON.write().unwrap();
throughput.copy_previous_and_reset_rtt(&mut net_json); net_json.zero_throughput_and_rtt();
throughput.apply_new_throughput_counters(); throughput.copy_previous_and_reset_rtt();
throughput.apply_rtt_data(); throughput.apply_new_throughput_counters(&mut net_json);
throughput.update_totals(&mut net_json); throughput.apply_rtt_data(&mut net_json);
throughput.update_totals();
throughput.next_cycle(); throughput.next_cycle();
}); });
}); });
@ -33,7 +38,7 @@ pub fn spawn_throughput_monitor() {
pub fn current_throughput() -> BusResponse { pub fn current_throughput() -> BusResponse {
let (bits_per_second, packets_per_second, shaped_bits_per_second) = { let (bits_per_second, packets_per_second, shaped_bits_per_second) = {
let tp = THROUGHPUT_TRACKER.read(); let tp = THROUGHPUT_TRACKER.read().unwrap();
( (
tp.bits_per_second(), tp.bits_per_second(),
tp.packets_per_second(), tp.packets_per_second(),
@ -49,7 +54,7 @@ pub fn current_throughput() -> BusResponse {
pub fn host_counters() -> BusResponse { pub fn host_counters() -> BusResponse {
let mut result = Vec::new(); let mut result = Vec::new();
let tp = THROUGHPUT_TRACKER.read(); let tp = THROUGHPUT_TRACKER.read().unwrap();
tp.raw_data.iter().for_each(|(k, v)| { tp.raw_data.iter().for_each(|(k, v)| {
let ip = k.as_ip(); let ip = k.as_ip();
let (down, up) = v.bytes_per_second; let (down, up) = v.bytes_per_second;
@ -67,7 +72,7 @@ type TopList = (XdpIpAddress, (u64, u64), (u64, u64), f32, TcHandle, String);
pub fn top_n(start: u32, end: u32) -> BusResponse { pub fn top_n(start: u32, end: u32) -> BusResponse {
let mut full_list: Vec<TopList> = { let mut full_list: Vec<TopList> = {
let tp = THROUGHPUT_TRACKER.read(); let tp = THROUGHPUT_TRACKER.read().unwrap();
tp.raw_data tp.raw_data
.iter() .iter()
.filter(|(ip, _)| !ip.as_ip().is_loopback()) .filter(|(ip, _)| !ip.as_ip().is_loopback())
@ -112,7 +117,7 @@ pub fn top_n(start: u32, end: u32) -> BusResponse {
pub fn worst_n(start: u32, end: u32) -> BusResponse { pub fn worst_n(start: u32, end: u32) -> BusResponse {
let mut full_list: Vec<TopList> = { let mut full_list: Vec<TopList> = {
let tp = THROUGHPUT_TRACKER.read(); let tp = THROUGHPUT_TRACKER.read().unwrap();
tp.raw_data tp.raw_data
.iter() .iter()
.filter(|(ip, _)| !ip.as_ip().is_loopback()) .filter(|(ip, _)| !ip.as_ip().is_loopback())
@ -157,7 +162,7 @@ pub fn worst_n(start: u32, end: u32) -> BusResponse {
} }
pub fn best_n(start: u32, end: u32) -> BusResponse { pub fn best_n(start: u32, end: u32) -> BusResponse {
let mut full_list: Vec<TopList> = { let mut full_list: Vec<TopList> = {
let tp = THROUGHPUT_TRACKER.read(); let tp = THROUGHPUT_TRACKER.read().unwrap();
tp.raw_data tp.raw_data
.iter() .iter()
.filter(|(ip, _)| !ip.as_ip().is_loopback()) .filter(|(ip, _)| !ip.as_ip().is_loopback())
@ -203,7 +208,7 @@ pub fn best_n(start: u32, end: u32) -> BusResponse {
} }
pub fn xdp_pping_compat() -> BusResponse { pub fn xdp_pping_compat() -> BusResponse {
let raw = THROUGHPUT_TRACKER.read(); let raw = THROUGHPUT_TRACKER.read().unwrap();
let result = raw let result = raw
.raw_data .raw_data
.iter() .iter()
@ -242,7 +247,7 @@ pub fn xdp_pping_compat() -> BusResponse {
pub fn rtt_histogram() -> BusResponse { pub fn rtt_histogram() -> BusResponse {
let mut result = vec![0; 20]; let mut result = vec![0; 20];
let reader = THROUGHPUT_TRACKER.read(); let reader = THROUGHPUT_TRACKER.read().unwrap();
for (_, data) in reader for (_, data) in reader
.raw_data .raw_data
.iter() .iter()
@ -265,7 +270,7 @@ pub fn rtt_histogram() -> BusResponse {
pub fn host_counts() -> BusResponse { pub fn host_counts() -> BusResponse {
let mut total = 0; let mut total = 0;
let mut shaped = 0; let mut shaped = 0;
let tp = THROUGHPUT_TRACKER.read(); let tp = THROUGHPUT_TRACKER.read().unwrap();
tp.raw_data tp.raw_data
.iter() .iter()
.filter(|(_, d)| retire_check(tp.cycle, d.most_recent_cycle)) .filter(|(_, d)| retire_check(tp.cycle, d.most_recent_cycle))
@ -294,7 +299,7 @@ pub fn all_unknown_ips() -> BusResponse {
let five_minutes_ago_nanoseconds = five_minutes_ago.as_nanos(); let five_minutes_ago_nanoseconds = five_minutes_ago.as_nanos();
let mut full_list: Vec<FullList> = { let mut full_list: Vec<FullList> = {
let tp = THROUGHPUT_TRACKER.read(); let tp = THROUGHPUT_TRACKER.read().unwrap();
tp.raw_data tp.raw_data
.iter() .iter()
.filter(|(ip, _)| !ip.as_ip().is_loopback()) .filter(|(ip, _)| !ip.as_ip().is_loopback())

View File

@ -4,7 +4,6 @@ use super::{throughput_entry::ThroughputEntry, RETIRE_AFTER_SECONDS};
use lqos_bus::TcHandle; use lqos_bus::TcHandle;
use lqos_config::NetworkJson; use lqos_config::NetworkJson;
use lqos_sys::{rtt_for_each, throughput_for_each, XdpIpAddress}; use lqos_sys::{rtt_for_each, throughput_for_each, XdpIpAddress};
use rayon::prelude::{IntoParallelRefMutIterator, ParallelIterator};
use std::collections::HashMap; use std::collections::HashMap;
pub struct ThroughputTracker { pub struct ThroughputTracker {
@ -29,17 +28,11 @@ impl ThroughputTracker {
} }
} }
pub(crate) fn copy_previous_and_reset_rtt( pub(crate) fn copy_previous_and_reset_rtt(&mut self) {
&mut self,
netjson: &mut NetworkJson,
) {
// Zero the previous funnel hierarchy current numbers
netjson.zero_throughput_and_rtt();
// Copy previous byte/packet numbers and reset RTT data // Copy previous byte/packet numbers and reset RTT data
// We're using Rayon's "par_iter_mut" to spread the operation across // We're using Rayon's "par_iter_mut" to spread the operation across
// all CPU cores. // all CPU cores.
self.raw_data.par_iter_mut().for_each(|(_k, v)| { self.raw_data.iter_mut().for_each(|(_k, v)| {
if v.first_cycle < self.cycle { if v.first_cycle < self.cycle {
v.bytes_per_second.0 = v.bytes_per_second.0 =
u64::checked_sub(v.bytes.0, v.prev_bytes.0).unwrap_or(0); u64::checked_sub(v.bytes.0, v.prev_bytes.0).unwrap_or(0);
@ -64,7 +57,7 @@ impl ThroughputTracker {
fn lookup_circuit_id(xdp_ip: &XdpIpAddress) -> Option<String> { fn lookup_circuit_id(xdp_ip: &XdpIpAddress) -> Option<String> {
let mut circuit_id = None; let mut circuit_id = None;
let lookup = xdp_ip.as_ipv6(); let lookup = xdp_ip.as_ipv6();
let cfg = SHAPED_DEVICES.read(); let cfg = SHAPED_DEVICES.read().unwrap();
if let Some((_, id)) = cfg.trie.longest_match(lookup) { if let Some((_, id)) = cfg.trie.longest_match(lookup) {
circuit_id = Some(cfg.devices[*id].circuit_id.clone()); circuit_id = Some(cfg.devices[*id].circuit_id.clone());
} }
@ -76,12 +69,14 @@ impl ThroughputTracker {
circuit_id: Option<String>, circuit_id: Option<String>,
) -> Option<String> { ) -> Option<String> {
if let Some(circuit_id) = circuit_id { if let Some(circuit_id) = circuit_id {
let shaped = SHAPED_DEVICES.read(); let shaped = SHAPED_DEVICES.read().unwrap();
shaped let parent_name = shaped
.devices .devices
.iter() .iter()
.find(|d| d.circuit_id == circuit_id) .find(|d| d.circuit_id == circuit_id)
.map(|device| device.parent_node.clone()) .map(|device| device.parent_node.clone());
//println!("{parent_name:?}");
parent_name
} else { } else {
None None
} }
@ -91,7 +86,7 @@ impl ThroughputTracker {
circuit_id: Option<String>, circuit_id: Option<String>,
) -> Option<Vec<usize>> { ) -> Option<Vec<usize>> {
if let Some(parent) = Self::get_node_name_for_circuit_id(circuit_id) { if let Some(parent) = Self::get_node_name_for_circuit_id(circuit_id) {
let lock = crate::shaped_devices_tracker::NETWORK_JSON.read(); let lock = crate::shaped_devices_tracker::NETWORK_JSON.read().unwrap();
lock.get_parents_for_circuit_id(&parent) lock.get_parents_for_circuit_id(&parent)
} else { } else {
None None
@ -99,14 +94,17 @@ impl ThroughputTracker {
} }
pub(crate) fn refresh_circuit_ids(&mut self) { pub(crate) fn refresh_circuit_ids(&mut self) {
self.raw_data.par_iter_mut().for_each(|(ip, data)| { self.raw_data.iter_mut().for_each(|(ip, data)| {
data.circuit_id = Self::lookup_circuit_id(ip); data.circuit_id = Self::lookup_circuit_id(ip);
data.network_json_parents = data.network_json_parents =
Self::lookup_network_parents(data.circuit_id.clone()); Self::lookup_network_parents(data.circuit_id.clone());
}); });
} }
pub(crate) fn apply_new_throughput_counters(&mut self) { pub(crate) fn apply_new_throughput_counters(
&mut self,
net_json: &mut NetworkJson,
) {
let cycle = self.cycle; let cycle = self.cycle;
let raw_data = &mut self.raw_data; let raw_data = &mut self.raw_data;
throughput_for_each(&mut |xdp_ip, counts| { throughput_for_each(&mut |xdp_ip, counts| {
@ -127,6 +125,16 @@ impl ThroughputTracker {
} }
if entry.packets != entry.prev_packets { if entry.packets != entry.prev_packets {
entry.most_recent_cycle = cycle; entry.most_recent_cycle = cycle;
if let Some(parents) = &entry.network_json_parents {
net_json.add_throughput_cycle(
parents,
(
entry.bytes.0 - entry.prev_bytes.0,
entry.bytes.1 - entry.prev_bytes.1,
),
);
}
} }
} else { } else {
let circuit_id = Self::lookup_circuit_id(xdp_ip); let circuit_id = Self::lookup_circuit_id(xdp_ip);
@ -160,19 +168,22 @@ impl ThroughputTracker {
}); });
} }
pub(crate) fn apply_rtt_data(&mut self) { pub(crate) fn apply_rtt_data(&mut self, net_json: &mut NetworkJson) {
rtt_for_each(&mut |raw_ip, rtt| { rtt_for_each(&mut |raw_ip, rtt| {
if rtt.has_fresh_data != 0 { if rtt.has_fresh_data != 0 {
let ip = XdpIpAddress(*raw_ip); let ip = XdpIpAddress(*raw_ip);
if let Some(tracker) = self.raw_data.get_mut(&ip) { if let Some(tracker) = self.raw_data.get_mut(&ip) {
tracker.recent_rtt_data = rtt.rtt; tracker.recent_rtt_data = rtt.rtt;
tracker.last_fresh_rtt_data_cycle = self.cycle; tracker.last_fresh_rtt_data_cycle = self.cycle;
if let Some(parents) = &tracker.network_json_parents {
net_json.add_rtt_cycle(parents, tracker.median_latency());
}
} }
} }
}); });
} }
pub(crate) fn update_totals(&mut self, net_json: &mut NetworkJson) { pub(crate) fn update_totals(&mut self) {
self.bytes_per_second = (0, 0); self.bytes_per_second = (0, 0);
self.packets_per_second = (0, 0); self.packets_per_second = (0, 0);
self.shaped_bytes_per_second = (0, 0); self.shaped_bytes_per_second = (0, 0);
@ -186,165 +197,33 @@ impl ThroughputTracker {
v.packets.0.saturating_sub(v.prev_packets.0), v.packets.0.saturating_sub(v.prev_packets.0),
v.packets.1.saturating_sub(v.prev_packets.1), v.packets.1.saturating_sub(v.prev_packets.1),
v.tc_handle.as_u32() > 0, v.tc_handle.as_u32() > 0,
&v.network_json_parents,
v.median_latency(),
) )
}) })
.for_each( .for_each(|(bytes_down, bytes_up, packets_down, packets_up, shaped)| {
|(bytes_down, bytes_up, packets_down, packets_up, shaped, parents, median_rtt)| { self.bytes_per_second.0 =
self.bytes_per_second.0 = self.bytes_per_second.0.checked_add(bytes_down).unwrap_or(0);
self.bytes_per_second.0.checked_add(bytes_down).unwrap_or(0); self.bytes_per_second.1 =
self.bytes_per_second.1 = self.bytes_per_second.1.checked_add(bytes_up).unwrap_or(0);
self.bytes_per_second.1.checked_add(bytes_up).unwrap_or(0); self.packets_per_second.0 =
self.packets_per_second.0 = self.packets_per_second.0.checked_add(packets_down).unwrap_or(0);
self.packets_per_second.0.checked_add(packets_down).unwrap_or(0); self.packets_per_second.1 =
self.packets_per_second.1 = self.packets_per_second.1.checked_add(packets_up).unwrap_or(0);
self.packets_per_second.1.checked_add(packets_up).unwrap_or(0); if shaped {
if shaped { self.shaped_bytes_per_second.0 = self
self.shaped_bytes_per_second.0 = self .shaped_bytes_per_second
.shaped_bytes_per_second .0
.0 .checked_add(bytes_down)
.checked_add(bytes_down) .unwrap_or(0);
.unwrap_or(0); self.shaped_bytes_per_second.1 =
self.shaped_bytes_per_second.1 = self self.shaped_bytes_per_second.1.checked_add(bytes_up).unwrap_or(0);
.shaped_bytes_per_second }
.1 });
.checked_add(bytes_up)
.unwrap_or(0);
}
// If we have parent node data, we apply it now
if let Some(parents) = parents {
net_json.add_throughput_cycle(
parents,
(self.bytes_per_second.0, self.bytes_per_second.1),
median_rtt,
)
}
},
);
} }
pub(crate) fn next_cycle(&mut self) { pub(crate) fn next_cycle(&mut self) {
self.cycle += 1; self.cycle += 1;
} }
// pub(crate) fn tick(
// &mut self,
// value_dump: &[(XdpIpAddress, Vec<HostCounter>)],
// rtt: Result<Vec<([u8; 16], RttTrackingEntry)>>,
// ) -> Result<()> {
// // Copy previous byte/packet numbers and reset RTT data
// self.raw_data.iter_mut().for_each(|(_k, v)| {
// if v.first_cycle < self.cycle {
// v.bytes_per_second.0 = u64::checked_sub(v.bytes.0, v.prev_bytes.0).unwrap_or(0);
// v.bytes_per_second.1 = u64::checked_sub(v.bytes.1, v.prev_bytes.1).unwrap_or(0);
// v.packets_per_second.0 =
// u64::checked_sub(v.packets.0, v.prev_packets.0).unwrap_or(0);
// v.packets_per_second.1 =
// u64::checked_sub(v.packets.1, v.prev_packets.1).unwrap_or(0);
// v.prev_bytes = v.bytes;
// v.prev_packets = v.packets;
// }
// // Roll out stale RTT data
// if self.cycle > RETIRE_AFTER_SECONDS
// && v.last_fresh_rtt_data_cycle < self.cycle - RETIRE_AFTER_SECONDS
// {
// v.recent_rtt_data = [0; 60];
// }
// });
// value_dump.iter().for_each(|(xdp_ip, counts)| {
// if let Some(entry) = self.raw_data.get_mut(xdp_ip) {
// entry.bytes = (0, 0);
// entry.packets = (0, 0);
// for c in counts {
// entry.bytes.0 += c.download_bytes;
// entry.bytes.1 += c.upload_bytes;
// entry.packets.0 += c.download_packets;
// entry.packets.1 += c.upload_packets;
// if c.tc_handle != 0 {
// entry.tc_handle = TcHandle::from_u32(c.tc_handle);
// }
// if c.last_seen != 0 {
// entry.last_seen = c.last_seen;
// }
// }
// if entry.packets != entry.prev_packets {
// entry.most_recent_cycle = self.cycle;
// }
// } else {
// let mut entry = ThroughputEntry {
// first_cycle: self.cycle,
// most_recent_cycle: 0,
// bytes: (0, 0),
// packets: (0, 0),
// prev_bytes: (0, 0),
// prev_packets: (0, 0),
// bytes_per_second: (0, 0),
// packets_per_second: (0, 0),
// tc_handle: TcHandle::zero(),
// recent_rtt_data: [0; 60],
// last_fresh_rtt_data_cycle: 0,
// last_seen: 0,
// };
// for c in counts {
// entry.bytes.0 += c.download_bytes;
// entry.bytes.1 += c.upload_bytes;
// entry.packets.0 += c.download_packets;
// entry.packets.1 += c.upload_packets;
// if c.tc_handle != 0 {
// entry.tc_handle = TcHandle::from_u32(c.tc_handle);
// }
// }
// self.raw_data.insert(*xdp_ip, entry);
// }
// });
// // Apply RTT data
// if let Ok(rtt_dump) = rtt {
// for (raw_ip, rtt) in rtt_dump {
// if rtt.has_fresh_data != 0 {
// let ip = XdpIpAddress(raw_ip);
// if let Some(tracker) = self.raw_data.get_mut(&ip) {
// tracker.recent_rtt_data = rtt.rtt;
// tracker.last_fresh_rtt_data_cycle = self.cycle;
// }
// }
// }
// }
// // Update totals
// self.bytes_per_second = (0, 0);
// self.packets_per_second = (0, 0);
// self.shaped_bytes_per_second = (0, 0);
// self.raw_data
// .iter()
// .map(|(_k, v)| {
// (
// v.bytes.0 - v.prev_bytes.0,
// v.bytes.1 - v.prev_bytes.1,
// v.packets.0 - v.prev_packets.0,
// v.packets.1 - v.prev_packets.1,
// v.tc_handle.as_u32() > 0,
// )
// })
// .for_each(|(bytes_down, bytes_up, packets_down, packets_up, shaped)| {
// self.bytes_per_second.0 += bytes_down;
// self.bytes_per_second.1 += bytes_up;
// self.packets_per_second.0 += packets_down;
// self.packets_per_second.1 += packets_up;
// if shaped {
// self.shaped_bytes_per_second.0 += bytes_down;
// self.shaped_bytes_per_second.1 += bytes_up;
// }
// });
// // Onto the next cycle
// self.cycle += 1;
// Ok(())
// }
pub(crate) fn bits_per_second(&self) -> (u64, u64) { pub(crate) fn bits_per_second(&self) -> (u64, u64) {
(self.bytes_per_second.0 * 8, self.bytes_per_second.1 * 8) (self.bytes_per_second.0 * 8, self.bytes_per_second.1 * 8)
} }