Heimdall circuit UI speed (#296)

* Circuit queue - lazy rendering by active tab

Only perform network requests for the active tab on the
circuit_queue.html page. Small to moderate performance
improvement, but it greatly reduces the time spent polling.

* Significantly optimize network and rendering performance of the queues funnel display.

* Substantially improve performance on the flows display by using msgpack with a dictionary.

* Remove some commented code

* Fetch circuit info via efficient msgpack

* Use MsgPack for circuit throughput

* Get rid of the monstrosity that was copying queue data over the bus in a raw JSON string, hideously wasteful. Instead, we now have a 'transit' series of objects in the bus definition for tight encoding. This also cleaned up some node manager code. It's mostly useful for the next stage, which will start to reduce the amount of data we are transmitting.

* A lot of the redundant network transport is culled.

* More unused fields culled.

* Eliminate a HUGE amount of garbage collection by allocating and reusing a single object, and cleaning up the JS rendering. Still not good enough.

* Switch to an efficient msgpack transmission format.

* Cleanup handling of 'none' in msgpack

* Fix scale delays to ms

* Commit to send to payne

* Use WebGL for a slight rendering boost.

* Further reduce draw time of circuit page by using redraw commands.

* Finish previous commit

* Use redraw with preallocated/non-GC data for all ringbuffer renders.

* Fix a rare issue with reloading network.json that could cause a stall.

* Optimize RTT graphs with the reload system.
This commit is contained in:
Herbert "TheBracket"
2023-03-25 09:37:04 -05:00
committed by GitHub
parent d53200f43d
commit 0cf7d5dd0a
15 changed files with 845 additions and 429 deletions

View File

@@ -5,6 +5,7 @@ mod request;
mod response;
mod session;
mod unix_socket_server;
mod queue_data;
pub use client::bus_request;
use log::error;
pub use persistent_client::BusClient;
@@ -14,6 +15,7 @@ pub use response::BusResponse;
pub use session::BusSession;
use thiserror::Error;
pub use unix_socket_server::UnixSocketServer;
pub use queue_data::*;
/// The local socket path to which `lqosd` will bind itself,
/// listening for requests.

View File

@@ -0,0 +1,103 @@
use serde::{Serialize, Deserialize};
/// Type used for *displaying* the queue store data. It deliberately
/// doesn't include data that we aren't going to display in a GUI.
///
/// NOTE(review): this struct is sent over the bus and decoded on the
/// JS side by positional index (see the `QD` lookup table in the web
/// assets), so the declared field order is part of the wire format —
/// do not reorder fields without updating the JS decoder.
#[allow(missing_docs)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
pub struct QueueStoreTransit {
    // Ring buffer of (download, upload) diff pairs.
    pub history: Vec<(CakeDiffTransit, CakeDiffTransit)>,
    // Current write position within `history`.
    pub history_head: usize,
    //pub prev_download: Option<CakeTransit>,
    //pub prev_upload: Option<CakeTransit>,
    // Latest full snapshot for each direction.
    pub current_download: CakeTransit,
    pub current_upload: CakeTransit,
}
/// Per-tick delta of a Cake qdisc, trimmed down to the fields the GUI
/// actually renders.
///
/// NOTE(review): decoded positionally on the JS side (the `CDT` index
/// table) — field order is part of the wire format.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[allow(missing_docs)]
pub struct CakeDiffTransit {
    pub bytes: u64,
    pub packets: u32,
    pub qlen: u32,
    // One entry per Cake tin (traffic class).
    pub tins: Vec<CakeDiffTinTransit>,
}
/// Per-tin delta statistics for a Cake qdisc, display subset only.
///
/// NOTE(review): decoded positionally on the JS side (the `CDTT` index
/// table) — field order is part of the wire format.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[allow(missing_docs)]
pub struct CakeDiffTinTransit {
    pub sent_bytes: u64,
    pub backlog_bytes: u32,
    pub drops: u32,
    pub marks: u32,
    pub avg_delay_us: u32,
}
/// Snapshot of a Cake qdisc for display. Everything the GUI does not
/// show has been culled (the commented-out fields record what the full
/// collector provides) to minimize bus/network transport.
///
/// NOTE(review): decoded positionally on the JS side (the `CT` index
/// table) — if a culled field is reinstated, the JS decoder must be
/// updated in lock-step.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[allow(missing_docs)]
pub struct CakeTransit {
    //pub handle: TcHandle,
    //pub parent: TcHandle,
    //pub bytes: u64,
    //pub packets: u32,
    //pub overlimits: u32,
    //pub requeues: u32,
    //pub backlog: u32,
    //pub qlen: u32,
    pub memory_used: u32,
    //pub memory_limit: u32,
    //pub capacity_estimate: u32,
    //pub min_network_size: u16,
    //pub max_network_size: u16,
    //pub min_adj_size: u16,
    //pub max_adj_size: u16,
    //pub avg_hdr_offset: u16,
    //pub tins: Vec<CakeTinTransit>,
    //pub drops: u32,
}
/*
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[allow(missing_docs)]
pub struct CakeOptionsTransit {
pub rtt: u64,
pub bandwidth: u8,
pub diffserv: u8,
pub flowmode: u8,
pub ack_filter: u8,
pub nat: bool,
pub wash: bool,
pub ingress: bool,
pub split_gso: bool,
pub raw: bool,
pub overhead: u16,
pub fwmark: TcHandle,
}
// Commented out data is collected but not used
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[allow(missing_docs)]
pub struct CakeTinTransit {
//pub threshold_rate: u64,
//pub sent_bytes: u64,
//pub backlog_bytes: u32,
//pub target_us: u32,
//pub interval_us: u32,
//pub peak_delay_us: u32,
//pub avg_delay_us: u32,
//pub base_delay_us: u32,
//pub sent_packets: u32,
//pub way_indirect_hits: u16,
//pub way_misses: u16,
//pub way_collisions: u16,
//pub drops: u32,
//pub ecn_marks: u32,
//pub ack_drops: u32,
//pub sparse_flows: u16,
//pub bulk_flows: u16,
//pub unresponsive_flows: u16,
//pub max_pkt_len: u16,
//pub flow_quantum: u16,
}
*/

View File

@@ -1,6 +1,7 @@
use crate::{IpMapping, IpStats, XdpPpingResult, FlowTransport, ip_stats::PacketHeader};
use serde::{Deserialize, Serialize};
use std::net::IpAddr;
use super::QueueStoreTransit;
/// A `BusResponse` object represents a single
/// reply generated from a `BusRequest`, and batched
@@ -67,7 +68,7 @@ pub enum BusResponse {
/// A string containing a JSON dump of queue stats. Analogous to
/// the response from `tc show qdisc`.
RawQueueData(String),
RawQueueData(Option<Box<QueueStoreTransit>>),
/// Results from network map queries
NetworkMap(Vec<(usize, lqos_config::NetworkJsonTransport)>),

View File

@@ -12,11 +12,15 @@
#![warn(missing_docs)]
mod bus;
mod ip_stats;
pub use ip_stats::{IpMapping, IpStats, XdpPpingResult, FlowProto, FlowTransport, tos_parser, PacketHeader};
pub use ip_stats::{
tos_parser, FlowProto, FlowTransport, IpMapping, IpStats, PacketHeader,
XdpPpingResult,
};
mod tc_handle;
pub use bus::{
bus_request, decode_request, decode_response, encode_request,
encode_response, BusClient, BusReply, BusRequest, BusResponse, BusSession,
CakeDiffTinTransit, CakeDiffTransit, CakeTransit, QueueStoreTransit,
UnixSocketServer, BUS_SOCKET_PATH,
};
pub use tc_handle::TcHandle;

View File

@@ -179,6 +179,7 @@ impl NetworkJson {
&self,
circuit_id: &str,
) -> Option<Vec<usize>> {
//println!("Looking for parents of {circuit_id}");
self
.nodes
.iter()

View File

@@ -106,7 +106,7 @@ pub async fn node_names(
#[get("/api/funnel_for_queue/<circuit_id>")]
pub async fn funnel_for_queue(
circuit_id: String,
) -> NoCache<Json<Vec<(usize, NetworkJsonTransport)>>> {
) -> NoCache<MsgPack<Vec<(usize, NetworkJsonTransport)>>> {
let mut result = Vec::new();
let target = SHAPED_DEVICES
@@ -127,5 +127,5 @@ pub async fn funnel_for_queue(
result.extend_from_slice(map);
}
}
NoCache::new(Json(result))
NoCache::new(MsgPack(result))
}

View File

@@ -1,12 +1,13 @@
use crate::auth_guard::AuthGuard;
use crate::cache_control::NoCache;
use crate::tracker::SHAPED_DEVICES;
use lqos_bus::{bus_request, BusRequest, BusResponse, FlowTransport, PacketHeader};
use lqos_bus::{bus_request, BusRequest, BusResponse, FlowTransport, PacketHeader, QueueStoreTransit};
use rocket::fs::NamedFile;
use rocket::http::Status;
use rocket::response::content::RawJson;
use rocket::serde::json::Json;
use rocket::serde::Serialize;
use rocket::serde::msgpack::MsgPack;
use std::net::IpAddr;
#[derive(Serialize, Clone)]
@@ -30,7 +31,7 @@ pub async fn watch_circuit(
pub async fn circuit_info(
circuit_id: String,
_auth: AuthGuard,
) -> NoCache<Json<CircuitInfo>> {
) -> NoCache<MsgPack<CircuitInfo>> {
if let Some(device) = SHAPED_DEVICES
.read()
.unwrap()
@@ -45,13 +46,13 @@ pub async fn circuit_info(
device.upload_max_mbps as u64 * 1_000_000,
),
};
NoCache::new(Json(result))
NoCache::new(MsgPack(result))
} else {
let result = CircuitInfo {
name: "Nameless".to_string(),
capacity: (1_000_000, 1_000_000),
};
NoCache::new(Json(result))
NoCache::new(MsgPack(result))
}
}
@@ -59,7 +60,7 @@ pub async fn circuit_info(
pub async fn current_circuit_throughput(
circuit_id: String,
_auth: AuthGuard,
) -> NoCache<Json<Vec<(String, u64, u64)>>> {
) -> NoCache<MsgPack<Vec<(String, u64, u64)>>> {
let mut result = Vec::new();
// Get a list of host counts
// This is really inefficient, but I'm struggling to find a better way.
@@ -84,25 +85,29 @@ pub async fn current_circuit_throughput(
}
}
NoCache::new(Json(result))
NoCache::new(MsgPack(result))
}
#[get("/api/raw_queue_by_circuit/<circuit_id>")]
pub async fn raw_queue_by_circuit(
circuit_id: String,
_auth: AuthGuard,
) -> NoCache<RawJson<String>> {
) -> NoCache<MsgPack<QueueStoreTransit>> {
let responses =
bus_request(vec![BusRequest::GetRawQueueData(circuit_id)]).await.unwrap();
let result = match &responses[0] {
BusResponse::RawQueueData(msg) => msg.clone(),
_ => "Unable to request queue".to_string(),
BusResponse::RawQueueData(Some(msg)) => {
*msg.clone()
}
_ => QueueStoreTransit::default()
};
NoCache::new(RawJson(result))
NoCache::new(MsgPack(result))
}
#[get("/api/flows/<ip_list>")]
pub async fn flow_stats(ip_list: String, _auth: AuthGuard) -> NoCache<Json<Vec<(FlowTransport, Option<FlowTransport>)>>> {
pub async fn flow_stats(ip_list: String, _auth: AuthGuard) -> NoCache<MsgPack<Vec<(FlowTransport, Option<FlowTransport>)>>> {
let mut result = Vec::new();
let request: Vec<BusRequest> = ip_list.split(',').map(|ip| BusRequest::GetFlowStats(ip.to_string())).collect();
let responses = bus_request(request).await.unwrap();
@@ -111,7 +116,7 @@ pub async fn flow_stats(ip_list: String, _auth: AuthGuard) -> NoCache<Json<Vec<(
result.extend_from_slice(flow);
}
}
NoCache::new(Json(result))
NoCache::new(MsgPack(result))
}
#[derive(Serialize, Clone)]

File diff suppressed because it is too large Load Diff

View File

@@ -36,6 +36,49 @@ const IpStats = {
"plan": 6,
}
// Field-name -> msgpack array-index lookup tables.
// The Rust bus encodes its transit structs as positional msgpack
// arrays (no field names on the wire), so the JS decoder addresses
// fields by index. Each table below must mirror the declared field
// order of its Rust counterpart exactly.

// lqos_bus::FlowTransport
const FlowTrans = {
    "src": 0,
    "dst": 1,
    "proto": 2,
    "src_port": 3,
    "dst_port": 4,
    "bytes": 5,
    "packets": 6,
    "dscp": 7,
    "ecn": 8
}

// CircuitInfo response from /api/circuit_info
const CircuitInfo = {
    "name" : 0,
    "capacity" : 1,
}

const QD = { // Queue data (lqos_bus::QueueStoreTransit)
    "history": 0,
    "history_head": 1,
    "current_download": 2,
    "current_upload": 3,
}

const CT = { // Cake transit (lqos_bus::CakeTransit)
    "memory_used": 0,
}

const CDT = { // Cake Diff Transit (lqos_bus::CakeDiffTransit)
    "bytes": 0,
    "packets": 1,
    "qlen": 2,
    "tins": 3,
}

const CDTT = { // Cake Diff Tin Transit (lqos_bus::CakeDiffTinTransit)
    "sent_bytes": 0,
    "backlog_bytes": 1,
    "drops": 2,
    "marks": 3,
    "avg_delay_us": 4,
}
function metaverse_color_ramp(n) {
if (n <= 9) {
return "#32b08c";
@@ -271,18 +314,23 @@ class MultiRingBuffer {
plotTotalThroughput(target_div) {
let graph = document.getElementById(target_div);
let total = this.data['total'].sortedY();
let shaped = this.data['shaped'].sortedY();
this.data['total'].prepare();
this.data['shaped'].prepare();
let x = this.data['total'].x_axis;
let data = [
{x: x, y:total.down, name: 'Download', type: 'scatter', marker: {color: 'rgb(255,160,122)'}},
{x: x, y:total.up, name: 'Upload', type: 'scatter', marker: {color: 'rgb(255,160,122)'}},
{x: x, y:shaped.down, name: 'Shaped Download', type: 'scatter', fill: 'tozeroy', marker: {color: 'rgb(124,252,0)'}},
{x: x, y:shaped.up, name: 'Shaped Upload', type: 'scatter', fill: 'tozeroy', marker: {color: 'rgb(124,252,0)'}},
let graphData = [
{x: x, y:this.data['total'].sortedY[0], name: 'Download', type: 'scatter', marker: {color: 'rgb(255,160,122)'}},
{x: x, y:this.data['total'].sortedY[1], name: 'Upload', type: 'scatter', marker: {color: 'rgb(255,160,122)'}},
{x: x, y:this.data['shaped'].sortedY[0], name: 'Shaped Download', type: 'scatter', fill: 'tozeroy', marker: {color: 'rgb(124,252,0)'}},
{x: x, y:this.data['shaped'].sortedY[1], name: 'Shaped Upload', type: 'scatter', fill: 'tozeroy', marker: {color: 'rgb(124,252,0)'}},
];
Plotly.newPlot(graph, data, { margin: { l:0,r:0,b:0,t:0,pad:4 }, yaxis: { automargin: true, title: "Traffic (bits)" }, xaxis: {automargin: true, title: "Time since now (seconds)"} }, { responsive: true });
if (this.plotted == null) {
Plotly.newPlot(graph, graphData, { margin: { l:0,r:0,b:0,t:0,pad:4 }, yaxis: { automargin: true, title: "Traffic (bits)" }, xaxis: {automargin: true, title: "Time since now (seconds)"} }, { responsive: true });
this.plotted = true;
} else {
Plotly.redraw(graph, graphData);
}
}
}
@@ -293,10 +341,13 @@ class RingBuffer {
this.download = [];
this.upload = [];
this.x_axis = [];
this.sortedY = [ [], [] ];
for (var i = 0; i < capacity; ++i) {
this.download.push(0.0);
this.upload.push(0.0);
this.x_axis.push(capacity - i);
this.sortedY[0].push(0);
this.sortedY[1].push(0);
}
}
@@ -307,27 +358,25 @@ class RingBuffer {
this.head %= this.capacity;
}
sortedY() {
let result = {
down: [],
up: [],
};
prepare() {
let counter = 0;
for (let i=this.head; i<this.capacity; i++) {
result.down.push(this.download[i]);
result.up.push(this.upload[i]);
this.sortedY[0][counter] = this.download[i];
this.sortedY[1][counter] = this.upload[i];
counter++;
}
for (let i=0; i < this.head; i++) {
result.down.push(this.download[i]);
result.up.push(this.upload[i]);
this.sortedY[0][counter] = this.download[i];
this.sortedY[1][counter] = this.upload[i];
counter++;
}
return result;
}
toScatterGraphData() {
let y = this.sortedY();
this.prepare();
let GraphData = [
{ x: this.x_axis, y: y.down, name: 'Download', type: 'scatter' },
{ x: this.x_axis, y: y.up, name: 'Upload', type: 'scatter' },
{ x: this.x_axis, y: this.sortedY[0], name: 'Download', type: 'scatter' },
{ x: this.x_axis, y: this.sortedY[1], name: 'Upload', type: 'scatter' },
];
return GraphData;
}
@@ -366,7 +415,12 @@ class RttHistogram {
{ x: this.x, y: this.entries, type: 'bar', marker: { color: this.x, colorscale: 'RdBu' } }
]
let graph = document.getElementById(target_div);
Plotly.newPlot(graph, gData, { margin: { l: 40, r: 0, b: 35, t: 0 }, yaxis: { title: "# Hosts" }, xaxis: { title: 'TCP Round-Trip Time (ms)' } }, { responsive: true });
if (this.plotted == null) {
Plotly.newPlot(graph, gData, { margin: { l: 40, r: 0, b: 35, t: 0 }, yaxis: { title: "# Hosts" }, xaxis: { title: 'TCP Round-Trip Time (ms)' } }, { responsive: true });
this.plotted = true;
} else {
Plotly.redraw(graph, gData);
}
}
}

View File

@@ -1,15 +1,15 @@
use crate::{circuit_to_queue::CIRCUIT_TO_QUEUE, still_watching};
use crate::{
circuit_to_queue::CIRCUIT_TO_QUEUE, queue_store::QueueStore, still_watching,
};
use lqos_bus::BusResponse;
pub fn get_raw_circuit_data(circuit_id: &str) -> BusResponse {
still_watching(circuit_id);
if let Some(circuit) = CIRCUIT_TO_QUEUE.get(circuit_id) {
if let Ok(json) = serde_json::to_string(circuit.value()) {
BusResponse::RawQueueData(json)
} else {
BusResponse::RawQueueData(String::new())
}
let cv: QueueStore = circuit.value().clone();
let transit = Box::new(cv.into());
BusResponse::RawQueueData(Some(transit))
} else {
BusResponse::RawQueueData(String::new())
BusResponse::RawQueueData(None)
}
}

View File

@@ -1,11 +1,16 @@
use crate::{
queue_diff::{make_queue_diff, QueueDiff},
queue_types::QueueType,
queue_diff::{make_queue_diff, CakeDiffTin, QueueDiff},
queue_types::{
QueueType,
},
NUM_QUEUE_HISTORY,
};
use lqos_bus::{
CakeDiffTinTransit, CakeDiffTransit, CakeTransit, QueueStoreTransit,
};
use serde::Serialize;
#[derive(Debug, Serialize)]
#[derive(Debug, Serialize, Clone)]
pub struct QueueStore {
history: Vec<(QueueDiff, QueueDiff)>,
history_head: usize,
@@ -50,3 +55,134 @@ impl QueueStore {
}
}
}
// Note: I'm overriding the warning because the "from only" behaviour
// is actually what we want here.
#[allow(clippy::from_over_into)]
impl Into<QueueStoreTransit> for QueueStore {
    /// Converts the full queue store into the trimmed transit form used
    /// for display. Consumes `self`, so history entries are *moved* into
    /// the conversion rather than cloned (the previous
    /// `.iter().cloned()` copied every `(QueueDiff, QueueDiff)` pair).
    fn into(self) -> QueueStoreTransit {
        QueueStoreTransit {
            history: self
                .history
                .into_iter()
                .map(|(a, b)| (a.into(), b.into()))
                .collect(),
            history_head: self.history_head,
            current_download: self.current_download.into(),
            current_upload: self.current_upload.into(),
        }
    }
}
#[allow(clippy::from_over_into)]
impl Into<CakeDiffTransit> for QueueDiff {
    /// Flattens a Cake queue diff into its transit form. Any non-Cake
    /// diff collapses to the default (zeroed) record.
    ///
    /// Matches on `self` by value (we own it), so the tins are moved
    /// into the conversion instead of cloned one-by-one as before.
    fn into(self) -> CakeDiffTransit {
        if let QueueDiff::Cake(c) = self {
            CakeDiffTransit {
                bytes: c.bytes,
                packets: c.packets,
                qlen: c.qlen,
                tins: c.tins.into_iter().map(Into::into).collect(),
            }
        } else {
            CakeDiffTransit::default()
        }
    }
}
#[allow(clippy::from_over_into)]
impl Into<CakeDiffTinTransit> for CakeDiffTin {
    /// Straight field-for-field copy into the wire/transit form.
    /// The transit struct's field order is mirrored by the JS-side
    /// positional decoder, so the mapping here is deliberately explicit.
    fn into(self) -> CakeDiffTinTransit {
        CakeDiffTinTransit {
            sent_bytes: self.sent_bytes,
            backlog_bytes: self.backlog_bytes,
            drops: self.drops,
            marks: self.marks,
            avg_delay_us: self.avg_delay_us,
        }
    }
}
#[allow(clippy::from_over_into)]
impl Into<CakeTransit> for QueueType {
    /// Reduces a queue snapshot to the minimal transit form sent to the
    /// GUI. Only Cake queues carry data we forward; every other queue
    /// type collapses to the default (zeroed) record. All remaining
    /// Cake fields are deliberately omitted to keep transport small
    /// (see the culled field list on `CakeTransit` itself).
    fn into(self) -> CakeTransit {
        match self {
            QueueType::Cake(c) => CakeTransit { memory_used: c.memory_used },
            _ => CakeTransit::default(),
        }
    }
}
/*
#[allow(clippy::from_over_into)]
impl Into<CakeOptionsTransit> for TcCakeOptions {
fn into(self) -> CakeOptionsTransit {
CakeOptionsTransit {
rtt: self.rtt,
bandwidth: self.bandwidth as u8,
diffserv: self.diffserv as u8,
flowmode: self.flowmode as u8,
ack_filter: self.ack_filter as u8,
nat: self.nat,
wash: self.wash,
ingress: self.ingress,
split_gso: self.split_gso,
raw: self.raw,
overhead: self.overhead,
fwmark: self.fwmark,
}
}
}
#[allow(clippy::from_over_into)]
impl Into<CakeTinTransit> for TcCakeTin {
fn into(self) -> CakeTinTransit {
CakeTinTransit {
//threshold_rate: self.threshold_rate,
//sent_bytes: self.sent_bytes,
//backlog_bytes: self.backlog_bytes,
//target_us: self.target_us,
//interval_us: self.interval_us,
//peak_delay_us: self.peak_delay_us,
//avg_delay_us: self.avg_delay_us,
//base_delay_us: self.base_delay_us,
//sent_packets: self.sent_packets,
//way_indirect_hits: self.way_indirect_hits,
//way_misses: self.way_misses,
//way_collisions: self.way_collisions,
//drops: self.drops,
//ecn_marks: self.ecn_marks,
//ack_drops: self.ack_drops,
//sparse_flows: self.sparse_flows,
//bulk_flows: self.bulk_flows,
//unresponsive_flows: self.unresponsive_flows,
//max_pkt_len: self.max_pkt_len,
//flow_quantum: self.flow_quantum,
}
}
}
*/

View File

@@ -1,4 +1,4 @@
mod tc_cake;
pub(crate) mod tc_cake;
mod tc_fq_codel;
mod tc_htb;
mod tc_mq;

View File

@@ -27,63 +27,63 @@ string_table_enum!(BandWidth, unlimited); // in the present implementation with
pub struct TcCake {
pub(crate) handle: TcHandle,
pub(crate) parent: TcHandle,
options: TcCakeOptions,
pub(crate) options: TcCakeOptions,
pub(crate) bytes: u64,
pub(crate) packets: u32,
overlimits: u32,
requeues: u32,
pub(crate) overlimits: u32,
pub(crate) requeues: u32,
pub(crate) backlog: u32,
pub(crate) qlen: u32,
memory_used: u32,
memory_limit: u32,
capacity_estimate: u32,
min_network_size: u16,
max_network_size: u16,
min_adj_size: u16,
max_adj_size: u16,
avg_hdr_offset: u16,
pub(crate) memory_used: u32,
pub(crate) memory_limit: u32,
pub(crate) capacity_estimate: u32,
pub(crate) min_network_size: u16,
pub(crate) max_network_size: u16,
pub(crate) min_adj_size: u16,
pub(crate) max_adj_size: u16,
pub(crate) avg_hdr_offset: u16,
pub(crate) tins: Vec<TcCakeTin>,
pub(crate) drops: u32,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
struct TcCakeOptions {
rtt: u64,
bandwidth: BandWidth,
diffserv: DiffServ,
flowmode: FlowMode,
ack_filter: AckFilter,
nat: bool,
wash: bool,
ingress: bool,
split_gso: bool,
raw: bool,
overhead: u16,
fwmark: TcHandle,
pub(crate) struct TcCakeOptions {
pub(crate) rtt: u64,
pub(crate) bandwidth: BandWidth,
pub(crate) diffserv: DiffServ,
pub(crate) flowmode: FlowMode,
pub(crate) ack_filter: AckFilter,
pub(crate) nat: bool,
pub(crate) wash: bool,
pub(crate) ingress: bool,
pub(crate) split_gso: bool,
pub(crate) raw: bool,
pub(crate) overhead: u16,
pub(crate) fwmark: TcHandle,
}
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct TcCakeTin {
threshold_rate: u64,
pub(crate) struct TcCakeTin {
pub(crate) threshold_rate: u64,
pub(crate) sent_bytes: u64,
pub(crate) backlog_bytes: u32,
target_us: u32,
interval_us: u32,
peak_delay_us: u32,
pub(crate) target_us: u32,
pub(crate) interval_us: u32,
pub(crate) peak_delay_us: u32,
pub(crate) avg_delay_us: u32,
base_delay_us: u32,
sent_packets: u32,
way_indirect_hits: u16,
way_misses: u16,
way_collisions: u16,
pub(crate) base_delay_us: u32,
pub(crate) sent_packets: u32,
pub(crate) way_indirect_hits: u16,
pub(crate) way_misses: u16,
pub(crate) way_collisions: u16,
pub(crate) drops: u32,
pub(crate) ecn_marks: u32,
ack_drops: u32,
sparse_flows: u16,
bulk_flows: u16,
unresponsive_flows: u16,
max_pkt_len: u16,
flow_quantum: u16,
pub(crate) ack_drops: u32,
pub(crate) sparse_flows: u16,
pub(crate) bulk_flows: u16,
pub(crate) unresponsive_flows: u16,
pub(crate) max_pkt_len: u16,
pub(crate) flow_quantum: u16,
}
impl TcCake {

View File

@@ -3,7 +3,7 @@ macro_rules! string_table_enum {
($enum_name: ident, $($option:ident),*) => {
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
#[allow(non_camel_case_types)]
enum $enum_name {
pub(crate) enum $enum_name {
$($option, )*
Unknown
}
@@ -41,7 +41,7 @@ macro_rules! dashy_table_enum {
($enum_name: ident, $($option:ident),*) => {
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
#[allow(non_camel_case_types)]
enum $enum_name {
pub(crate) enum $enum_name {
$($option, )*
Unknown
}

View File

@@ -41,6 +41,9 @@ fn load_network_json() {
if let Ok(njs) = njs {
let mut write_lock = NETWORK_JSON.write().unwrap();
*write_lock = njs;
std::mem::drop(write_lock);
crate::throughput_tracker::THROUGHPUT_TRACKER
.refresh_circuit_ids();
} else {
warn!("Unable to load network.json");
}