use std::{sync::atomic::AtomicU64, time::Duration};
use crate::{shaped_devices_tracker::{NETWORK_JSON, SHAPED_DEVICES}, stats::{HIGH_WATERMARK_DOWN, HIGH_WATERMARK_UP}, throughput_tracker::flow_data::{expire_rtt_flows, flowbee_rtt_map}};
use super::{flow_data::{get_flowbee_event_count_and_reset, FlowAnalysis, FlowbeeLocalData, RttData, ALL_FLOWS}, throughput_entry::ThroughputEntry, RETIRE_AFTER_SECONDS};
use dashmap::DashMap;
use lqos_bus::TcHandle;
use lqos_sys::{flowbee_data::FlowbeeKey, iterate_flows, throughput_for_each};
use lqos_utils::{unix_time::time_since_boot, XdpIpAddress};
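
/// Shared accounting for per-host and system-wide throughput, fed from
/// the eBPF maps once per polling cycle. Fields use atomics and a
/// concurrent map, so the `&self` methods are safe to call from the
/// polling thread while readers inspect the totals.
///
/// A rough sketch of the intended once-per-second call order, inferred
/// from the methods below rather than copied from the actual caller:
///
/// ```ignore
/// let tracker = ThroughputTracker::new();
/// loop {
///   tracker.copy_previous_and_reset_rtt();
///   tracker.apply_new_throughput_counters();
///   // apply_flow_data(...) runs here too when flow tracking is active
///   tracker.update_totals();
///   tracker.next_cycle();
///   // ...sleep until the next one-second tick...
/// }
/// ```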
pub struct ThroughputTracker {
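  /// Monotonic cycle counter; incremented once per polling tick.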
pub(crate) cycle: AtomicU64,
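  /// Per-host tracking data, keyed by the IP address seen by XDP.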
pub(crate) raw_data: DashMap<XdpIpAddress, ThroughputEntry>,
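  /// System-wide bytes per second, as (download, upload).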
pub(crate) bytes_per_second: (AtomicU64, AtomicU64),
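  /// System-wide packets per second, as (download, upload).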
pub(crate) packets_per_second: (AtomicU64, AtomicU64),
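  /// Bytes per second of shaped traffic (hosts with a TC handle), as
  /// (download, upload).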
pub(crate) shaped_bytes_per_second: (AtomicU64, AtomicU64),
}
impl ThroughputTracker {
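  /// Creates a tracker sized to match the eBPF map's capacity.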
  pub(crate) fn new() -> Self {
    // The capacity should match that found in
    // maximums.h (MAX_TRACKED_IPS), so we grab it
    // from there via the C API.
    Self {
      cycle: AtomicU64::new(RETIRE_AFTER_SECONDS),
      raw_data: DashMap::with_capacity(lqos_sys::max_tracked_ips()),
      bytes_per_second: (AtomicU64::new(0), AtomicU64::new(0)),
      packets_per_second: (AtomicU64::new(0), AtomicU64::new(0)),
      shaped_bytes_per_second: (AtomicU64::new(0), AtomicU64::new(0)),
    }
  }
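
  /// Converts the raw counters accumulated last cycle into per-host
  /// per-second rates, stores the current counters as the new
  /// baseline, and clears RTT samples that have not been refreshed
  /// within `RETIRE_AFTER_SECONDS` cycles.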
  pub(crate) fn copy_previous_and_reset_rtt(&self) {
    // Copy previous byte/packet numbers and reset RTT data
    let self_cycle = self.cycle.load(std::sync::atomic::Ordering::Relaxed);
    self.raw_data.iter_mut().for_each(|mut v| {
      if v.first_cycle < self_cycle {
        v.bytes_per_second.0 =
          u64::checked_sub(v.bytes.0, v.prev_bytes.0).unwrap_or(0);
        v.bytes_per_second.1 =
          u64::checked_sub(v.bytes.1, v.prev_bytes.1).unwrap_or(0);
        v.packets_per_second.0 =
          u64::checked_sub(v.packets.0, v.prev_packets.0).unwrap_or(0);
        v.packets_per_second.1 =
          u64::checked_sub(v.packets.1, v.prev_packets.1).unwrap_or(0);
      }
      v.prev_bytes = v.bytes;
      v.prev_packets = v.packets;

      // Roll out stale RTT data
      if self_cycle > RETIRE_AFTER_SECONDS
        && v.last_fresh_rtt_data_cycle < self_cycle - RETIRE_AFTER_SECONDS
      {
        v.recent_rtt_data = [0; 60];
      }
    });
  }
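
  /// Looks up the circuit ID for an IP address via longest-prefix
  /// match against the ShapedDevices trie.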
  fn lookup_circuit_id(xdp_ip: &XdpIpAddress) -> Option<String> {
    let mut circuit_id = None;
    let lookup = xdp_ip.as_ipv6();
    let cfg = SHAPED_DEVICES.read().unwrap();
    if let Some((_, id)) = cfg.trie.longest_match(lookup) {
      circuit_id = Some(cfg.devices[*id].circuit_id.clone());
    }
    circuit_id
  }
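
  /// Returns the parent node name for a circuit ID, if the circuit is
  /// present in ShapedDevices.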
  pub(crate) fn get_node_name_for_circuit_id(
    circuit_id: Option<String>,
  ) -> Option<String> {
    if let Some(circuit_id) = circuit_id {
      let shaped = SHAPED_DEVICES.read().unwrap();
      shaped
        .devices
        .iter()
        .find(|d| d.circuit_id == circuit_id)
        .map(|device| device.parent_node.clone())
    } else {
      None
    }
  }
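
  /// Maps a circuit ID to the list of NetworkJson node indices above
  /// it in the network hierarchy.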
  pub(crate) fn lookup_network_parents(
    circuit_id: Option<String>,
  ) -> Option<Vec<usize>> {
    if let Some(parent) = Self::get_node_name_for_circuit_id(circuit_id) {
      let lock = crate::shaped_devices_tracker::NETWORK_JSON.read().unwrap();
      lock.get_parents_for_circuit_id(&parent)
    } else {
      None
    }
  }
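
  /// Re-resolves the circuit ID and network parents for every tracked
  /// host, picking up changes to the ShapedDevices mapping.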
  pub(crate) fn refresh_circuit_ids(&self) {
    self.raw_data.iter_mut().for_each(|mut data| {
      data.circuit_id = Self::lookup_circuit_id(data.key());
      data.network_json_parents =
        Self::lookup_network_parents(data.circuit_id.clone());
    });
  }
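
  /// Pulls the latest counters from the eBPF throughput map and merges
  /// them into `raw_data`, creating entries for hosts seen for the
  /// first time and accumulating per-node throughput in the network
  /// tree.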
  pub(crate) fn apply_new_throughput_counters(&self) {
    let raw_data = &self.raw_data;
    let self_cycle = self.cycle.load(std::sync::atomic::Ordering::Relaxed);
    throughput_for_each(&mut |xdp_ip, counts| {
      if let Some(mut entry) = raw_data.get_mut(xdp_ip) {
        entry.bytes = (0, 0);
        entry.packets = (0, 0);
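        // Sum the per-CPU counter records reported for this host.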
        for c in counts {
          entry.bytes.0 += c.download_bytes;
          entry.bytes.1 += c.upload_bytes;
          entry.packets.0 += c.download_packets;
          entry.packets.1 += c.upload_packets;
          if c.tc_handle != 0 {
            entry.tc_handle = TcHandle::from_u32(c.tc_handle);
          }
          if c.last_seen != 0 {
            entry.last_seen = c.last_seen;
          }
        }
        if entry.packets != entry.prev_packets {
          entry.most_recent_cycle = self_cycle;

          if let Some(parents) = &entry.network_json_parents {
            let net_json = NETWORK_JSON.read().unwrap();
            net_json.add_throughput_cycle(
              parents,
              (
                entry.bytes.0.saturating_sub(entry.prev_bytes.0),
                entry.bytes.1.saturating_sub(entry.prev_bytes.1),
              ),
            );
          }
        }
      } else {
        let circuit_id = Self::lookup_circuit_id(xdp_ip);
        let mut entry = ThroughputEntry {
          circuit_id: circuit_id.clone(),
          network_json_parents: Self::lookup_network_parents(circuit_id),
          first_cycle: self_cycle,
          most_recent_cycle: 0,
          bytes: (0, 0),
          packets: (0, 0),
          prev_bytes: (0, 0),
          prev_packets: (0, 0),
          bytes_per_second: (0, 0),
          packets_per_second: (0, 0),
          tc_handle: TcHandle::zero(),
          recent_rtt_data: [0; 60],
          last_fresh_rtt_data_cycle: 0,
          last_seen: 0,
        };
        for c in counts {
          entry.bytes.0 += c.download_bytes;
          entry.bytes.1 += c.upload_bytes;
          entry.packets.0 += c.download_packets;
          entry.packets.1 += c.upload_packets;
          if c.tc_handle != 0 {
            entry.tc_handle = TcHandle::from_u32(c.tc_handle);
          }
        }
        raw_data.insert(*xdp_ip, entry);
      }
    });
  }
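
  /// Walks the Flowbee eBPF flow map: refreshes the shared `ALL_FLOWS`
  /// table, folds fresh TCP RTT samples into the per-host trackers,
  /// hands finished flows to `sender` for analysis, and expires stale
  /// entries.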
  pub(crate) fn apply_flow_data(
    &self,
    timeout_seconds: u64,
    _netflow_enabled: bool,
    sender: std::sync::mpsc::Sender<(FlowbeeKey, (FlowbeeLocalData, FlowAnalysis))>,
  ) {
    let self_cycle = self.cycle.load(std::sync::atomic::Ordering::Relaxed);

    if let Ok(now) = time_since_boot() {
      let rtt_samples = flowbee_rtt_map();
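      // Drain and reset the Flowbee event counter; the count itself is
      // only of interest for debug logging.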
get_flowbee_event_count_and_reset();
      let since_boot = Duration::from(now);
      let expire = (since_boot - Duration::from_secs(timeout_seconds)).as_nanos() as u64;

      // Track the expired keys
      let mut expired_keys = Vec::new();
      let mut all_flows_lock = ALL_FLOWS.lock().unwrap();

      // Walk through all the flows
      iterate_flows(&mut |key, data| {
        if data.end_status == 3 {
          // The flow has been handled already and should be ignored.
          // DO NOT process it again.
        } else if data.last_seen < expire {
          // This flow has expired, but has not been handled yet. Add it
          // to the list to be cleaned.
          expired_keys.push(key.clone());
        } else {
          // We have a valid flow, so it needs to be tracked
          if let Some(this_flow) = all_flows_lock.get_mut(&key) {
            this_flow.0.last_seen = data.last_seen;
            this_flow.0.bytes_sent = data.bytes_sent;
            this_flow.0.packets_sent = data.packets_sent;
            this_flow.0.rate_estimate_bps = data.rate_estimate_bps;
            this_flow.0.tcp_retransmits = data.tcp_retransmits;
            this_flow.0.end_status = data.end_status;
            this_flow.0.tos = data.tos;
            this_flow.0.flags = data.flags;
            let rtt = rtt_samples
              .get(&key)
              .copied()
              .unwrap_or([RttData::from_nanos(0); 2]);
            this_flow.0.rtt = rtt;
          } else {
            // Insert it into the map
            let flow_analysis = FlowAnalysis::new(&key);
            all_flows_lock.insert(key.clone(), (data.into(), flow_analysis));
          }

          // Protocol 6 is TCP; only open TCP flows carry fresh RTT
          // samples worth recording against the local host.
          if key.ip_protocol == 6 && data.end_status == 0 {
            if let Some(mut tracker) = self.raw_data.get_mut(&key.local_ip) {
              if let Some(rtt) = rtt_samples.get(&key) {
                for i in 0..2 {
                  if rtt[i].as_nanos() > 0 {
                    // Shift the rolling RTT window one slot toward the
                    // end so the newest sample lands in slot 0. Iterate
                    // in reverse so each value is copied before it is
                    // overwritten; a forward copy would smear slot 0
                    // across the whole array.
                    for slot in (1..60).rev() {
                      tracker.recent_rtt_data[slot] =
                        tracker.recent_rtt_data[slot - 1];
                    }
                    tracker.recent_rtt_data[0] = rtt[i].as_millis() as u32;
                    tracker.last_fresh_rtt_data_cycle = self_cycle;
                    if let Some(parents) = &tracker.network_json_parents {
                      let net_json = NETWORK_JSON.write().unwrap();
                      if let Some(rtt) = tracker.median_latency() {
                        net_json.add_rtt_cycle(parents, rtt);
                      }
                    }
                  }
                }
              }
            }
          }

          if data.end_status != 0 {
            // The flow has ended. We need to remove it from the map.
            expired_keys.push(key.clone());
          }
        }
      }); // End flow iterator

      if !expired_keys.is_empty() {
        for key in expired_keys.iter() {
          // Send it off to netflow analysis if we are supporting doing
          // so.
          if let Some(d) = all_flows_lock.get(key) {
            let _ = sender.send((key.clone(), (d.0.clone(), d.1.clone())));
          }
          // Remove the flow from circulation
          all_flows_lock.remove(key);
        }

        let ret = lqos_sys::end_flows(&mut expired_keys);
        if let Err(e) = ret {
          log::warn!("Failed to end flows: {:?}", e);
        }
      }

      // Cleaning run: evict flows that have not been seen within the
      // timeout window, and expire stale entries in the RTT map.
      all_flows_lock.retain(|_k, v| v.0.last_seen >= expire);
      expire_rtt_flows();
    }
  }
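
  /// Resets both halves of an atomic (download, upload) tuple to zero.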
  #[inline(always)]
  fn set_atomic_tuple_to_zero(tuple: &(AtomicU64, AtomicU64)) {
    tuple.0.store(0, std::sync::atomic::Ordering::Relaxed);
    tuple.1.store(0, std::sync::atomic::Ordering::Relaxed);
  }
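
  /// Adds `n` to an atomic (download, upload) tuple, skipping the
  /// store if the addition would overflow.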
  #[inline(always)]
  fn add_atomic_tuple(tuple: &(AtomicU64, AtomicU64), n: (u64, u64)) {
    let n0 = tuple.0.load(std::sync::atomic::Ordering::Relaxed);
    if let Some(n) = n0.checked_add(n.0) {
      tuple.0.store(n, std::sync::atomic::Ordering::Relaxed);
    }
    let n1 = tuple.1.load(std::sync::atomic::Ordering::Relaxed);
    if let Some(n) = n1.checked_add(n.1) {
      tuple.1.store(n, std::sync::atomic::Ordering::Relaxed);
    }
  }
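
  /// Recomputes the system-wide per-second totals by summing the
  /// per-host deltas for hosts updated in the current cycle, then
  /// refreshes the high-watermark statistics.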
  pub(crate) fn update_totals(&self) {
    let current_cycle = self.cycle.load(std::sync::atomic::Ordering::Relaxed);
    Self::set_atomic_tuple_to_zero(&self.bytes_per_second);
    Self::set_atomic_tuple_to_zero(&self.packets_per_second);
    Self::set_atomic_tuple_to_zero(&self.shaped_bytes_per_second);
    self
      .raw_data
      .iter()
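      // Only include hosts that reported traffic this cycle and that
      // are more than two cycles old (fresh entries may carry
      // incomplete counters).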
      .filter(|v| {
        v.most_recent_cycle == current_cycle
          && v.first_cycle + 2 < current_cycle
      })
      .map(|v| {
        (
          v.bytes.0.saturating_sub(v.prev_bytes.0),
          v.bytes.1.saturating_sub(v.prev_bytes.1),
          v.packets.0.saturating_sub(v.prev_packets.0),
          v.packets.1.saturating_sub(v.prev_packets.1),
          v.tc_handle.as_u32() > 0,
        )
      })
      .for_each(|(bytes_down, bytes_up, packets_down, packets_up, shaped)| {
        Self::add_atomic_tuple(&self.bytes_per_second, (bytes_down, bytes_up));
        Self::add_atomic_tuple(&self.packets_per_second, (packets_down, packets_up));
        if shaped {
          Self::add_atomic_tuple(&self.shaped_bytes_per_second, (bytes_down, bytes_up));
        }
      });
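
    // Update the high watermarks, discarding readings above 100 Gbps
    // as spurious.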
let current = self.bits_per_second();
    if current.0 < 100_000_000_000 && current.1 < 100_000_000_000 {
      let prev_max = (
        HIGH_WATERMARK_DOWN.load(std::sync::atomic::Ordering::Relaxed),
        HIGH_WATERMARK_UP.load(std::sync::atomic::Ordering::Relaxed),
      );
      if current.0 > prev_max.0 {
        HIGH_WATERMARK_DOWN.store(current.0, std::sync::atomic::Ordering::Relaxed);
      }
      if current.1 > prev_max.1 {
        HIGH_WATERMARK_UP.store(current.1, std::sync::atomic::Ordering::Relaxed);
      }
    }
  }
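
  /// Advances the tracker to the next polling cycle.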
  pub(crate) fn next_cycle(&self) {
    self.cycle.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
  }
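
  /// Returns the total (download, upload) throughput in bits per
  /// second.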
  pub(crate) fn bits_per_second(&self) -> (u64, u64) {
    (
      self.bytes_per_second.0.load(std::sync::atomic::Ordering::Relaxed) * 8,
      self.bytes_per_second.1.load(std::sync::atomic::Ordering::Relaxed) * 8,
    )
  }
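
  /// Returns the (download, upload) throughput of shaped traffic in
  /// bits per second.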
  pub(crate) fn shaped_bits_per_second(&self) -> (u64, u64) {
    (
      self.shaped_bytes_per_second.0.load(std::sync::atomic::Ordering::Relaxed) * 8,
      self.shaped_bytes_per_second.1.load(std::sync::atomic::Ordering::Relaxed) * 8,
    )
  }
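
  /// Returns the total (download, upload) packets per second.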
  pub(crate) fn packets_per_second(&self) -> (u64, u64) {
    (
      self.packets_per_second.0.load(std::sync::atomic::Ordering::Relaxed),
      self.packets_per_second.1.load(std::sync::atomic::Ordering::Relaxed),
    )
  }
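
  /// Logs every tracked IP address and its TC handle; kept for
  /// debugging.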
  #[allow(dead_code)]
  pub(crate) fn dump(&self) {
    for v in self.raw_data.iter() {
      let ip = v.key().as_ip();
      log::info!("{:<34}{:?}", ip, v.tc_handle);
    }
  }
}