Merge pull request #370 from LibreQoE/long_term_stats

Long term stats into develop
Robert Chacón 2023-07-10 14:55:32 -06:00 committed by GitHub
commit 1659aef41d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
198 changed files with 17194 additions and 618 deletions

15
.gitignore vendored
View File

@ -56,6 +56,13 @@ src/webusers.toml
src/lqusers.toml
src/dist
src/rust/lqos_anonymous_stats_server/anonymous.sqlite
src/rust/long_term_stats/license_server/lqkeys.bin
src/rust/long_term_stats/lts_node/lqkeys.bin
src/rust/long_term_stats/pgdb/.env
src/rust/long_term_stats/site_build/node_modules
src/rust/long_term_stats/site_build/output
src/rust/long_term_stats/site_build/package-lock.json
src/rust/long_term_stats/wasm_pipe/staging
# Ignore Rust build artifacts
src/rust/target
@ -105,3 +112,11 @@ tramp
# virtual environments
.venv
venv
src/integrationUISPbandwidths.template.csv
src/lts_keys.bin
src/network
src/network.json.good
src/network.pdf
src/ShapedDevices.csv.good
.gitignore
src/rust/lqosd/lts_keys.bin

View File

@ -69,10 +69,12 @@ Single-thread CPU performance will determine the max throughput of a single HTB
| Customer Max Plan | Passmark Single-Thread |
| --------------------| ------------------------ |
| 100 Mbps | 1000 |
| 250 Mbps | 1250 |
| 500 Mbps | 1500 |
| 1 Gbps | 1750 |
| 2 Gbps | 2000 |
| 3 Gbps | 2500 |
| 4 Gbps | 3000 |
Below is a table of approximate aggregate throughput capacity, assuming a CPU with a [single thread](https://www.cpubenchmark.net/singleThread.html#server-thread) performance of 2700 or greater:

View File

@ -173,7 +173,7 @@ def validateNetworkAndDevices():
for ipEntry in ipv4_list:
if ipEntry in seenTheseIPsAlready:
warnings.warn("Provided IPv4 '" + ipEntry + "' in ShapedDevices.csv at row " + str(rowNum) + " is duplicate.", stacklevel=2)
#devicesValidatedOrNot = False
seenTheseIPsAlready.append(ipEntry)
else:
if (type(ipaddress.ip_network(ipEntry)) is ipaddress.IPv4Network) or (type(ipaddress.ip_address(ipEntry)) is ipaddress.IPv4Address):
@ -740,6 +740,26 @@ def refreshShapers():
# Parse network structure. For each tier, generate commands to create corresponding HTB and leaf classes. Prepare commands for execution later
# Define lists for hash filters
def traverseNetwork(data):
# Cake needs help handling rates lower than 5 Mbps
def sqmFixupRate(rate:int, sqm:str) -> str:
# If we aren't using cake, just return the sqm string
if not sqm.startswith("cake") or "rtt" in sqm:
return sqm
# If we are using cake, we need to fixup the rate
# Based on: 1 MTU is 1500 bytes, or 12,000 bits.
# At 1 Mbps, (1,000 bits per ms) transmitting an MTU takes 12ms. Add 3ms for overhead, and we get 15ms.
# So 15ms divided by 5 (for 1%) multiplied by 100 yields 300ms.
# The same formula gives 180ms at 2Mbps
# 140ms at 3Mbps
# 120ms at 4Mbps
match rate:
case 1: return sqm + " rtt 300"
case 2: return sqm + " rtt 180"
case 3: return sqm + " rtt 140"
case 4: return sqm + " rtt 120"
case _: return sqm
for node in data:
command = 'class add dev ' + interfaceA + ' parent ' + data[node]['parentClassID'] + ' classid ' + data[node]['classMinor'] + ' htb rate '+ str(data[node]['downloadBandwidthMbpsMin']) + 'mbit ceil '+ str(data[node]['downloadBandwidthMbps']) + 'mbit prio 3'
linuxTCcommands.append(command)
@ -760,14 +780,18 @@ def refreshShapers():
command = 'class add dev ' + interfaceA + ' parent ' + data[node]['classid'] + ' classid ' + circuit['classMinor'] + ' htb rate '+ str(circuit['minDownload']) + 'mbit ceil '+ str(circuit['maxDownload']) + 'mbit prio 3' + tcComment
linuxTCcommands.append(command)
# Only add CAKE / fq_codel qdisc if monitorOnlyMode is Off
if monitorOnlyMode == False:
# SQM Fixup for lower rates
useSqm = sqmFixupRate(circuit['maxDownload'], sqm)
command = 'qdisc add dev ' + interfaceA + ' parent ' + circuit['classMajor'] + ':' + circuit['classMinor'] + ' ' + useSqm
linuxTCcommands.append(command)
command = 'class add dev ' + interfaceB + ' parent ' + data[node]['up_classid'] + ' classid ' + circuit['classMinor'] + ' htb rate '+ str(circuit['minUpload']) + 'mbit ceil '+ str(circuit['maxUpload']) + 'mbit prio 3'
linuxTCcommands.append(command)
# Only add CAKE / fq_codel qdisc if monitorOnlyMode is Off
if monitorOnlyMode == False:
# SQM Fixup for lower rates
useSqm = sqmFixupRate(circuit['maxUpload'], sqm)
command = 'qdisc add dev ' + interfaceB + ' parent ' + circuit['up_classMajor'] + ':' + circuit['classMinor'] + ' ' + useSqm
linuxTCcommands.append(command)
pass
for device in circuit['devices']:
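The rtt values hard-coded in `sqmFixupRate()` above follow the arithmetic spelled out in its comments; a minimal sketch of the general formula, written in Rust purely for illustration (the helper name is made up and is not part of this patch):

```rust
// Serializing one 1500-byte MTU (12,000 bits) at `rate_mbps` takes 12/rate ms.
// Add 3 ms of overhead, then scale so that figure becomes 5% of the cake rtt target.
fn cake_rtt_target_ms(rate_mbps: u32) -> u32 {
    let serialize_ms = 12.0 / rate_mbps as f64;
    (((serialize_ms + 3.0) / 5.0) * 100.0).round() as u32
}
// cake_rtt_target_ms(1) == 300, cake_rtt_target_ms(2) == 180,
// cake_rtt_target_ms(3) == 140, cake_rtt_target_ms(4) == 120
```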

View File

@ -10,6 +10,10 @@ try:
except:
from ispConfig import uispSite, uispStrategy
overwriteNetworkJSONalways = False
try:
from ispConfig import uispSuspendedStrategy
except:
uispSuspendedStrategy = "none"
try:
from ispConfig import airMax_capacity
except:
@ -458,6 +462,18 @@ def buildFullGraph():
if (site['qos']['downloadSpeed']) and (site['qos']['uploadSpeed']):
download = int(round(site['qos']['downloadSpeed']/1000000))
upload = int(round(site['qos']['uploadSpeed']/1000000))
if site['identification'] is not None and site['identification']['suspended'] is not None and site['identification']['suspended'] == True:
if uispSuspendedStrategy == "ignore":
print("WARNING: Site " + name + " is suspended")
continue
if uispSuspendedStrategy == "slow":
print("WARNING: Site " + name + " is suspended")
download = 1
upload = 1
if site['identification']['status'] == "disconnected":
print("WARNING: Site " + name + " is disconnected")
continue
node = NetworkNode(id=id, displayName=name, type=nodeType,
parentId=parent, download=download, upload=upload, address=address, customerName=customerName)

View File

@ -17,6 +17,7 @@ upstreamBandwidthCapacityUploadMbps = 1000
# Devices in ShapedDevices.csv without a defined ParentNode (such as if you have a flat {} network)
# will be placed under one of these generated parent nodes, evenly spread out across CPU cores.
# This defines the bandwidth limit for each of those generated parent nodes.
# If you are not sure what values to use, simply use the same values as upstreamBandwidthCapacityDownloadMbps and upstreamBandwidthCapacityUploadMbps
generatedPNDownloadMbps = 1000
generatedPNUploadMbps = 1000
@ -97,6 +98,11 @@ uispSite = ''
# or site options.
# * "full" - build a complete network map
uispStrategy = "full"
# Handling of UISP suspensions:
# * "none" - do not handle suspensions
# * "ignore" - do not add suspended customers to the network map
# * "slow" - limit suspended customers to 1mbps
uispSuspendedStrategy = "none"
# Assumed capacity of AirMax and LTU radios vs reported capacity by UISP. For example, 65% would be 0.65.
# For AirMax, this applies to flexible frame only. AirMax fixed frame will have capacity based on ratio.
airMax_capacity = 0.65

1689
src/rust/Cargo.lock generated

File diff suppressed because it is too large

View File

@ -27,4 +27,15 @@ members = [
"lqos_anonymous_stats_server", # The server for gathering anonymous usage data. "lqos_anonymous_stats_server", # The server for gathering anonymous usage data.
"lqos_heimdall", # Library for managing Heimdall flow watching "lqos_heimdall", # Library for managing Heimdall flow watching
"lqos_map_perf", # A CLI tool for testing eBPF map performance "lqos_map_perf", # A CLI tool for testing eBPF map performance
"lqstats", # A CLI utility for retrieving long-term statistics
"long_term_stats/license_server", # Licensing Server for LibreQoS Long-term stats
"long_term_stats/lts_node", # Long-term stats cluster node (web interface)
"long_term_stats/lts_ingestor", # Long-term stats data ingestor (feeding databases)
"long_term_stats/pgdb", # PostgreSQL interface for the LTS system
"long_term_stats/licman", # A CLI tool for managing the licensing server
"long_term_stats/lts_client", # Shared data and client-side code for long-term stats
"long_term_stats/wasm_pipe", # Provides a WebAssembly tight/compressed data pipeline
"long_term_stats/wasm_pipe_types", # Common types between the WASM conduit and the WASM server
"lqos_map_perf", # A CLI tool for testing eBPF map performance
"uisp", # REST support for the UISP API
]

View File

@ -0,0 +1,23 @@
# Long Term Stats
We'd really rather you let us host your long-term statistics. It's a lot
of work, and gives us a revenue stream to keep building LibreQoS.
If you really want to self-host, setup is a bit convoluted - but we won't
stop you.
## PostgreSQL
* Install PostgreSQL somewhere on your network. You only need one PostgreSQL host per long-term stats cluster.
* Set up the database schema (TBD).
* Put the connection string for your database in `/etc/lqdb` on each host.
* Install the `sqlx` tool with `cargo install sqlx-cli --no-default-features --features rustls,postgres`
## For each stats node in the cluster
* Install InfluxDB.
* Install `lts_node`.
* Set up `/etc/lqdb`.
* Copy `lts_keys.bin` from the license server to the `lts_node` directory.
* Run the process.
* Login to the licensing server, and run `licman host add <ip of the new host>`
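Note: this README does not spell out the format of `/etc/lqdb`; per the steps above it simply holds your PostgreSQL connection string, so something along the lines of `postgres://lts:secret@db-host/lts` (the user, password, and database name here are illustrative placeholders, not values taken from this PR).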

View File

@ -0,0 +1,14 @@
[package]
name = "license_server"
version = "0.1.0"
edition = "2021"
[dependencies]
tokio = { version = "1.25.0", features = ["full"] }
anyhow = "1"
env_logger = "0"
log = "0"
serde = { version = "1.0", features = ["derive"] }
lts_client = { path = "../lts_client" }
pgdb = { path = "../pgdb" }
once_cell = "1"

View File

@ -0,0 +1,3 @@
# License Server
Runs at LibreQoS and matches license keys with an "is valid" list. If you're running your very own licensing server, then you will need to set this up on your server to accept your key. Details will be provided later.

View File

@ -0,0 +1,14 @@
mod server;
mod pki;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
// Start the logger
env_logger::init_from_env(
env_logger::Env::default()
.filter_or(env_logger::DEFAULT_FILTER_ENV, "warn"),
);
let _ = server::start().await;
Ok(())
}

View File

@ -0,0 +1,6 @@
use lts_client::{dryoc::dryocbox::*, pki::generate_new_keypair};
use once_cell::sync::Lazy;
use tokio::sync::RwLock;
pub(crate) static LIBREQOS_KEYPAIR: Lazy<RwLock<KeyPair>> = Lazy::new(|| RwLock::new(generate_new_keypair(KEY_PATH)));
const KEY_PATH: &str = "lqkeys.bin"; // Store in the working directory

View File

@ -0,0 +1,149 @@
use lts_client::transport_data::{LicenseReply, LicenseRequest};
use pgdb::sqlx::{Pool, Postgres};
use std::net::SocketAddr;
use tokio::{
io::{AsyncReadExt, AsyncWriteExt},
net::TcpListener,
spawn,
};
use crate::pki::LIBREQOS_KEYPAIR;
pub async fn start() -> anyhow::Result<()> {
let listener = TcpListener::bind(":::9126").await?;
log::info!("Listening on :::9126");
let pool = pgdb::get_connection_pool(5).await;
if pool.is_err() {
log::error!("Unable to connect to the database");
log::error!("{pool:?}");
return Err(anyhow::Error::msg("Unable to connect to the database"));
}
let pool = pool.unwrap();
loop {
let (mut socket, address) = listener.accept().await?;
log::info!("Connection from {address:?}");
let pool = pool.clone();
spawn(async move {
let mut buf = vec![0u8; 10240];
if let Ok(bytes) = socket.read(&mut buf).await {
log::info!("Received {bytes} bytes from {address:?}");
match decode(&buf, address, pool).await {
Err(e) => log::error!("{e:?}"),
Ok(reply) => {
let bytes = build_reply(&reply);
match bytes {
Ok(bytes) => {
log::info!("Submitting {} bytes to network", bytes.len());
if let Err(e) = socket.write_all(&bytes).await {
log::error!("Write error: {e:?}");
}
}
Err(e) => {
log::error!("{e:?}");
}
}
}
}
}
});
}
}
async fn decode(
buf: &[u8],
address: SocketAddr,
pool: Pool<Postgres>,
) -> anyhow::Result<LicenseReply> {
const U64SIZE: usize = std::mem::size_of::<u64>();
let version_buf = &buf[0..2].try_into()?;
let version = u16::from_be_bytes(*version_buf);
let size_buf = &buf[2..2 + U64SIZE].try_into()?;
let size = u64::from_be_bytes(*size_buf);
log::info!("Received a version {version} payload of serialized size {size} from {address:?}");
match version {
1 => {
let start = 2 + U64SIZE;
let end = start + size as usize;
let payload: LicenseRequest = lts_client::cbor::from_slice(&buf[start..end])?;
let license = check_license(&payload, address, pool).await?;
Ok(license)
}
_ => {
log::error!("Unknown version of statistics: {version}, dumped {size} bytes");
Err(anyhow::Error::msg("Version error"))
}
}
}
async fn check_license(
request: &LicenseRequest,
address: SocketAddr,
pool: Pool<Postgres>,
) -> anyhow::Result<LicenseReply> {
match request {
LicenseRequest::LicenseCheck { key } => {
log::info!("Checking license from {address:?}, key: {key}");
if key == "test" {
log::info!("License is valid");
Ok(LicenseReply::Valid {
expiry: 0, // Temporary value
stats_host: "127.0.0.1:9127".to_string(), // Also temporary
})
} else {
match pgdb::get_stats_host_for_key(pool, key).await {
Ok(host) => {
log::info!("License is valid");
return Ok(LicenseReply::Valid {
expiry: 0, // Temporary value
stats_host: host,
});
}
Err(e) => {
log::warn!("Unable to get stats host for key: {e:?}");
}
}
log::info!("License is denied");
Ok(LicenseReply::Denied)
}
}
LicenseRequest::KeyExchange { node_id, node_name, license_key, public_key } => {
log::info!("Public key exchange requested by {node_id}");
// Check if the node_id / license key combination exists
// If it does, update it to the current last-seen and the new public key
// If it doesn't, insert it
let public_key = lts_client::cbor::to_vec(&public_key).unwrap();
let result = pgdb::insert_or_update_node_public_key(pool, node_id, node_name, license_key, &public_key).await;
if result.is_err() {
log::warn!("Unable to insert or update node public key: {result:?}");
return Err(anyhow::Error::msg("Unable to insert or update node public key"));
}
let public_key = LIBREQOS_KEYPAIR.read().await.public_key.clone();
Ok(LicenseReply::MyPublicKey { public_key })
}
}
}
fn build_reply(reply: &LicenseReply) -> anyhow::Result<Vec<u8>> {
let mut result = Vec::new();
let payload = lts_client::cbor::to_vec(reply);
if let Err(e) = payload {
log::warn!("Unable to serialize statistics. Not sending them.");
log::warn!("{e:?}");
return Err(anyhow::Error::msg("Unable to serialize"));
}
let payload = payload.unwrap();
// Store the version as network order
result.extend(1u16.to_be_bytes());
// Store the payload size as network order
result.extend((payload.len() as u64).to_be_bytes());
// Store the payload itself
result.extend(payload);
Ok(result)
}
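For reference, the framing `decode()` expects is the mirror image of `build_reply()`: a big-endian `u16` protocol version, a big-endian `u64` payload length, then the CBOR payload. A client-side sketch, for illustration only (the real client code lives in `lts_client`, later in this PR):

```rust
// Frame a LicenseRequest the way decode() above parses it (protocol version 1).
fn frame_request(req: &LicenseRequest) -> anyhow::Result<Vec<u8>> {
    let payload = lts_client::cbor::to_vec(req)?;      // CBOR-encoded body
    let mut buf = Vec::with_capacity(payload.len() + 10);
    buf.extend(1u16.to_be_bytes());                    // version, network order
    buf.extend((payload.len() as u64).to_be_bytes());  // payload size, network order
    buf.extend(payload);                               // the payload itself
    Ok(buf)
}
```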

View File

@ -0,0 +1,12 @@
[package]
name = "licman"
version = "0.1.0"
edition = "2021"
[dependencies]
clap = { version = "4", features = ["derive"] }
anyhow = "1"
pgdb = { path = "../pgdb" }
tokio = { version = "1", features = [ "rt", "macros", "net", "io-util", "time" ] }
env_logger = "0"
log = "0"

View File

@ -0,0 +1,112 @@
use anyhow::Result;
use clap::{Parser, Subcommand};
use pgdb::create_free_trial;
use std::process::exit;
#[derive(Parser)]
#[command()]
struct Args {
#[command(subcommand)]
command: Option<Commands>,
}
#[derive(Subcommand)]
enum Commands {
/// Manage stats hosts
Hosts {
#[command(subcommand)]
command: Option<HostsCommands>,
},
/// Manage licenses
License {
#[command(subcommand)]
command: Option<LicenseCommands>,
},
/// Manage users
Users {
#[command(subcommand)]
command: Option<UsersCommands>,
},
}
#[derive(Subcommand)]
enum HostsCommands {
/// Add a host to the list of available stats storing hosts
Add { hostname: String, influx_host: String, api_key: String },
}
#[derive(Subcommand)]
enum LicenseCommands {
/// Create a new free trial license
FreeTrial { organization: String },
}
#[derive(Subcommand)]
enum UsersCommands {
/// Add a new user
Add { key: String, username: String, password: String, nicename: String },
/// Delete a user
Delete { key: String, username: String },
}
#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<()> {
env_logger::init_from_env(
env_logger::Env::default()
.filter_or(env_logger::DEFAULT_FILTER_ENV, "warn"),
);
// Get the database connection pool
let pool = pgdb::get_connection_pool(5).await;
if pool.is_err() {
log::error!("Unable to connect to the database");
log::error!("{pool:?}");
return Err(anyhow::Error::msg("Unable to connect to the database"));
}
let pool = pool.unwrap();
let cli = Args::parse();
match cli.command {
Some(Commands::Hosts {
command: Some(HostsCommands::Add { hostname, influx_host, api_key }),
}) => {
match pgdb::add_stats_host(pool, hostname, influx_host, api_key).await {
Err(e) => {
log::error!("Unable to add stats host: {e:?}");
exit(1);
}
Ok(new_id) => {
println!("Added stats host with id {}", new_id);
}
}
}
Some(Commands::License{command: Some(LicenseCommands::FreeTrial { organization })}) => {
match create_free_trial(pool, &organization).await {
Err(e) => {
log::error!("Unable to create free trial: {e:?}");
exit(1);
}
Ok(key) => {
println!("Your new license key is: {}", key);
}
}
}
Some(Commands::Users{command: Some(UsersCommands::Add { key, username, password, nicename })}) => {
match pgdb::add_user(pool, &key, &username, &password, &nicename).await {
Err(e) => {
log::error!("Unable to add user: {e:?}");
exit(1);
}
Ok(_) => {
println!("Added user {}", username);
}
}
}
_ => {
println!("Run with --help to see instructions");
exit(0);
}
}
Ok(())
}

View File

@ -0,0 +1,23 @@
[package]
name = "lts_client"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
lqos_config = { path = "../../lqos_config" }
lqos_utils = { path = "../../lqos_utils" }
uisp = { path = "../../uisp" }
dryoc = { version = "0.5", features = ["serde"] }
serde = { version = "1.0", features = ["derive"] }
thiserror = "1"
tokio = { version = "1", features = [ "rt", "macros", "net", "io-util", "time" ] }
serde_cbor = "0" # For RFC8949/7409 format C binary objects
log = "0"
bincode = "1"
once_cell = "1"
sysinfo = "0"
num-traits = "0.2"
miniz_oxide = "0.7.1"
dashmap = "5.4"

View File

@ -0,0 +1,61 @@
use num_traits::{Bounded, CheckedDiv, NumCast, Zero};
#[derive(Debug, Clone)]
pub(crate) struct MinMaxAvg<T> {
pub(crate) min: T,
pub(crate) max: T,
pub(crate) avg: T,
}
impl<
T: Bounded
+ Zero
+ std::ops::AddAssign<T>
+ Copy
+ std::cmp::Ord
+ CheckedDiv
+ NumCast,
> MinMaxAvg<T>
{
pub(crate) fn from_slice(stats: &[T]) -> Self {
let mut min = T::max_value();
let mut max = T::min_value();
let mut avg = T::zero();
stats.iter().for_each(|n| {
avg += *n;
min = T::min(min, *n);
max = T::max(max, *n);
});
let len = T::from(stats.len()).unwrap();
avg = avg.checked_div(&len).unwrap_or(T::zero());
Self { max, min, avg }
}
}
#[derive(Debug, Clone)]
pub(crate) struct MinMaxAvgPair<T> {
pub(crate) down: MinMaxAvg<T>,
pub(crate) up: MinMaxAvg<T>,
}
impl<
T: Bounded
+ Zero
+ std::ops::AddAssign<T>
+ Copy
+ std::cmp::Ord
+ CheckedDiv
+ NumCast,
> MinMaxAvgPair<T>
{
pub(crate) fn from_slice(stats: &[(T, T)]) -> Self {
let down: Vec<T> = stats.iter().map(|(down, _up)| *down).collect();
let up: Vec<T> = stats.iter().map(|(_down, up)| *up).collect();
Self {
down: MinMaxAvg::<T>::from_slice(&down),
up: MinMaxAvg::<T>::from_slice(&up),
}
}
}
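A quick usage illustration (these are crate-private types, so this only compiles inside `lts_client`; the sample values are made up):

```rust
// Each tuple is (download, upload); from_slice collates each direction separately.
let samples: Vec<(u64, u64)> = vec![(10, 1), (30, 3), (20, 2)];
let pair = MinMaxAvgPair::from_slice(&samples);
assert_eq!((pair.down.min, pair.down.max, pair.down.avg), (10, 30, 20));
assert_eq!((pair.up.min, pair.up.max, pair.up.avg), (1, 3, 2));
```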

View File

@ -0,0 +1,145 @@
mod session_buffer;
mod min_max;
mod system_stats;
use crate::{transport_data::{StatsHost, StatsSummary, StatsRttSummary, StatsTreeNode, StatsSubmission, StatsTotals}, submission_queue::{new_submission, comm_channel::SenderChannelMessage}};
use self::min_max::{MinMaxAvgPair, MinMaxAvg};
pub(crate) use session_buffer::{StatsSession, SESSION_BUFFER};
use lqos_utils::unix_time::unix_now;
use tokio::sync::mpsc::Sender;
use std::{collections::HashMap, net::IpAddr};
use super::{HostSummary, NetworkTreeEntry};
pub(crate) async fn collate_stats(comm_tx: Sender<SenderChannelMessage>) {
let timestamp = unix_now().unwrap_or(0);
if timestamp == 0 {
return; // We're not ready
}
let mut writer = SESSION_BUFFER.lock().await;
if writer.is_empty() {
return; // Nothing to do
}
// Collate total stats for the period
let bps: Vec<(u64, u64)> = writer
.iter()
.map(|e| e.throughput.bits_per_second)
.collect();
let pps: Vec<(u64, u64)> = writer
.iter()
.map(|e| e.throughput.packets_per_second)
.collect();
let sbps: Vec<(u64, u64)> = writer
.iter()
.map(|e| e.throughput.shaped_bits_per_second)
.collect();
let bits_per_second = MinMaxAvgPair::from_slice(&bps);
let packets_per_second = MinMaxAvgPair::from_slice(&pps);
let shaped_bits_per_second = MinMaxAvgPair::from_slice(&sbps);
// Iterate hosts gathering min/max data
let mut hosts_accumulator: HashMap<IpAddr, Vec<&HostSummary>> = HashMap::new();
let mut tree_accumulator: HashMap<String, Vec<(usize, &NetworkTreeEntry)>> = HashMap::new();
writer.iter().for_each(|e| {
e.throughput.hosts.iter().for_each(|host| {
if let Some(hosts) = hosts_accumulator.get_mut(&host.ip) {
hosts.push(host);
} else {
hosts_accumulator.insert(host.ip, vec![host]);
}
});
e.network_tree.iter().for_each(|(index, node)| {
if let Some(t) = tree_accumulator.get_mut(&node.name) {
t.push((*index, node));
} else {
tree_accumulator.insert(node.name.clone(), vec![(*index, node)]);
}
});
});
// Get min/max data per IP
let mut stats_hosts = Vec::new();
for (ip, host) in hosts_accumulator.into_iter() {
let bits = MinMaxAvgPair::from_slice(
&host
.iter()
.map(|h| (h.bits_per_second.0, h.bits_per_second.1))
.collect::<Vec<(u64, u64)>>(),
);
let rtt = MinMaxAvg::from_slice(
&host
.iter()
.map(|h| (h.median_rtt * 100.0) as u32)
.collect::<Vec<u32>>(),
);
let sh = StatsHost {
ip_address: ip.to_string(),
circuit_id: host[0].circuit_id.clone(),
bits: StatsSummary{ min: (bits.down.min, bits.up.min), max: (bits.down.max, bits.up.max), avg: (bits.down.avg, bits.up.avg) },
rtt: StatsRttSummary{ min: rtt.min, max: rtt.max, avg: rtt.avg },
};
stats_hosts.push(sh);
}
// Get network tree min/max data
let mut tree_entries = Vec::new();
for (name, nodes) in tree_accumulator.into_iter() {
let bits = MinMaxAvgPair::from_slice(
&nodes
.iter()
.map(|(_i, n)| (n.current_throughput.0, n.current_throughput.1))
.collect::<Vec<(u32, u32)>>(),
);
let rtt = MinMaxAvg::from_slice(
&nodes
.iter()
.map(|(_i, n)| (n.rtts.2) as u32)
.collect::<Vec<u32>>(),
);
let n = StatsTreeNode {
index: nodes[0].0,
name: name.to_string(),
max_throughput: nodes[0].1.max_throughput,
current_throughput: StatsSummary{ min: (bits.down.min.into(), bits.up.min.into()), max: (bits.down.max.into(), bits.up.max.into()), avg: (bits.down.avg.into(), bits.up.avg.into()) },
rtt: StatsRttSummary{ min: rtt.min, max: rtt.max, avg: rtt.avg },
parents: nodes[0].1.parents.clone(),
immediate_parent: nodes[0].1.immediate_parent,
node_type: nodes[0].1.node_type.clone(),
};
tree_entries.push(n);
}
// Add to the submissions queue
let (cpu, ram) = system_stats::get_cpu_ram().await;
new_submission(StatsSubmission {
timestamp,
totals: Some(StatsTotals {
bits: StatsSummary {
min: (bits_per_second.down.min, bits_per_second.up.min),
max: (bits_per_second.down.max, bits_per_second.up.max),
avg: (bits_per_second.down.avg, bits_per_second.up.avg),
},
shaped_bits: StatsSummary {
min: (shaped_bits_per_second.down.min, shaped_bits_per_second.up.min),
max: (shaped_bits_per_second.down.max, shaped_bits_per_second.up.max),
avg: (shaped_bits_per_second.down.avg, shaped_bits_per_second.up.avg),
},
packets: StatsSummary {
min: (packets_per_second.down.min, packets_per_second.up.min),
max: (packets_per_second.down.max, packets_per_second.up.max),
avg: (packets_per_second.down.avg, packets_per_second.up.avg),
},
}),
cpu_usage: Some(cpu),
ram_percent: Some(ram),
hosts: Some(stats_hosts),
tree: Some(tree_entries),
uisp_devices: None,
}, comm_tx).await;
// Clear the collection buffer
writer.clear();
}

View File

@ -0,0 +1,11 @@
use once_cell::sync::Lazy;
use tokio::sync::Mutex;
use crate::collector::{ThroughputSummary, network_tree::NetworkTreeEntry};
pub(crate) static SESSION_BUFFER: Lazy<Mutex<Vec<StatsSession>>> =
Lazy::new(|| Mutex::new(Vec::new()));
pub(crate) struct StatsSession {
pub(crate) throughput: ThroughputSummary,
pub(crate) network_tree: Vec<(usize, NetworkTreeEntry)>,
}

View File

@ -0,0 +1,24 @@
use once_cell::sync::Lazy;
use sysinfo::{System, SystemExt};
use tokio::sync::Mutex;
static SYS: Lazy<Mutex<System>> = Lazy::new(|| Mutex::new(System::new_all()));
pub(crate) async fn get_cpu_ram() -> (Vec<u32>, u32) {
use sysinfo::CpuExt;
let mut lock = SYS.lock().await;
lock.refresh_cpu();
lock.refresh_memory();
let cpus: Vec<u32> = lock
.cpus()
.iter()
.map(|cpu| cpu.cpu_usage() as u32) // Always rounds down
.collect();
let memory = (lock.used_memory() as f32 / lock.total_memory() as f32) * 100.0;
//println!("cpu: {:?}, ram: {}", cpus, memory);
(cpus, memory as u32)
}

View File

@ -0,0 +1,117 @@
//! Provides a thread that runs in the background for `lqosd`. It is
//! triggered whenever fresh throughput data is ready to be collected.
//! The data is stored in a "session buffer", to be collated when the
//! collation period timer fires.
//!
//! This is designed to ensure that even long averaging periods don't
//! lose min/max values.
use super::StatsUpdateMessage;
use crate::{collector::{collation::{collate_stats, StatsSession}, SESSION_BUFFER, uisp_ext::gather_uisp_data}, submission_queue::{enqueue_shaped_devices_if_allowed, comm_channel::{SenderChannelMessage, start_communication_channel}}};
use lqos_config::EtcLqos;
use once_cell::sync::Lazy;
use std::{sync::atomic::AtomicU64, time::Duration};
use tokio::sync::mpsc::{self, Receiver, Sender};
use dashmap::DashSet;
static STATS_COUNTER: AtomicU64 = AtomicU64::new(0);
pub(crate) static DEVICE_ID_LIST: Lazy<DashSet<String>> = Lazy::new(DashSet::new);
/// Launches the long-term statistics manager task. Returns immediately,
/// because it creates the channel and then spawns listener threads.
///
/// Returns a channel that may be used to notify of data availability.
pub async fn start_long_term_stats() -> Sender<StatsUpdateMessage> {
let (update_tx, update_rx): (Sender<StatsUpdateMessage>, Receiver<StatsUpdateMessage>) = mpsc::channel(10);
let (comm_tx, comm_rx): (Sender<SenderChannelMessage>, Receiver<SenderChannelMessage>) = mpsc::channel(10);
tokio::spawn(lts_manager(update_rx, comm_tx));
tokio::spawn(collation_scheduler(update_tx.clone()));
tokio::spawn(uisp_collection_manager(update_tx.clone()));
tokio::spawn(start_communication_channel(comm_rx));
// Return the channel, for notifications
update_tx
}
async fn collation_scheduler(tx: Sender<StatsUpdateMessage>) {
loop {
let collation_period = get_collation_period();
tx.send(StatsUpdateMessage::CollationTime).await.unwrap();
tokio::time::sleep(collation_period).await;
}
}
async fn lts_manager(mut rx: Receiver<StatsUpdateMessage>, comm_tx: Sender<SenderChannelMessage>) {
log::info!("Long-term stats gathering thread started");
loop {
let msg = rx.recv().await;
match msg {
Some(StatsUpdateMessage::ThroughputReady(throughput)) => {
let counter = STATS_COUNTER.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
if counter > 5 {
log::info!("Enqueueing throughput data for collation");
SESSION_BUFFER.lock().await.push(StatsSession {
throughput: throughput.0,
network_tree: throughput.1,
});
}
}
Some(StatsUpdateMessage::ShapedDevicesChanged(shaped_devices)) => {
log::info!("Enqueueing shaped devices for collation");
// Update the device id list
DEVICE_ID_LIST.clear();
shaped_devices.iter().for_each(|d| {
DEVICE_ID_LIST.insert(d.device_id.clone());
});
tokio::spawn(enqueue_shaped_devices_if_allowed(shaped_devices, comm_tx.clone()));
}
Some(StatsUpdateMessage::CollationTime) => {
log::info!("Collation time reached");
tokio::spawn(collate_stats(comm_tx.clone()));
}
Some(StatsUpdateMessage::UispCollationTime) => {
log::info!("UISP Collation time reached");
tokio::spawn(gather_uisp_data(comm_tx.clone()));
}
Some(StatsUpdateMessage::Quit) => {
// The daemon is exiting, terminate
let _ = comm_tx.send(SenderChannelMessage::Quit).await;
break;
}
None => {
log::warn!("Long-term stats thread received a None message");
}
}
}
}
fn get_collation_period() -> Duration {
if let Ok(cfg) = EtcLqos::load() {
if let Some(lts) = &cfg.long_term_stats {
return Duration::from_secs(lts.collation_period_seconds.into());
}
}
Duration::from_secs(60)
}
fn get_uisp_collation_period() -> Option<Duration> {
if let Ok(cfg) = EtcLqos::load() {
if let Some(lts) = &cfg.long_term_stats {
return Some(Duration::from_secs(lts.uisp_reporting_interval_seconds.unwrap_or(300)));
}
}
None
}
async fn uisp_collection_manager(control_tx: Sender<StatsUpdateMessage>) {
if let Some(period) = get_uisp_collation_period() {
log::info!("Starting UISP poller with period {:?}", period);
loop {
control_tx.send(StatsUpdateMessage::UispCollationTime).await.unwrap();
tokio::time::sleep(period).await;
}
}
}
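A sketch of how a producer is expected to feed the channel returned by `start_long_term_stats()` (the real producer is `lqosd`'s throughput tracker, which is not part of this excerpt; the function name below is illustrative):

```rust
// Hypothetical caller: hand fresh throughput data to the long-term stats manager.
async fn on_throughput_tick(
    tx: &tokio::sync::mpsc::Sender<StatsUpdateMessage>,
    summary: ThroughputSummary,
    tree: Vec<(usize, NetworkTreeEntry)>,
) {
    // Boxed, as StatsUpdateMessage requests, to avoid copying a large payload.
    let _ = tx.send(StatsUpdateMessage::ThroughputReady(Box::new((summary, tree)))).await;
}
```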

View File

@ -0,0 +1,13 @@
//! Provides stats collection services for `lqosd`.
mod collection_manager;
mod stats_availability;
mod throughput_summary;
mod collation;
mod network_tree;
mod uisp_ext;
pub use stats_availability::StatsUpdateMessage;
pub use collection_manager::start_long_term_stats;
pub use throughput_summary::{ThroughputSummary, HostSummary};
pub(crate) use collation::SESSION_BUFFER;
pub use network_tree::NetworkTreeEntry;

View File

@ -0,0 +1,44 @@
use lqos_config::NetworkJsonNode;
#[derive(Debug, Clone)]
pub struct NetworkTreeEntry {
pub name: String,
pub max_throughput: (u32, u32),
pub current_throughput: (u32, u32),
pub rtts: (u16, u16, u16),
pub parents: Vec<usize>,
pub immediate_parent: Option<usize>,
pub node_type: Option<String>,
}
impl From<&NetworkJsonNode> for NetworkTreeEntry {
fn from(value: &NetworkJsonNode) -> Self {
let mut max = 0;
let mut min = if value.rtts.is_empty() {
0
} else {
u16::MAX
};
let mut sum = 0;
for n in value.rtts.iter() {
let n = *n;
sum += n;
if n < min { min = n; }
if n > max { max = n; }
}
let avg = sum.checked_div(value.rtts.len() as u16).unwrap_or(0);
Self {
name: value.name.clone(),
max_throughput: value.max_throughput,
parents: value.parents.clone(),
immediate_parent: value.immediate_parent,
current_throughput: (
value.current_throughput.0.load(std::sync::atomic::Ordering::Relaxed) as u32,
value.current_throughput.1.load(std::sync::atomic::Ordering::Relaxed) as u32,
),
node_type: value.node_type.clone(),
rtts: (min, max, avg),
}
}
}

View File

@ -0,0 +1,21 @@
//! Message type to be sent to the long-term stats thread when
//! data is available.
use lqos_config::ShapedDevice;
use super::{ThroughputSummary, network_tree::NetworkTreeEntry};
#[derive(Debug)]
/// Messages to/from the stats collection thread
pub enum StatsUpdateMessage {
/// Fresh throughput stats have been collected
ThroughputReady(Box<(ThroughputSummary, Vec<(usize, NetworkTreeEntry)>)>),
/// ShapedDevices.csv has changed and the server needs new data
ShapedDevicesChanged(Vec<ShapedDevice>),
/// It's time to collate the session buffer
CollationTime,
/// The daemon is exiting
Quit,
/// Time to gather UISP data
UispCollationTime,
}

View File

@ -0,0 +1,23 @@
//! Provides an interface for collecting data from the throughput
//! tracker in `lqosd` and submitting it into the long-term stats
//! system.
//!
//! Note that ThroughputSummary should be boxed, to avoid copying
use std::net::IpAddr;
#[derive(Debug)]
pub struct ThroughputSummary {
pub bits_per_second: (u64, u64),
pub shaped_bits_per_second: (u64, u64),
pub packets_per_second: (u64, u64),
pub hosts: Vec<HostSummary>,
}
#[derive(Debug)]
pub struct HostSummary {
pub ip: IpAddr,
pub circuit_id: Option<String>,
pub bits_per_second: (u64, u64),
pub median_rtt: f32,
}

View File

@ -0,0 +1,42 @@
use lqos_utils::unix_time::unix_now;
use tokio::sync::mpsc::Sender;
use crate::{submission_queue::{comm_channel::SenderChannelMessage, new_submission}, transport_data::{StatsSubmission, UispExtDevice}, collector::collection_manager::DEVICE_ID_LIST};
pub(crate) async fn gather_uisp_data(comm_tx: Sender<SenderChannelMessage>) {
log::info!("Gathering UISP Data for Long-Term Stats");
let timestamp = unix_now().unwrap_or(0);
if timestamp == 0 {
return; // We're not ready
}
if let Ok(config) = lqos_config::LibreQoSConfig::load() {
if let Ok(devices) = uisp::load_all_devices_with_interfaces(config).await {
log::info!("Loaded {} UISP devices", devices.len());
// Collate the data
let uisp_devices: Vec<UispExtDevice> = devices
.into_iter()
.filter(|d| DEVICE_ID_LIST.contains(&d.identification.id))
.map(|device| device.into())
.collect();
log::info!("Retained {} relevant UISP devices", uisp_devices.len());
// Build a queue message containing just UISP info
// Submit it
let submission = StatsSubmission {
timestamp,
totals: None,
hosts: None,
tree: None,
cpu_usage: None,
ram_percent: None,
uisp_devices: Some(uisp_devices),
};
new_submission(submission, comm_tx).await;
} else {
log::warn!("Unable to load UISP devices");
}
} else {
log::warn!("UISP data collection requested, but no LibreQoS configuration found");
}
}

View File

@ -0,0 +1,28 @@
//! Shared data and functionality for the long-term statistics system.
/// Transport data and helpers for the long-term statistics system.
pub mod transport_data;
/// Shared public key infrastructure data and functionality for the long-term statistics system.
pub mod pki;
/// Collection system for `lqosd`
pub mod collector;
/// Submissions system for `lqosd`
pub mod submission_queue;
/// Re-export bincode
pub mod bincode {
pub use bincode::*;
}
/// Re-export CBOR
pub mod cbor {
pub use serde_cbor::*;
}
/// Re-export dryocbox
pub mod dryoc {
pub use dryoc::*;
}

View File

@ -0,0 +1,69 @@
use std::path::Path;
use dryoc::dryocbox::*;
/// Generate a new keypair and store it in a file. If the file exists,
/// it will be loaded rather than re-generated.
///
/// # Arguments
///
/// * `key_path` - The path to the file to store the keypair in
///
/// # Returns
///
/// The generated or loaded keypair
pub fn generate_new_keypair(key_path: &str) -> KeyPair {
let path = Path::new(key_path);
if path.exists() {
if let Ok(bytes) = std::fs::read(path) {
if let Ok(keypair) = bincode::deserialize(&bytes) {
log::info!("Loaded keypair from {}", path.display());
return keypair;
}
}
}
let keypair = KeyPair::gen();
let bytes = bincode::serialize(&keypair).unwrap();
std::fs::write(path, bytes).unwrap();
log::info!("Generated new keypair and stored it at {}", path.display());
keypair
}
#[cfg(test)]
mod test {
use dryoc::dryocbox::*;
#[test]
fn test_sealed_box_roundtrip() {
let sender_keypair = KeyPair::gen();
let recipient_keypair = KeyPair::gen();
let nonce = Nonce::gen();
let message = b"Once upon a time, there was a man with a dream.";
let dryocbox = DryocBox::encrypt_to_vecbox(
message,
&nonce,
&recipient_keypair.public_key,
&sender_keypair.secret_key,
)
.expect("unable to encrypt");
let sodium_box = dryocbox.to_vec();
let dryocbox = DryocBox::from_bytes(&sodium_box).expect("failed to read box");
let decrypted = dryocbox
.decrypt_to_vec(
&nonce,
&sender_keypair.public_key,
&recipient_keypair.secret_key,
)
.expect("unable to decrypt");
assert_eq!(message, decrypted.as_slice());
}
#[test]
fn test_serialize_keypair() {
let keypair = KeyPair::gen();
let serialized = bincode::serialize(&keypair).unwrap();
let deserialized : KeyPair = bincode::deserialize(&serialized).unwrap();
assert_eq!(keypair, deserialized);
}
}

View File

@ -0,0 +1,4 @@
//! Shared public-key data for the license server, long-term stats
//! node and the client.
mod keygen;
pub use keygen::*;

View File

@ -0,0 +1,60 @@
use dryoc::{dryocbox::{Nonce, DryocBox}, types::{NewByteArray, ByteArray}};
use lqos_config::EtcLqos;
use crate::{transport_data::{LtsCommand, NodeIdAndLicense}, submission_queue::queue::QueueError};
use super::keys::{SERVER_PUBLIC_KEY, KEYPAIR};
pub(crate) async fn encode_submission(submission: &LtsCommand) -> Result<Vec<u8>, QueueError> {
let nonce = Nonce::gen();
let mut result = Vec::new();
// Store the version as network order
result.extend(1u16.to_be_bytes());
// Pack the license key and node id into a header
let header = get_license_key_and_node_id(&nonce)?;
let header_bytes = serde_cbor::to_vec(&header).map_err(|_| QueueError::SendFail)?;
// Store the size of the header and the header
result.extend((header_bytes.len() as u64).to_be_bytes());
result.extend(header_bytes);
// Pack the submission body into bytes
let payload_bytes = serde_cbor::to_vec(&submission).map_err(|_| QueueError::SendFail)?;
// TODO: Compress it?
let payload_bytes = miniz_oxide::deflate::compress_to_vec(&payload_bytes, 8);
// Encrypt it
let remote_public = SERVER_PUBLIC_KEY.read().await.clone().unwrap();
let my_private = KEYPAIR.read().await.secret_key.clone();
let dryocbox = DryocBox::encrypt_to_vecbox(
&payload_bytes,
&nonce,
&remote_public,
&my_private,
).map_err(|_| QueueError::SendFail)?;
let encrypted_bytes = dryocbox.to_vec();
// Store the size of the submission
result.extend((encrypted_bytes.len() as u64).to_be_bytes());
result.extend(encrypted_bytes);
// Store the encrypted, zipped submission itself
Ok(result)
}
fn get_license_key_and_node_id(nonce: &Nonce) -> Result<NodeIdAndLicense, QueueError> {
let cfg = EtcLqos::load().map_err(|_| QueueError::SendFail)?;
if let Some(node_id) = cfg.node_id {
if let Some(lts) = &cfg.long_term_stats {
if let Some(license_key) = &lts.license_key {
return Ok(NodeIdAndLicense {
node_id,
license_key: license_key.clone(),
nonce: *nonce.as_array(),
});
}
}
}
Err(QueueError::SendFail)
}
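The resulting wire layout is: `u16` version (big-endian), `u64` header length (big-endian), a CBOR `NodeIdAndLicense` header carrying the nonce, then a `u64` ciphertext length and the DryocBox-encrypted, deflate-compressed CBOR `LtsCommand`. A receiving-side sketch, assuming it mirrors `encode_submission()` above (the real receiver lives in `lts_node`, which is not shown here):

```rust
// Split a submission produced by encode_submission() into its parts (sketch only).
fn split_submission(buf: &[u8]) -> anyhow::Result<(u16, NodeIdAndLicense, &[u8])> {
    let version = u16::from_be_bytes(buf[0..2].try_into()?);
    let header_len = u64::from_be_bytes(buf[2..10].try_into()?) as usize;
    let header: NodeIdAndLicense = serde_cbor::from_slice(&buf[10..10 + header_len])?;
    let body_start = 10 + header_len + 8; // skip the u64 ciphertext-length field
    // What remains is the DryocBox ciphertext of the deflate-compressed CBOR LtsCommand.
    Ok((version, header, &buf[body_start..]))
}
```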

View File

@ -0,0 +1,38 @@
use crate::{pki::generate_new_keypair, dryoc::dryocbox::{KeyPair, PublicKey}, transport_data::{exchange_keys_with_license_server, LicenseReply}};
use lqos_config::EtcLqos;
use once_cell::sync::Lazy;
use tokio::sync::RwLock;
pub(crate) static KEYPAIR: Lazy<RwLock<KeyPair>> = Lazy::new(|| RwLock::new(generate_new_keypair("lts_keys.bin")));
pub(crate) static SERVER_PUBLIC_KEY: Lazy<RwLock<Option<PublicKey>>> = Lazy::new(|| RwLock::new(None));
async fn store_server_public_key(key: &PublicKey) {
*SERVER_PUBLIC_KEY.write().await = Some(key.clone());
}
pub(crate) async fn key_exchange() -> bool {
let cfg = EtcLqos::load().unwrap();
let node_id = cfg.node_id.unwrap();
let node_name = if let Some(node_name) = cfg.node_name {
node_name
} else {
node_id.clone()
};
let license_key = cfg.long_term_stats.unwrap().license_key.unwrap();
let keypair = (KEYPAIR.read().await).clone();
match exchange_keys_with_license_server(node_id, node_name, license_key, keypair.public_key.clone()).await {
Ok(LicenseReply::MyPublicKey { public_key }) => {
store_server_public_key(&public_key).await;
log::info!("Received a public key for the server");
true
}
Ok(_) => {
log::warn!("License server sent an unexpected response.");
false
}
Err(e) => {
log::warn!("Error exchanging keys with license server: {}", e);
false
}
}
}

View File

@ -0,0 +1,83 @@
use std::time::Duration;
use tokio::{sync::mpsc::Receiver, time::sleep, net::TcpStream};
use self::keys::key_exchange;
use super::{licensing::{get_license_status, LicenseState}, queue::send_queue};
mod keys;
mod encode;
pub(crate) use encode::encode_submission;
pub(crate) enum SenderChannelMessage {
QueueReady,
Quit,
}
pub(crate) async fn start_communication_channel(mut rx: Receiver<SenderChannelMessage>) {
let mut connected = false;
let mut stream: Option<TcpStream> = None;
loop {
match rx.try_recv() {
Ok(SenderChannelMessage::QueueReady) => {
// If not connected, see if we are allowed to connect and get a target
if !connected || stream.is_none() {
log::info!("Establishing LTS TCP channel.");
stream = connect_if_permitted().await;
if stream.is_some() {
connected = true;
}
}
// If we're still not connected, skip - otherwise, send the
// queued data
if let Some(tcpstream) = &mut stream {
if connected && tcpstream.writable().await.is_ok() {
// Send the data
let all_good = send_queue(tcpstream).await;
if all_good.is_err() {
log::error!("Stream fail during send. Will re-send");
connected = false;
stream = None;
}
} else {
stream = None;
connected = false;
}
} else {
connected = false;
stream = None;
}
}
Ok(SenderChannelMessage::Quit) => {
break;
}
_ => {}
}
sleep(Duration::from_secs(10)).await;
}
}
async fn connect_if_permitted() -> Option<TcpStream> {
let license = get_license_status().await;
if let LicenseState::Valid { stats_host, .. } = license {
if !key_exchange().await {
return None;
}
let host = format!("{stats_host}:9128");
let stream = TcpStream::connect(&host).await;
match stream {
Err(e) => {
log::error!("Unable to connect to {host}: {e}");
return None;
}
Ok(stream) => {
if stream.writable().await.is_err() {
log::error!("Unable to write to {host}");
return None;
}
return Some(stream);
}
}
}
None
}

View File

@ -0,0 +1,16 @@
use std::sync::RwLock;
use once_cell::sync::Lazy;
use tokio::sync::mpsc::Sender;
use crate::transport_data::StatsSubmission;
use super::{queue::enqueue_if_allowed, comm_channel::SenderChannelMessage};
pub(crate) static CURRENT_STATS: Lazy<RwLock<Option<StatsSubmission>>> = Lazy::new(|| RwLock::new(None));
pub(crate) async fn new_submission(data: StatsSubmission, comm_tx: Sender<SenderChannelMessage>) {
*CURRENT_STATS.write().unwrap() = Some(data.clone());
enqueue_if_allowed(data, comm_tx).await;
}
pub fn get_current_stats() -> Option<StatsSubmission> {
CURRENT_STATS.read().unwrap().clone()
}

View File

@ -0,0 +1,86 @@
use crate::transport_data::{ask_license_server, LicenseReply};
use lqos_config::EtcLqos;
use lqos_utils::unix_time::unix_now;
use once_cell::sync::Lazy;
use tokio::sync::RwLock;
#[derive(Default, Clone)]
struct LicenseStatus {
key: String,
state: LicenseState,
last_check: u64,
}
#[derive(Default, Clone, PartialEq, Debug)]
pub(crate) enum LicenseState {
#[default]
Unknown,
Denied,
Valid {
/// When does the license expire?
expiry: u64,
/// Host to which to send stats
stats_host: String,
},
}
static LICENSE_STATUS: Lazy<RwLock<LicenseStatus>> =
Lazy::new(|| RwLock::new(LicenseStatus::default()));
pub(crate) async fn get_license_status() -> LicenseState {
if let Ok(unix_time) = unix_now() {
let license_status = {
LICENSE_STATUS.read().await.clone()
};
if license_status.state == LicenseState::Unknown || license_status.last_check < unix_time - (60 * 60) {
return check_license(unix_time).await;
}
return license_status.state;
}
LicenseState::Unknown
}
const MISERLY_NO_KEY: &str = "IDontSupportDevelopersAndShouldFeelBad";
async fn check_license(unix_time: u64) -> LicenseState {
if let Ok(cfg) = EtcLqos::load() {
if let Some(cfg) = cfg.long_term_stats {
if let Some(key) = cfg.license_key {
if key == MISERLY_NO_KEY {
log::warn!("You are using the self-hosting license key. We'd be happy to sell you a real one.");
return LicenseState::Valid { expiry: 0, stats_host: "192.168.100.11:9127".to_string() }
}
let mut lock = LICENSE_STATUS.write().await;
lock.last_check = unix_time;
lock.key = key.clone();
match ask_license_server(key.clone()).await {
Ok(state) => {
match state {
LicenseReply::Denied => {
log::warn!("License is in state: DENIED.");
lock.state = LicenseState::Denied;
}
LicenseReply::Valid{expiry, stats_host} => {
log::info!("License is in state: VALID.");
lock.state = LicenseState::Valid{
expiry, stats_host
};
}
_ => {
log::warn!("Unexpected type of data received. Denying to be safe.");
lock.state = LicenseState::Denied;
}
}
return lock.state.clone();
}
Err(e) => {
log::error!("Error checking licensing server");
log::error!("{e:?}");
}
}
}
}
}
LicenseState::Unknown
}

View File

@ -0,0 +1,7 @@
mod current;
mod licensing;
mod queue;
pub(crate) mod comm_channel;
pub(crate) use current::new_submission;
pub(crate) use queue::enqueue_shaped_devices_if_allowed;
pub use current::get_current_stats;

View File

@ -0,0 +1,96 @@
use lqos_config::ShapedDevice;
use once_cell::sync::Lazy;
use thiserror::Error;
use tokio::{sync::{Mutex, mpsc::Sender}, net::TcpStream, io::AsyncWriteExt};
use crate::transport_data::{StatsSubmission, LtsCommand};
use super::{licensing::{LicenseState, get_license_status}, comm_channel::{SenderChannelMessage, encode_submission}};
pub(crate) async fn enqueue_if_allowed(data: StatsSubmission, comm_tx: Sender<SenderChannelMessage>) {
let license = get_license_status().await;
match license {
LicenseState::Unknown => {
log::info!("Temporary error finding license status. Will retry.");
}
LicenseState::Denied => {
log::error!("Your license is invalid. Please contact support.");
}
LicenseState::Valid{ .. } => {
QUEUE.push(LtsCommand::Submit(Box::new(data))).await;
let _ = comm_tx.send(SenderChannelMessage::QueueReady).await;
}
}
}
pub(crate) async fn enqueue_shaped_devices_if_allowed(devices: Vec<ShapedDevice>, comm_tx: Sender<SenderChannelMessage>) {
let license = get_license_status().await;
match license {
LicenseState::Unknown => {
log::info!("Temporary error finding license status. Will retry.");
}
LicenseState::Denied => {
log::error!("Your license is invalid. Please contact support.");
}
LicenseState::Valid{ .. } => {
QUEUE.push(LtsCommand::Devices(devices)).await;
let _ = comm_tx.send(SenderChannelMessage::QueueReady).await;
}
}
}
static QUEUE: Lazy<Queue> = Lazy::new(Queue::new);
pub(crate) struct QueueSubmission {
pub(crate) attempts: u8,
pub(crate) body: LtsCommand,
pub(crate) sent: bool,
}
pub(crate) struct Queue {
queue: Mutex<Vec<QueueSubmission>>,
}
impl Queue {
fn new() -> Self {
Self {
queue: Mutex::new(Vec::new()),
}
}
pub async fn push(&self, data: LtsCommand) {
{
let mut lock = self.queue.lock().await;
lock.push(QueueSubmission {
attempts: 0,
sent: false,
body: data,
});
}
}
}
pub(crate) async fn send_queue(stream: &mut TcpStream) -> Result<(), QueueError> {
let mut lock = QUEUE.queue.lock().await;
for message in lock.iter_mut() {
let submission_buffer = encode_submission(&message.body).await?;
let ret = stream.write(&submission_buffer).await;
log::info!("Sent submission: {} bytes.", submission_buffer.len());
if ret.is_err() {
log::error!("Unable to write to TCP stream.");
log::error!("{:?}", ret);
message.sent = false;
return Err(QueueError::SendFail);
} else {
message.sent = true;
}
}
lock.retain(|s| !s.sent);
lock.retain(|s| s.attempts < 200);
Ok(())
}
#[derive(Error, Debug)]
pub(crate) enum QueueError {
#[error("Unable to send")]
SendFail,
}

View File

@ -0,0 +1,74 @@
//! Data-types used for license key exchange and lookup.
use serde::{Serialize, Deserialize};
use dryoc::dryocbox::PublicKey;
use thiserror::Error;
/// Network-transmitted query to ask the status of a license
/// key.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub enum LicenseRequest {
/// Check the validity of a key
LicenseCheck {
/// The Key to Check
key: String,
},
/// Exchange Keys
KeyExchange {
/// The node ID of the requesting shaper node
node_id: String,
/// The pretty name of the requesting shaper node
node_name: String,
/// The license key of the requesting shaper node
license_key: String,
/// The sodium-style public key of the requesting shaper node
public_key: PublicKey,
},
}
/// License server responses for a key
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub enum LicenseReply {
/// The license is denied
Denied,
/// The license is valid
Valid {
/// When does the license expire?
expiry: u64,
/// Address to which statistics should be submitted
stats_host: String,
},
/// Key Exchange
MyPublicKey {
/// The server's public key
public_key: PublicKey,
},
}
/// Errors that can occur when checking licenses
#[derive(Debug, Error)]
pub enum LicenseCheckError {
/// Serialization error
#[error("Unable to serialize license check")]
SerializeFail,
/// Network error
#[error("Unable to send license check")]
SendFail,
/// Network error
#[error("Unable to receive license result")]
ReceiveFail,
/// Deserialization error
#[error("Unable to deserialize license result")]
DeserializeFail,
}
/// Stores a license id and node id for transport
#[derive(Debug, Serialize, Deserialize)]
pub struct NodeIdAndLicense {
/// The node id
pub node_id: String,
/// The license key
pub license_key: String,
/// The Sodium Nonce
pub nonce: [u8; 24],
}

View File

@ -0,0 +1,173 @@
//! Functions for talking to the license server
//!
//! License requests use the following format:
//! `u16` containing the version number (currently 1), in big-endian (network order)
//! `u64` containing the size of the payload, in big-endian (network order)
//! `payload` containing the actual payload. The payload is CBOR-encoded.
//!
//! License requests are not expected to be frequent, and the connection is
//! not reused. We use a simple framing protocol, and terminate the connection
//! after use.
use super::{LicenseCheckError, LicenseRequest, LicenseReply, LICENSE_SERVER};
use dryoc::dryocbox::PublicKey;
use tokio::{net::TcpStream, io::{AsyncReadExt, AsyncWriteExt}};
fn build_license_request(key: String) -> Result<Vec<u8>, LicenseCheckError> {
let mut result = Vec::new();
let payload = serde_cbor::to_vec(&LicenseRequest::LicenseCheck { key });
if let Err(e) = payload {
log::warn!("Unable to serialize statistics. Not sending them.");
log::warn!("{e:?}");
return Err(LicenseCheckError::SerializeFail);
}
let payload = payload.unwrap();
// Store the version as network order
result.extend(1u16.to_be_bytes());
// Store the payload size as network order
result.extend((payload.len() as u64).to_be_bytes());
// Store the payload itself
result.extend(payload);
Ok(result)
}
fn build_key_exchange_request(
node_id: String,
node_name: String,
license_key: String,
public_key: PublicKey,
) -> Result<Vec<u8>, LicenseCheckError> {
let mut result = Vec::new();
let payload = serde_cbor::to_vec(&LicenseRequest::KeyExchange {
node_id,
node_name,
license_key,
public_key,
});
if let Err(e) = payload {
log::warn!("Unable to serialize statistics. Not sending them.");
log::warn!("{e:?}");
return Err(LicenseCheckError::SerializeFail);
}
let payload = payload.unwrap();
// Store the version as network order
result.extend(1u16.to_be_bytes());
// Store the payload size as network order
result.extend((payload.len() as u64).to_be_bytes());
// Store the payload itself
result.extend(payload);
Ok(result)
}
/// Ask the license server if the license is valid
///
/// # Arguments
///
/// * `key` - The license key to check
pub async fn ask_license_server(key: String) -> Result<LicenseReply, LicenseCheckError> {
if let Ok(buffer) = build_license_request(key) {
let stream = TcpStream::connect(LICENSE_SERVER).await;
if let Err(e) = &stream {
if e.kind() == std::io::ErrorKind::NotFound {
log::error!("Unable to access {LICENSE_SERVER}. Check that lqosd is running and you have appropriate permissions.");
return Err(LicenseCheckError::SendFail);
}
}
let stream = stream;
match stream {
Ok(mut stream) => {
let ret = stream.write(&buffer).await;
if ret.is_err() {
log::error!("Unable to write to {LICENSE_SERVER} stream.");
log::error!("{:?}", ret);
return Err(LicenseCheckError::SendFail);
}
let mut buf = Vec::with_capacity(10240);
let ret = stream.read_to_end(&mut buf).await;
if ret.is_err() {
log::error!("Unable to read from {LICENSE_SERVER} stream.");
log::error!("{:?}", ret);
return Err(LicenseCheckError::SendFail);
}
decode_response(&buf)
}
Err(e) => {
log::warn!("TCP stream failed to connect: {:?}", e);
Err(LicenseCheckError::ReceiveFail)
}
}
} else {
Err(LicenseCheckError::SerializeFail)
}
}
/// Ask the license server for the public key
pub async fn exchange_keys_with_license_server(
node_id: String,
node_name: String,
license_key: String,
public_key: PublicKey,
) -> Result<LicenseReply, LicenseCheckError> {
if let Ok(buffer) = build_key_exchange_request(node_id, node_name, license_key, public_key) {
let stream = TcpStream::connect(LICENSE_SERVER).await;
if let Err(e) = &stream {
if e.kind() == std::io::ErrorKind::NotFound {
log::error!("Unable to access {LICENSE_SERVER}. Check that lqosd is running and you have appropriate permissions.");
return Err(LicenseCheckError::SendFail);
}
}
let mut stream = stream.unwrap(); // This unwrap is safe, we checked that it exists previously
let ret = stream.write(&buffer).await;
if ret.is_err() {
log::error!("Unable to write to {LICENSE_SERVER} stream.");
log::error!("{:?}", ret);
return Err(LicenseCheckError::SendFail);
}
let mut buf = Vec::with_capacity(10240);
let ret = stream.read_to_end(&mut buf).await;
if ret.is_err() {
log::error!("Unable to read from {LICENSE_SERVER} stream.");
log::error!("{:?}", ret);
return Err(LicenseCheckError::SendFail);
}
decode_response(&buf)
} else {
Err(LicenseCheckError::SerializeFail)
}
}
fn decode_response(buf: &[u8]) -> Result<LicenseReply, LicenseCheckError> {
const U64SIZE: usize = std::mem::size_of::<u64>();
let version_buf = &buf[0..2]
.try_into()
.map_err(|_| LicenseCheckError::DeserializeFail)?;
let version = u16::from_be_bytes(*version_buf);
let size_buf = &buf[2..2 + U64SIZE]
.try_into()
.map_err(|_| LicenseCheckError::DeserializeFail)?;
let size = u64::from_be_bytes(*size_buf);
if version != 1 {
log::error!("License server returned an unknown version: {}", version);
return Err(LicenseCheckError::DeserializeFail);
}
let start = 2 + U64SIZE;
let end = start + size as usize;
let payload: Result<LicenseReply, _> = serde_cbor::from_slice(&buf[start..end]);
match payload {
Ok(payload) => Ok(payload),
Err(e) => {
log::error!("Unable to deserialize license result");
log::error!("{e:?}");
Err(LicenseCheckError::DeserializeFail)
}
}
}

View File

@ -0,0 +1,15 @@
//! Holds data-types and utility functions for the long-term
//! statistics retention system.
//!
//! This is in the bus so that it can be readily shared between
//! server and client code.
mod submissions;
mod license_types;
mod license_utils;
pub use submissions::*;
pub use license_types::*;
pub use license_utils::*;
pub(crate) const LICENSE_SERVER: &str = "license.libreqos.io:9126";

View File

@ -0,0 +1,208 @@
//! Holds data-types to be submitted as part of long-term stats
//! collection.
use lqos_config::ShapedDevice;
use serde::{Serialize, Deserialize};
use uisp::Device;
/// Type that provides a minimum, maximum and average value
/// for a given statistic within the associated time period.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct StatsSummary {
/// Minimum value, as a (download, upload) pair
pub min: (u64, u64),
/// Maximum value, as a (download, upload) pair
pub max: (u64, u64),
/// Average value, as a (download, upload) pair
pub avg: (u64, u64),
}
/// Type that provides a minimum, maximum and average value
/// for a given RTT value within the associated time period.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct StatsRttSummary {
/// Minimum value
pub min: u32,
/// Maximum value
pub max: u32,
/// Average value
pub avg: u32,
}
/// Type that holds total traffic statistics for a given time period
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct StatsTotals {
/// Total number of packets
pub packets: StatsSummary,
/// Total number of bits
pub bits: StatsSummary,
/// Total number of shaped bits
pub shaped_bits: StatsSummary,
}
/// Type that holds per-host statistics for a given stats collation
/// period.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct StatsHost {
/// Host circuit_id as it appears in ShapedDevices.csv
pub circuit_id: Option<String>,
/// Host's IP address
pub ip_address: String,
/// Host's traffic statistics
pub bits: StatsSummary,
/// Host's RTT statistics
pub rtt: StatsRttSummary,
}
/// Node inside a traffic summary tree
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct StatsTreeNode {
/// Index in the tree vector
pub index: usize,
/// Name (from network.json)
pub name: String,
/// Maximum allowed throughput (from network.json)
pub max_throughput: (u32, u32),
/// Current throughput summary for the collation period
pub current_throughput: StatsSummary,
/// RTT summaries
pub rtt: StatsRttSummary,
/// Indices of parents in the tree
pub parents: Vec<usize>,
/// Index of immediate parent in the tree
pub immediate_parent: Option<usize>,
/// Node Type
pub node_type: Option<String>,
}
/// Collation of all stats for a given time period
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct StatsSubmission {
/// Timestamp of the collation (UNIX time)
pub timestamp: u64,
/// Total traffic statistics
pub totals: Option<StatsTotals>,
/// Per-host statistics
pub hosts: Option<Vec<StatsHost>>,
/// Tree of traffic summaries
pub tree: Option<Vec<StatsTreeNode>>,
/// CPU utilization on the shaper
pub cpu_usage: Option<Vec<u32>>,
/// RAM utilization on the shaper
pub ram_percent: Option<u32>,
/// UISP Device Information
pub uisp_devices: Option<Vec<UispExtDevice>>,
}
/// Submission to the `lts_node` process
#[derive(Serialize, Deserialize, Clone, Debug)]
pub enum LtsCommand {
/// Submit a full statistics collation
Submit(Box<StatsSubmission>),
/// Submit the current ShapedDevices list
Devices(Vec<ShapedDevice>),
}
/// Extended data provided from UISP
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct UispExtDevice {
/// UISP device identifier
pub device_id: String,
/// Device name
pub name: String,
/// Device model
pub model: String,
/// Firmware version
pub firmware: String,
/// Device status as reported by UISP
pub status: String,
/// Operating frequency (from the UISP overview)
pub frequency: f64,
/// Channel width (from the UISP overview)
pub channel_width: i32,
/// Transmit power (from the UISP overview)
pub tx_power: i32,
/// Received signal strength (from the UISP overview)
pub rx_signal: i32,
/// Reported downlink capacity in Mbps
pub downlink_capacity_mbps: i32,
/// Reported uplink capacity in Mbps
pub uplink_capacity_mbps: i32,
/// Noise floor (taken from the wireless interface data)
pub noise_floor: i32,
/// Operating mode reported by UISP
pub mode: String,
/// Interfaces attached to the device
pub interfaces: Vec<UispExtDeviceInterface>,
}
/// Network interface belonging to a UISP device
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct UispExtDeviceInterface {
/// Interface name
pub name: String,
/// Interface MAC address
pub mac: String,
/// IP addresses (CIDR notation) assigned to the interface
pub ip: Vec<String>,
/// Interface status
pub status: String,
/// Interface speed
pub speed: String,
}
impl From<Device> for UispExtDevice {
fn from(d: Device) -> Self {
let device_id = d.identification.id.to_string();
let device_name = d.get_name().as_ref().unwrap_or(&"".to_string()).to_string();
let model = d.identification.modelName.as_ref().unwrap_or(&"".to_string()).to_string();
let firmware = d.identification.firmwareVersion.as_ref().unwrap_or(&"".to_string()).to_string();
let mode = d.mode.as_ref().unwrap_or(&"".to_string()).to_string();
let status;
let frequency;
let channel_width;
let tx_power;
let rx_signal;
let downlink_capacity_mbps;
let uplink_capacity_mbps;
if let Some(ov) = &d.overview {
status = ov.status.as_ref().unwrap_or(&"".to_string()).to_string();
frequency = ov.frequency.unwrap_or(0.0);
channel_width = ov.channelWidth.unwrap_or(0);
tx_power = ov.transmitPower.unwrap_or(0);
rx_signal = ov.signal.unwrap_or(0);
downlink_capacity_mbps = ov.downlinkCapacity.unwrap_or(0);
uplink_capacity_mbps = ov.uplinkCapacity.unwrap_or(0);
} else {
status = "".to_string();
frequency = 0.0;
channel_width = 0;
tx_power = 0;
rx_signal = 0;
downlink_capacity_mbps = 0;
uplink_capacity_mbps = 0;
}
let mut noise_floor = 0;
let mut iflist = Vec::new();
if let Some(interfaces) = &d.interfaces {
interfaces.iter().for_each(|i| {
if let (Some(id), Some(status), Some(wireless), Some(addr)) = (&i.identification, &i.status, &i.wireless, &i.addresses) {
if let Some(nf) = wireless.noiseFloor {
noise_floor = nf;
}
let mut ip = Vec::new();
addr.iter().for_each(|a| {
if let Some(ipaddr) = &a.cidr {
ip.push(ipaddr.to_string());
}
});
iflist.push(UispExtDeviceInterface {
name: id.name.as_ref().unwrap_or(&"".to_string()).to_string(),
mac: id.mac.as_ref().unwrap_or(&"".to_string()).to_string(),
status: status.status.as_ref().unwrap_or(&"".to_string()).to_string(),
speed: status.speed.as_ref().unwrap_or(&"".to_string()).to_string(),
ip,
});
}
});
}
Self {
device_id,
name: device_name,
model,
firmware,
status,
frequency,
channel_width,
tx_power,
rx_signal,
downlink_capacity_mbps,
uplink_capacity_mbps,
noise_floor,
mode,
interfaces: iflist,
}
}
}

View File

@ -0,0 +1,20 @@
[package]
name = "lts_ingestor"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
pgdb = { path = "../pgdb" }
lts_client = { path = "../lts_client" }
lqos_config = { path = "../../lqos_config" }
tokio = { version = "1.25.0", features = ["full"] }
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
anyhow = "1"
influxdb2 = "0"
influxdb2-structmap = "0"
futures = "0"
once_cell = "1"
miniz_oxide = "0.7.1"

Binary file not shown.

View File

@ -0,0 +1,33 @@
use tracing::{error, info};
mod submissions;
mod pki;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
// install global collector configured based on RUST_LOG env var.
tracing_subscriber::fmt::init();
// Get the database connection pool
let pool = pgdb::get_connection_pool(5).await;
if pool.is_err() {
error!("Unable to connect to the database");
error!("{pool:?}");
return Err(anyhow::Error::msg("Unable to connect to the database"));
}
let pool = pool.unwrap();
// Start the submission queue
let submission_sender = {
info!("Starting the submission queue");
submissions::submissions_queue(pool.clone()).await?
};
// Start the submissions server
info!("Starting the submissions server");
if let Err(e) = tokio::spawn(submissions::submissions_server(pool.clone(), submission_sender)).await {
error!("Server exited with error: {}", e);
}
Ok(())
}

View File

@ -0,0 +1,6 @@
use std::sync::RwLock;
use once_cell::sync::Lazy;
use lts_client::{pki::generate_new_keypair, dryoc::dryocbox::KeyPair};
pub(crate) static LIBREQOS_KEYPAIR: Lazy<RwLock<KeyPair>> = Lazy::new(|| RwLock::new(generate_new_keypair(KEY_PATH)));
const KEY_PATH: &str = "lqkeys.bin"; // Store in the working directory

View File

@ -0,0 +1,5 @@
mod submission_server;
mod submission_queue;
pub use submission_server::submissions_server;
pub use submission_queue::submissions_queue;
pub use submission_queue::get_org_details;

View File

@ -0,0 +1,85 @@
use lqos_config::ShapedDevice;
use pgdb::{OrganizationDetails, sqlx::{Pool, Postgres}};
use tracing::{error, info};
pub async fn ingest_shaped_devices(
cnn: Pool<Postgres>,
org: &OrganizationDetails,
node_id: &str,
devices: &[ShapedDevice],
) -> anyhow::Result<()> {
let mut trans = cnn.begin().await?;
// Clear existing data from shaped devices
pgdb::sqlx::query("DELETE FROM shaped_devices WHERE key=$1 AND node_id=$2")
.bind(org.key.to_string())
.bind(node_id)
.execute(&mut trans)
.await?;
// Clear existing data from shaped devices IP lists
pgdb::sqlx::query("DELETE FROM shaped_device_ip WHERE key=$1 AND node_id=$2")
.bind(org.key.to_string())
.bind(node_id)
.execute(&mut trans)
.await?;
const SQL_INSERT: &str = "INSERT INTO shaped_devices
(key, node_id, circuit_id, device_id, circuit_name, device_name, parent_node, mac, download_min_mbps, upload_min_mbps, download_max_mbps, upload_max_mbps, comment)
VALUES
($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)";
const SQL_IP_INSERT: &str = "INSERT INTO public.shaped_device_ip
(key, node_id, circuit_id, ip_range, subnet)
VALUES
($1, $2, $3, $4, $5)
ON CONFLICT (key, node_id, circuit_id, ip_range, subnet) DO NOTHING;";
for device in devices.iter() {
pgdb::sqlx::query(SQL_INSERT)
.bind(org.key.to_string())
.bind(node_id)
.bind(device.circuit_id.clone())
.bind(device.device_id.clone())
.bind(device.circuit_name.clone())
.bind(device.device_name.clone())
.bind(device.parent_node.clone())
.bind(device.mac.clone())
.bind(device.download_min_mbps as i32)
.bind(device.upload_min_mbps as i32)
.bind(device.download_max_mbps as i32)
.bind(device.upload_max_mbps as i32)
.bind(device.comment.clone())
.execute(&mut trans)
.await?;
for ip in device.ipv4.iter() {
pgdb::sqlx::query(SQL_IP_INSERT)
.bind(org.key.to_string())
.bind(node_id)
.bind(device.circuit_id.clone())
.bind(ip.0.to_string())
.bind(ip.1 as i32)
.execute(&mut trans)
.await?;
}
for ip in device.ipv6.iter() {
pgdb::sqlx::query(SQL_IP_INSERT)
.bind(org.key.to_string())
.bind(node_id)
.bind(device.circuit_id.clone())
.bind(ip.0.to_string())
.bind(ip.1 as i32)
.execute(&mut trans)
.await?;
}
}
if let Err(e) = trans.commit().await {
error!("Error committing transaction: {}", e);
} else {
info!("Transaction committed");
}
Ok(())
}

View File

@ -0,0 +1,83 @@
use futures::prelude::*;
use influxdb2::models::DataPoint;
use influxdb2::Client;
use lts_client::transport_data::StatsTotals;
use pgdb::OrganizationDetails;
pub async fn collect_host_totals(
org: &OrganizationDetails,
node_id: &str,
timestamp: i64,
totals: &Option<StatsTotals>,
) -> anyhow::Result<()> {
if let Some(totals) = totals {
let influx_url = format!("http://{}:8086", org.influx_host);
let client = Client::new(&influx_url, &org.influx_org, &org.influx_token);
let points = vec![
DataPoint::builder("packets")
.tag("host_id", node_id.to_string())
.tag("organization_id", org.key.to_string())
.tag("direction", "down".to_string())
.timestamp(timestamp)
.field("min", totals.packets.min.0 as i64)
.field("max", totals.packets.max.0 as i64)
.field("avg", totals.packets.avg.0 as i64)
.build()?,
DataPoint::builder("packets")
.tag("host_id", node_id.to_string())
.tag("organization_id", org.key.to_string())
.tag("direction", "up".to_string())
.timestamp(timestamp)
.field("min", totals.packets.min.1 as i64)
.field("max", totals.packets.max.1 as i64)
.field("avg", totals.packets.avg.1 as i64)
.build()?,
DataPoint::builder("bits")
.tag("host_id", node_id.to_string())
.tag("organization_id", org.key.to_string())
.tag("direction", "down".to_string())
.timestamp(timestamp)
.field("min", totals.bits.min.0 as i64)
.field("max", totals.bits.max.0 as i64)
.field("avg", totals.bits.avg.0 as i64)
.build()?,
DataPoint::builder("bits")
.tag("host_id", node_id.to_string())
.tag("organization_id", org.key.to_string())
.tag("direction", "up".to_string())
.timestamp(timestamp)
.field("min", totals.bits.min.1 as i64)
.field("max", totals.bits.max.1 as i64)
.field("avg", totals.bits.avg.1 as i64)
.build()?,
DataPoint::builder("shaped_bits")
.tag("host_id", node_id.to_string())
.tag("organization_id", org.key.to_string())
.tag("direction", "down".to_string())
.timestamp(timestamp)
.field("min", totals.shaped_bits.min.0 as i64)
.field("max", totals.shaped_bits.max.0 as i64)
.field("avg", totals.shaped_bits.avg.0 as i64)
.build()?,
DataPoint::builder("shaped_bits")
.tag("host_id", node_id.to_string())
.tag("organization_id", org.key.to_string())
.tag("direction", "up".to_string())
.timestamp(timestamp)
.field("min", totals.shaped_bits.min.1 as i64)
.field("max", totals.shaped_bits.max.1 as i64)
.field("avg", totals.shaped_bits.avg.1 as i64)
.build()?,
];
//client.write(&org.influx_bucket, stream::iter(points)).await?;
client
.write_with_precision(
&org.influx_bucket,
stream::iter(points),
influxdb2::api::write::TimestampPrecision::Seconds,
)
.await?;
}
Ok(())
}

View File

@ -0,0 +1,10 @@
mod queue;
mod devices;
mod host_totals;
mod organization_cache;
mod per_host;
mod tree;
mod node_perf;
mod uisp_devices;
pub use queue::{submissions_queue, SubmissionType};
pub use organization_cache::get_org_details;

View File

@ -0,0 +1,35 @@
use futures::prelude::*;
use influxdb2::{models::DataPoint, Client};
use pgdb::OrganizationDetails;
pub async fn collect_node_perf(
org: &OrganizationDetails,
node_id: &str,
timestamp: i64,
cpu: &Option<Vec<u32>>,
ram: &Option<u32>,
) -> anyhow::Result<()> {
if let (Some(cpu), Some(ram)) = (cpu, ram) {
let influx_url = format!("http://{}:8086", org.influx_host);
let client = Client::new(&influx_url, &org.influx_org, &org.influx_token);
let cpu_sum = cpu.iter().sum::<u32>();
// Guard against an empty CPU list (avoids divide-by-zero and unwrap panics)
let cpu_avg = cpu_sum.checked_div(cpu.len() as u32).unwrap_or(0);
let cpu_max = cpu.iter().max().copied().unwrap_or(0);
let points = vec![DataPoint::builder("perf")
.tag("host_id", node_id.to_string())
.tag("organization_id", org.key.to_string())
.timestamp(timestamp)
.field("ram", *ram as i64)
.field("cpu", cpu_avg as i64)
.field("cpu_max", cpu_max as i64)
.build()?];
client
.write_with_precision(
&org.influx_bucket,
stream::iter(points),
influxdb2::api::write::TimestampPrecision::Seconds,
)
.await?;
}
Ok(())
}

View File

@ -0,0 +1,25 @@
use std::collections::HashMap;
use once_cell::sync::Lazy;
use pgdb::{OrganizationDetails, sqlx::{Pool, Postgres}};
use tokio::sync::RwLock;
static ORG_CACHE: Lazy<RwLock<HashMap<String, OrganizationDetails>>> = Lazy::new(|| {
RwLock::new(HashMap::new())
});
pub async fn get_org_details(cnn: &Pool<Postgres>, key: &str) -> Option<OrganizationDetails> {
{ // Safety scope - lock is dropped on exit
let cache = ORG_CACHE.read().await;
if let Some(org) = cache.get(key) {
return Some(org.clone());
}
}
// We can be certain that we don't have a dangling lock now.
// Upgrade to a write lock and try to fetch the org details.
let mut cache = ORG_CACHE.write().await;
if let Ok(org) = pgdb::get_organization(cnn, key).await {
cache.insert(key.to_string(), org.clone());
return Some(org);
}
None
}

View File

@ -0,0 +1,68 @@
use influxdb2::{Client, models::DataPoint};
use lts_client::transport_data::StatsHost;
use pgdb::OrganizationDetails;
use futures::prelude::*;
use tracing::info;
pub async fn collect_per_host(
org: &OrganizationDetails,
node_id: &str,
timestamp: i64,
totals: &Option<Vec<StatsHost>>,
) -> anyhow::Result<()> {
if let Some(hosts) = totals {
let influx_url = format!("http://{}:8086", org.influx_host);
let client = Client::new(&influx_url, &org.influx_org, &org.influx_token);
let mut points: Vec<DataPoint> = Vec::new();
info!("Received per-host stats, {} hosts", hosts.len());
for host in hosts.iter() {
let circuit_id = if let Some(cid) = &host.circuit_id {
cid.clone()
} else {
"unknown".to_string()
};
points.push(DataPoint::builder("host_bits")
.tag("host_id", node_id.to_string())
.tag("organization_id", org.key.to_string())
.tag("direction", "down".to_string())
.tag("circuit_id", &circuit_id)
.tag("ip", host.ip_address.to_string())
.timestamp(timestamp)
.field("min", host.bits.min.0 as i64)
.field("max", host.bits.max.0 as i64)
.field("avg", host.bits.avg.0 as i64)
.build()?);
points.push(DataPoint::builder("host_bits")
.tag("host_id", node_id.to_string())
.tag("organization_id", org.key.to_string())
.tag("direction", "up".to_string())
.tag("circuit_id", &circuit_id)
.tag("ip", host.ip_address.to_string())
.timestamp(timestamp)
.field("min", host.bits.min.1 as i64)
.field("max", host.bits.max.1 as i64)
.field("avg", host.bits.avg.1 as i64)
.build()?);
points.push(DataPoint::builder("rtt")
.tag("host_id", node_id.to_string())
.tag("organization_id", org.key.to_string())
.tag("circuit_id", &circuit_id)
.tag("ip", host.ip_address.to_string())
.timestamp(timestamp)
.field("min", host.rtt.avg as f64 / 100.0)
.field("max", host.rtt.max as f64 / 100.0)
.field("avg", host.rtt.avg as f64 / 100.0)
.build()?);
}
client
.write_with_precision(
&org.influx_bucket,
stream::iter(points),
influxdb2::api::write::TimestampPrecision::Seconds,
)
.await?;
}
Ok(())
}

View File

@ -0,0 +1,91 @@
//! Provides a queue of submissions to be processed by the long-term storage.
//! This is a "fan in" pattern: multi-producer, single-consumer messages
//! send data into the queue, which is managed by a single consumer
//! thread. The consumer thread spawns tokio tasks to actually
//! perform the processing.
use crate::submissions::submission_queue::{
devices::ingest_shaped_devices, host_totals::collect_host_totals, node_perf::collect_node_perf,
organization_cache::get_org_details, tree::collect_tree, per_host::collect_per_host, uisp_devices::collect_uisp_devices,
};
use lts_client::transport_data::{LtsCommand, NodeIdAndLicense};
use pgdb::sqlx::{Pool, Postgres};
use tokio::sync::mpsc::{Receiver, Sender};
use tracing::{info, error, warn};
const SUBMISSION_QUEUE_SIZE: usize = 100;
pub type SubmissionType = (NodeIdAndLicense, LtsCommand);
pub async fn submissions_queue(cnn: Pool<Postgres>) -> anyhow::Result<Sender<SubmissionType>> {
// Create a channel to send data to the consumer thread
let (tx, rx) = tokio::sync::mpsc::channel::<SubmissionType>(SUBMISSION_QUEUE_SIZE);
tokio::spawn(run_queue(cnn, rx)); // Note that we're *moving* rx into the spawned task
Ok(tx)
}
async fn run_queue(cnn: Pool<Postgres>, mut rx: Receiver<SubmissionType>) -> anyhow::Result<()> {
while let Some(message) = rx.recv().await {
info!("Received a message from the submission queue");
let (node_id, command) = message;
tokio::spawn(ingest_stats(cnn.clone(), node_id, command));
}
Ok(())
}
//#[tracing::instrument]
async fn ingest_stats(
cnn: Pool<Postgres>,
node_id: NodeIdAndLicense,
command: LtsCommand,
) -> anyhow::Result<()> {
info!("Ingesting stats for node {}", node_id.node_id);
if let Some(org) = get_org_details(&cnn, &node_id.license_key).await {
//println!("{:?}", command);
match command {
LtsCommand::Devices(devices) => {
info!("Ingesting Shaped Devices");
update_last_seen(cnn.clone(), &node_id).await;
if let Err(e) = ingest_shaped_devices(cnn, &org, &node_id.node_id, &devices).await {
error!("Error ingesting shaped devices: {}", e);
}
}
LtsCommand::Submit(stats) => {
//println!("Submission: {:?}", submission);
info!("Ingesting statistics dump");
let ts = stats.timestamp as i64;
let _ = tokio::join!(
update_last_seen(cnn.clone(), &node_id),
collect_host_totals(&org, &node_id.node_id, ts, &stats.totals),
collect_node_perf(
&org,
&node_id.node_id,
ts,
&stats.cpu_usage,
&stats.ram_percent
),
collect_tree(cnn.clone(), &org, &node_id.node_id, ts, &stats.tree),
collect_per_host(&org, &node_id.node_id, ts, &stats.hosts),
collect_uisp_devices(cnn.clone(), &org, &stats.uisp_devices, ts),
);
}
}
} else {
warn!(
"Unable to find organization for license {}",
node_id.license_key
);
}
Ok(())
}
async fn update_last_seen(cnn: Pool<Postgres>, details: &NodeIdAndLicense) {
let res = pgdb::new_stats_arrived(cnn, &details.license_key, &details.node_id).await;
if res.is_err() {
error!(
"Unable to update last seen for node {}: {}",
details.node_id,
res.unwrap_err()
);
}
}
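// Illustrative usage sketch (not part of the shipped file): any task that holds the
// Sender returned by submissions_queue() can fan submissions into the single consumer.
// The `header` and `command` values are assumed to have already been decoded and
// decrypted by the submission server (see submission_server.rs).
#[allow(dead_code)]
async fn enqueue_example(
    tx: Sender<SubmissionType>,
    header: NodeIdAndLicense,
    command: LtsCommand,
) -> anyhow::Result<()> {
    // Awaiting send() applies back-pressure once SUBMISSION_QUEUE_SIZE messages are queued.
    tx.send((header, command))
        .await
        .map_err(|_| anyhow::Error::msg("The submission queue has shut down"))?;
    Ok(())
}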

View File

@ -0,0 +1,105 @@
use futures::prelude::*;
use influxdb2::{models::DataPoint, Client};
use lts_client::transport_data::StatsTreeNode;
use pgdb::{
sqlx::{Pool, Postgres},
OrganizationDetails,
};
use tracing::{info, error};
const SQL: &str = "INSERT INTO site_tree (key, host_id, site_name, index, parent, site_type, max_up, max_down, current_up, current_down, current_rtt) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) ON CONFLICT (key, host_id, site_name) DO NOTHING";
pub async fn collect_tree(
cnn: Pool<Postgres>,
org: &OrganizationDetails,
node_id: &str,
timestamp: i64,
totals: &Option<Vec<StatsTreeNode>>,
) -> anyhow::Result<()> {
if let Some(tree) = totals {
//println!("{tree:?}");
let influx_url = format!("http://{}:8086", org.influx_host);
let client = Client::new(&influx_url, &org.influx_org, &org.influx_token);
let mut points: Vec<DataPoint> = Vec::new();
let mut trans = cnn.begin().await?;
pgdb::sqlx::query("DELETE FROM site_tree WHERE key=$1 AND host_id=$2")
.bind(org.key.to_string())
.bind(node_id)
.execute(&mut trans)
.await?;
for node in tree.iter() {
points.push(
DataPoint::builder("tree")
.tag("host_id", node_id.to_string())
.tag("organization_id", org.key.to_string())
.tag("node_name", node.name.to_string())
.tag("direction", "down".to_string())
.timestamp(timestamp)
.field("bits_min", node.current_throughput.min.0 as i64)
.field("bits_max", node.current_throughput.max.0 as i64)
.field("bits_avg", node.current_throughput.avg.0 as i64)
.build()?,
);
points.push(
DataPoint::builder("tree")
.tag("host_id", node_id.to_string())
.tag("organization_id", org.key.to_string())
.tag("node_name", node.name.to_string())
.tag("direction", "up".to_string())
.timestamp(timestamp)
.field("bits_min", node.current_throughput.min.1 as i64)
.field("bits_max", node.current_throughput.max.1 as i64)
.field("bits_avg", node.current_throughput.avg.1 as i64)
.build()?,
);
points.push(
DataPoint::builder("tree")
.tag("host_id", node_id.to_string())
.tag("organization_id", org.key.to_string())
.tag("node_name", node.name.to_string())
.timestamp(timestamp)
.field("rtt_min", node.rtt.min as i64 / 100)
.field("rtt_max", node.rtt.max as i64 / 100)
.field("rtt_avg", node.rtt.avg as i64 / 100)
.build()?,
);
let result = pgdb::sqlx::query(SQL)
.bind(org.key.to_string())
.bind(node_id)
.bind(&node.name)
.bind(node.index as i32)
.bind(node.immediate_parent.unwrap_or(0) as i32)
.bind(node.node_type.as_ref().unwrap_or(&String::new()).clone())
.bind(node.max_throughput.1 as i64)
.bind(node.max_throughput.0 as i64)
.bind(node.current_throughput.max.1 as i64)
.bind(node.current_throughput.max.0 as i64)
.bind(node.rtt.avg as i64)
.execute(&mut trans)
.await;
if let Err(e) = result {
error!("Error inserting tree node: {}", e);
}
}
if let Err(e) = trans.commit().await {
error!("Error committing transaction: {}", e);
} else {
info!("Transaction committed");
}
client
.write_with_precision(
&org.influx_bucket,
stream::iter(points),
influxdb2::api::write::TimestampPrecision::Seconds,
)
.await?;
}
Ok(())
}

View File

@ -0,0 +1,115 @@
use futures::prelude::*;
use influxdb2::{models::DataPoint, Client};
use lts_client::transport_data::UispExtDevice;
use pgdb::{
sqlx::{Pool, Postgres},
OrganizationDetails,
};
pub async fn collect_uisp_devices(
cnn: Pool<Postgres>,
org: &OrganizationDetails,
devices: &Option<Vec<UispExtDevice>>,
ts: i64,
) {
let (sql, influx) = tokio::join!(uisp_sql(cnn, org, devices), uisp_influx(org, devices, ts),);
if let Err(e) = sql {
tracing::error!("Error writing uisp sql: {:?}", e);
}
if let Err(e) = influx {
tracing::error!("Error writing uisp influx: {:?}", e);
}
}
async fn uisp_sql(
cnn: Pool<Postgres>,
org: &OrganizationDetails,
devices: &Option<Vec<UispExtDevice>>,
) -> anyhow::Result<()> {
if let Some(devices) = devices {
let mut trans = cnn.begin().await?;
// Handle the SQL portion (things that don't need to be graphed, just displayed)
pgdb::sqlx::query("DELETE FROM uisp_devices_ext WHERE key=$1")
.bind(org.key.to_string())
.execute(&mut trans)
.await?;
pgdb::sqlx::query("DELETE FROM uisp_devices_interfaces WHERE key=$1")
.bind(org.key.to_string())
.execute(&mut trans)
.await?;
for device in devices.iter() {
pgdb::sqlx::query("INSERT INTO uisp_devices_ext (key, device_id, name, model, firmware, status, mode) VALUES ($1, $2, $3, $4, $5, $6, $7)")
.bind(org.key.to_string())
.bind(&device.device_id)
.bind(&device.name)
.bind(&device.model)
.bind(&device.firmware)
.bind(&device.status)
.bind(&device.mode)
.execute(&mut trans)
.await?;
for interface in device.interfaces.iter() {
let ip_list = interface.ip.join(" ");
pgdb::sqlx::query("INSERT INTO uisp_devices_interfaces (key, device_id, name, mac, status, speed, ip_list) VALUES ($1, $2, $3, $4, $5, $6, $7)")
.bind(org.key.to_string())
.bind(&device.device_id)
.bind(&interface.name)
.bind(&interface.mac)
.bind(&interface.status)
.bind(&interface.speed)
.bind(ip_list)
.execute(&mut trans)
.await?;
}
}
trans.commit().await?;
}
Ok(())
}
async fn uisp_influx(
org: &OrganizationDetails,
devices: &Option<Vec<UispExtDevice>>,
timestamp: i64,
) -> anyhow::Result<()> {
if let Some(devices) = devices {
let influx_url = format!("http://{}:8086", org.influx_host);
let client = Client::new(&influx_url, &org.influx_org, &org.influx_token);
let mut points: Vec<DataPoint> = Vec::new();
for device in devices.iter() {
points.push(
DataPoint::builder("device_ext")
.tag("device_id", &device.device_id)
.tag("organization_id", org.key.to_string())
.tag("direction", "down".to_string())
.timestamp(timestamp)
.field("rx_signal", device.rx_signal as i64)
.field("noise_floor", device.noise_floor as i64)
.field("dl_capacity", device.downlink_capacity_mbps as i64)
.field("ul_capacity", device.uplink_capacity_mbps as i64)
.build()?,
);
}
client
.write_with_precision(
&org.influx_bucket,
stream::iter(points),
influxdb2::api::write::TimestampPrecision::Seconds,
)
.await?;
}
Ok(())
}

View File

@ -0,0 +1,102 @@
//! Provides a TCP handler server, listening on port 9128. Connections
//! are expected in the encrypted LTS format (see the `lq_bus` crate).
//! If everything checks out, they are sent to the submission queue
//! for storage.
use super::submission_queue::SubmissionType;
use crate::pki::LIBREQOS_KEYPAIR;
use lts_client::{
dryoc::dryocbox::{DryocBox, PublicKey},
transport_data::{LtsCommand, NodeIdAndLicense},
};
use pgdb::sqlx::{Pool, Postgres};
use tokio::{io::AsyncReadExt, net::{TcpListener, TcpStream}, spawn, sync::mpsc::Sender};
use tracing::{info, error, warn};
/// Starts the submission server, listening on port 9128.
/// The server runs in the background.
pub async fn submissions_server(
cnn: Pool<Postgres>,
sender: Sender<SubmissionType>,
) -> anyhow::Result<()> {
let listener = TcpListener::bind(":::9128").await?;
info!("Listening for stats submissions on :::9128");
loop {
let (mut socket, address) = listener.accept().await?;
info!("Connection from {address:?}");
let pool = cnn.clone();
let my_sender = sender.clone();
spawn(async move {
loop {
if let Ok(message) = read_message(&mut socket, pool.clone()).await {
my_sender.send(message).await.unwrap();
} else {
error!("Read failed. Dropping socket.");
std::mem::drop(socket);
break;
}
}
});
}
}
#[tracing::instrument]
async fn read_message(socket: &mut TcpStream, pool: Pool<Postgres>) -> anyhow::Result<SubmissionType> {
read_version(socket).await?;
let header_size = read_size(socket).await?;
let header = read_header(socket, header_size as usize).await?;
let body_size = read_size(socket).await?;
let message = read_body(socket, pool.clone(), body_size as usize, &header).await?;
Ok((header, message))
}
async fn read_version(stream: &mut TcpStream) -> anyhow::Result<()> {
let version = stream.read_u16().await?;
if version != 1 {
warn!("Received a version {version} header.");
return Err(anyhow::Error::msg("Received an unknown version header"));
}
Ok(())
}
async fn read_size(stream: &mut TcpStream) -> anyhow::Result<u64> {
let size = stream.read_u64().await?;
Ok(size)
}
async fn read_header(stream: &mut TcpStream, size: usize) -> anyhow::Result<NodeIdAndLicense> {
let mut buffer = vec![0u8; size];
stream.read_exact(&mut buffer).await?;
let header: NodeIdAndLicense = lts_client::cbor::from_slice(&buffer)?;
Ok(header)
}
async fn read_body(stream: &mut TcpStream, pool: Pool<Postgres>, size: usize, header: &NodeIdAndLicense) -> anyhow::Result<LtsCommand> {
info!("Reading body of size {size}");
info!("{header:?}");
let mut buffer = vec![0u8; size];
let bytes_read = stream.read_exact(&mut buffer).await?;
if bytes_read != size {
warn!("Received a body of size {bytes_read}, expected {size}");
return Err(anyhow::Error::msg("Received a body of unexpected size"));
}
// Check the header against the database and retrieve the current
// public key
let public_key = pgdb::fetch_public_key(pool, &header.license_key, &header.node_id).await?;
let public_key: PublicKey = lts_client::cbor::from_slice(&public_key)?;
let private_key = LIBREQOS_KEYPAIR.read().unwrap().secret_key.clone();
// Decrypt
let dryocbox = DryocBox::from_bytes(&buffer).expect("failed to read box");
let decrypted = dryocbox
.decrypt_to_vec(&header.nonce.into(), &public_key, &private_key)?;
let decrypted = miniz_oxide::inflate::decompress_to_vec(&decrypted).expect("failed to decompress");
// Try to deserialize
let payload = lts_client::cbor::from_slice(&decrypted)?;
Ok(payload)
}
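// Illustrative sketch (not part of the shipped file): how a sender frames a submission
// for read_message() above. Only the framing is shown; `header_cbor` is assumed to be a
// CBOR-encoded NodeIdAndLicense and `encrypted_body` a compressed, DryocBox-encrypted
// LtsCommand, produced elsewhere.
#[allow(dead_code)]
fn frame_submission(header_cbor: &[u8], encrypted_body: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(2 + 8 + header_cbor.len() + 8 + encrypted_body.len());
    out.extend(1u16.to_be_bytes());                           // version, checked by read_version()
    out.extend((header_cbor.len() as u64).to_be_bytes());     // header size, read by read_size()
    out.extend_from_slice(header_cbor);                       // header, read by read_header()
    out.extend((encrypted_body.len() as u64).to_be_bytes());  // body size, read by read_size()
    out.extend_from_slice(encrypted_body);                    // body, read by read_body()
    out
}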

View File

@ -0,0 +1,28 @@
[package]
name = "lts_node"
version = "0.1.0"
edition = "2021"
license = "GPL-2.0-only"
[dependencies]
tokio = { version = "1.25.0", features = ["full"] }
anyhow = "1"
serde = { version = "1.0", features = ["derive"] }
axum = {version = "0.6", features = ["ws", "headers"] }
lts_client = { path = "../lts_client" }
lqos_config = { path = "../../lqos_config" }
serde_json = "1"
pgdb = { path = "../pgdb" }
once_cell = "1"
influxdb2 = "0"
influxdb2-structmap = "0"
num-traits = "0"
futures = "0"
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
tower = { version = "0.4", features = ["util"] }
tower-http = { version = "0.4.0", features = ["fs", "trace"] }
chrono = "0"
miniz_oxide = "0.7.1"
tokio-util = { version = "0.7.8", features = ["io"] }
wasm_pipe_types = { path = "../wasm_pipe_types" }

View File

@ -0,0 +1,13 @@
#!/bin/bash
pushd ../wasm_pipe
./build.sh
popd
pushd ../site_build
./esbuild.mjs
popd
pushd web
cp ../../site_build/output/* .
cp ../../site_build/src/main.html .
cp ../../site_build/wasm/wasm_pipe_bg.wasm .
popd
RUST_LOG=info RUST_BACKTRACE=1 cargo run --release

View File

@ -0,0 +1,23 @@
mod web;
use tracing::{info, error};
#[tokio::main]
async fn main() -> anyhow::Result<()> {
// install global collector configured based on RUST_LOG env var.
tracing_subscriber::fmt::init();
// Get the database connection pool
let pool = pgdb::get_connection_pool(5).await;
if pool.is_err() {
error!("Unable to connect to the database");
error!("{pool:?}");
return Err(anyhow::Error::msg("Unable to connect to the database"));
}
let pool = pool.unwrap();
// Start the webserver
info!("Starting the webserver");
let _ = tokio::spawn(web::webserver(pool)).await;
Ok(())
}

View File

@ -0,0 +1,92 @@
//! The webserver listens on port 9127, but it is intended that this only
//! listen on localhost and have a reverse proxy in front of it. The proxy
//! should provide HTTPS.
mod wss;
use crate::web::wss::ws_handler;
use axum::body::StreamBody;
use axum::http::header;
use axum::response::IntoResponse;
use axum::{response::Html, routing::get, Router};
use pgdb::sqlx::Pool;
use pgdb::sqlx::Postgres;
use tokio_util::io::ReaderStream;
use tower_http::trace::TraceLayer;
use tower_http::trace::DefaultMakeSpan;
const JS_BUNDLE: &str = include_str!("../../web/app.js");
const JS_MAP: &str = include_str!("../../web/app.js.map");
const CSS: &str = include_str!("../../web/style.css");
const CSS_MAP: &str = include_str!("../../web/style.css.map");
const HTML_MAIN: &str = include_str!("../../web/main.html");
const WASM_BODY: &[u8] = include_bytes!("../../web/wasm_pipe_bg.wasm");
pub async fn webserver(cnn: Pool<Postgres>) {
let app = Router::new()
.route("/", get(index_page))
.route("/app.js", get(js_bundle))
.route("/app.js.map", get(js_map))
.route("/style.css", get(css))
.route("/style.css.map", get(css_map))
.route("/ws", get(ws_handler))
.route("/wasm_pipe_bg.wasm", get(wasm_file))
.with_state(cnn)
.layer(
TraceLayer::new_for_http()
.make_span_with(DefaultMakeSpan::default().include_headers(true)),
);
tracing::info!("Listening for web traffic on 0.0.0.0:9127");
axum::Server::bind(&"0.0.0.0:9127".parse().unwrap())
.serve(app.into_make_service())
.await
.unwrap();
}
async fn index_page() -> Html<String> {
Html(HTML_MAIN.to_string())
}
async fn js_bundle() -> axum::response::Response<String> {
axum::response::Response::builder()
.header("Content-Type", "text/javascript")
.body(JS_BUNDLE.to_string())
.unwrap()
}
async fn js_map() -> axum::response::Response<String> {
axum::response::Response::builder()
.header("Content-Type", "text/json")
.body(JS_MAP.to_string())
.unwrap()
}
async fn css() -> axum::response::Response<String> {
axum::response::Response::builder()
.header("Content-Type", "text/css")
.body(CSS.to_string())
.unwrap()
}
async fn css_map() -> axum::response::Response<String> {
axum::response::Response::builder()
.header("Content-Type", "text/json")
.body(CSS_MAP.to_string())
.unwrap()
}
async fn wasm_file() -> impl IntoResponse {
axum::response::Response::builder()
.header(header::CONTENT_TYPE, header::HeaderValue::from_static("application/wasm"))
.header(header::CONTENT_DISPOSITION, header::HeaderValue::from_static("attachment; filename=wasm_pipe_bg.wasm"))
.body(StreamBody::new(ReaderStream::new(WASM_BODY)))
.unwrap()
}

View File

@ -0,0 +1,57 @@
use axum::extract::ws::WebSocket;
use pgdb::sqlx::{Pool, Postgres};
use serde::Serialize;
use wasm_pipe_types::WasmResponse;
use super::send_response;
#[derive(Debug, Serialize)]
pub struct LoginResult {
pub msg: String,
pub token: String,
pub name: String,
pub license_key: String,
}
pub async fn on_login(license: &str, username: &str, password: &str, socket: &mut WebSocket, cnn: Pool<Postgres>) -> Option<LoginResult> {
let login = pgdb::try_login(cnn, license, username, password).await;
if let Ok(login) = login {
let lr = WasmResponse::LoginOk {
token: login.token.clone(),
name: login.name.clone(),
license_key: license.to_string(),
};
send_response(socket, lr).await;
return Some(LoginResult {
msg: "Login Ok".to_string(),
token: login.token.to_string(),
name: login.name.to_string(),
license_key: license.to_string(),
});
} else {
let lr = WasmResponse::LoginFail;
send_response(socket, lr).await;
}
None
}
pub async fn on_token_auth(token_id: &str, socket: &mut WebSocket, cnn: Pool<Postgres>) -> Option<LoginResult> {
let login = pgdb::token_to_credentials(cnn, token_id).await;
if let Ok(login) = login {
let lr = WasmResponse::AuthOk {
token: login.token.clone(),
name: login.name.clone(),
license_key: login.license.clone(),
};
send_response(socket, lr).await;
return Some(LoginResult {
msg: "Login Ok".to_string(),
token: login.token.to_string(),
name: login.name.to_string(),
license_key: login.license.to_string(),
});
} else {
send_response(socket, WasmResponse::AuthFail).await;
}
None
}

View File

@ -0,0 +1,316 @@
use crate::web::wss::{
nodes::node_status,
queries::{
ext_device::{
send_extended_device_capacity_graph, send_extended_device_info,
send_extended_device_snr_graph,
},
omnisearch, root_heat_map, send_circuit_info, send_packets_for_all_nodes,
send_packets_for_node, send_perf_for_node, send_rtt_for_all_nodes,
send_rtt_for_all_nodes_circuit, send_rtt_for_all_nodes_site, send_rtt_for_node,
send_site_info, send_site_parents, send_site_stack_map, send_throughput_for_all_nodes,
send_throughput_for_all_nodes_by_circuit, send_throughput_for_all_nodes_by_site,
send_throughput_for_node, site_heat_map,
site_tree::send_site_tree,
time_period::InfluxTimePeriod,
send_circuit_parents, send_root_parents,
},
};
use axum::{
extract::{
ws::{Message, WebSocket, WebSocketUpgrade},
State,
},
response::IntoResponse,
};
use pgdb::sqlx::{Pool, Postgres};
use wasm_pipe_types::{WasmRequest, WasmResponse};
mod login;
mod nodes;
mod queries;
pub async fn ws_handler(
ws: WebSocketUpgrade,
State(state): State<Pool<Postgres>>,
) -> impl IntoResponse {
ws.on_upgrade(move |sock| handle_socket(sock, state))
}
async fn handle_socket(mut socket: WebSocket, cnn: Pool<Postgres>) {
tracing::info!("WebSocket Connected");
let mut credentials: Option<login::LoginResult> = None;
while let Some(msg) = socket.recv().await {
let cnn = cnn.clone();
let Ok(msg) = msg else {
tracing::warn!("WebSocket receive error; closing the connection");
break;
};
// Get the binary message and decompress it
tracing::info!("Received a message: {:?}", msg);
let raw = msg.into_data();
let Ok(uncompressed) = miniz_oxide::inflate::decompress_to_vec(&raw) else {
tracing::error!("Unable to decompress the incoming message; ignoring it");
continue;
};
let Ok(msg) = lts_client::cbor::from_slice::<WasmRequest>(&uncompressed) else {
tracing::error!("Unable to decode the incoming WasmRequest; ignoring it");
continue;
};
tracing::info!("{msg:?}");
// Update the token credentials (if there are any)
if let Some(credentials) = &credentials {
let _ = pgdb::refresh_token(cnn.clone(), &credentials.token).await;
}
// Handle the message by type
let matcher = (&msg, &mut credentials);
let wss = &mut socket;
match matcher {
// Handle login with just a token
(WasmRequest::Auth { token }, _) => {
let result = login::on_token_auth(token, &mut socket, cnn).await;
if let Some(result) = result {
credentials = Some(result);
}
}
// Handle login with a username and password
(
WasmRequest::Login {
license,
username,
password,
},
_,
) => {
let result = login::on_login(license, username, password, &mut socket, cnn).await;
if let Some(result) = result {
credentials = Some(result);
}
}
// Node status for dashboard
(WasmRequest::GetNodeStatus, Some(credentials)) => {
node_status(&cnn, wss, &credentials.license_key).await;
}
// Packet chart for dashboard
(WasmRequest::PacketChart { period }, Some(credentials)) => {
let _ =
send_packets_for_all_nodes(&cnn, wss, &credentials.license_key, period.into())
.await;
}
// Packet chart for individual node
(
WasmRequest::PacketChartSingle {
period,
node_id,
node_name,
},
Some(credentials),
) => {
let _ = send_packets_for_node(
&cnn,
wss,
&credentials.license_key,
period.into(),
node_id,
node_name,
)
.await;
}
// Throughput chart for the dashboard
(WasmRequest::ThroughputChart { period }, Some(credentials)) => {
let _ = send_throughput_for_all_nodes(
&cnn,
wss,
&credentials.license_key,
InfluxTimePeriod::new(period),
)
.await;
}
// Throughput chart for a single shaper node
(
WasmRequest::ThroughputChartSingle {
period,
node_id,
node_name,
},
Some(credentials),
) => {
let _ = send_throughput_for_node(
&cnn,
wss,
&credentials.license_key,
InfluxTimePeriod::new(period),
node_id.to_string(),
node_name.to_string(),
)
.await;
}
(WasmRequest::ThroughputChartSite { period, site_id }, Some(credentials)) => {
let _ = send_throughput_for_all_nodes_by_site(
&cnn,
wss,
&credentials.license_key,
site_id.to_string(),
InfluxTimePeriod::new(period),
)
.await;
}
(WasmRequest::ThroughputChartCircuit { period, circuit_id }, Some(credentials)) => {
let _ = send_throughput_for_all_nodes_by_circuit(
&cnn,
wss,
&credentials.license_key,
circuit_id.to_string(),
InfluxTimePeriod::new(period),
)
.await;
}
// Rtt Chart
(WasmRequest::RttChart { period }, Some(credentials)) => {
let _ = send_rtt_for_all_nodes(
&cnn,
wss,
&credentials.license_key,
InfluxTimePeriod::new(period),
)
.await;
}
(WasmRequest::RttChartSite { period, site_id }, Some(credentials)) => {
let _ = send_rtt_for_all_nodes_site(
&cnn,
wss,
&credentials.license_key,
site_id.to_string(),
InfluxTimePeriod::new(period),
)
.await;
}
(
WasmRequest::RttChartSingle {
period,
node_id,
node_name,
},
Some(credentials),
) => {
let _ = send_rtt_for_node(
&cnn,
wss,
&credentials.license_key,
InfluxTimePeriod::new(period),
node_id.to_string(),
node_name.to_string(),
)
.await;
}
(WasmRequest::RttChartCircuit { period, circuit_id }, Some(credentials)) => {
let _ = send_rtt_for_all_nodes_circuit(
&cnn,
wss,
&credentials.license_key,
circuit_id.to_string(),
InfluxTimePeriod::new(period),
)
.await;
}
// Site Stack
(WasmRequest::SiteStack { period, site_id }, Some(credentials)) => {
let _ = send_site_stack_map(
&cnn,
wss,
&credentials.license_key,
InfluxTimePeriod::new(period),
site_id.to_string(),
)
.await;
}
(WasmRequest::RootHeat { period }, Some(credentials)) => {
let _ = root_heat_map(
&cnn,
wss,
&credentials.license_key,
InfluxTimePeriod::new(period),
)
.await;
}
(WasmRequest::SiteHeat { period, site_id }, Some(credentials)) => {
let _ = site_heat_map(
&cnn,
wss,
&credentials.license_key,
site_id,
InfluxTimePeriod::new(period),
)
.await;
}
(
WasmRequest::NodePerfChart {
period,
node_id,
node_name,
},
Some(credentials),
) => {
let _ = send_perf_for_node(
&cnn,
wss,
&credentials.license_key,
InfluxTimePeriod::new(period),
node_id.to_string(),
node_name.to_string(),
)
.await;
}
(WasmRequest::Tree { parent }, Some(credentials)) => {
send_site_tree(&cnn, wss, &credentials.license_key, parent).await;
}
(WasmRequest::SiteInfo { site_id }, Some(credentials)) => {
send_site_info(&cnn, wss, &credentials.license_key, site_id).await;
}
(WasmRequest::SiteParents { site_id }, Some(credentials)) => {
send_site_parents(&cnn, wss, &credentials.license_key, site_id).await;
}
(WasmRequest::CircuitParents { circuit_id }, Some(credentials)) => {
send_circuit_parents(&cnn, wss, &credentials.license_key, circuit_id).await;
}
(WasmRequest::RootParents, Some(credentials)) => {
send_root_parents(&cnn, wss, &credentials.license_key).await;
}
(WasmRequest::Search { term }, Some(credentials)) => {
let _ = omnisearch(&cnn, wss, &credentials.license_key, term).await;
}
(WasmRequest::CircuitInfo { circuit_id }, Some(credentials)) => {
send_circuit_info(&cnn, wss, &credentials.license_key, circuit_id).await;
}
(WasmRequest::ExtendedDeviceInfo { circuit_id }, Some(credentials)) => {
send_extended_device_info(&cnn, wss, &credentials.license_key, circuit_id).await;
}
(WasmRequest::SignalNoiseChartExt { period, device_id }, Some(credentials)) => {
let _ = send_extended_device_snr_graph(
&cnn,
wss,
&credentials.license_key,
device_id,
InfluxTimePeriod::new(period),
)
.await;
}
(WasmRequest::DeviceCapacityChartExt { period, device_id }, Some(credentials)) => {
let _ = send_extended_device_capacity_graph(
&cnn,
wss,
&credentials.license_key,
device_id,
InfluxTimePeriod::new(period),
)
.await;
}
(_, None) => {
tracing::error!("No credentials");
}
}
}
}
fn serialize_response(response: WasmResponse) -> Vec<u8> {
let cbor = lts_client::cbor::to_vec(&response).unwrap();
miniz_oxide::deflate::compress_to_vec(&cbor, 8)
}
pub async fn send_response(socket: &mut WebSocket, response: WasmResponse) {
let serialized = serialize_response(response);
if let Err(e) = socket.send(Message::Binary(serialized)).await {
tracing::warn!("Unable to send the WebSocket response (client disconnected?): {e:?}");
}
}
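// Illustrative sketch (not part of the shipped file): the client-side mirror of
// serialize_response() above. handle_socket() expects every incoming binary frame to be
// a CBOR-encoded WasmRequest compressed with deflate, which is what this produces.
#[allow(dead_code)]
fn serialize_request(request: &WasmRequest) -> Vec<u8> {
    let cbor = lts_client::cbor::to_vec(request).expect("WasmRequest should always serialize");
    miniz_oxide::deflate::compress_to_vec(&cbor, 8)
}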

View File

@ -0,0 +1,27 @@
use axum::extract::ws::WebSocket;
use pgdb::sqlx::{Pool, Postgres};
use wasm_pipe_types::Node;
use crate::web::wss::send_response;
fn convert(ns: pgdb::NodeStatus) -> Node {
Node {
node_id: ns.node_id,
node_name: ns.node_name,
last_seen: ns.last_seen,
}
}
pub async fn node_status(cnn: &Pool<Postgres>, socket: &mut WebSocket, key: &str) {
tracing::info!("Fetching node status, {key}");
let nodes = pgdb::node_status(cnn, key).await;
match nodes {
Ok(nodes) => {
let nodes: Vec<Node> = nodes.into_iter().map(convert).collect();
send_response(socket, wasm_pipe_types::WasmResponse::NodeStatus { nodes }).await;
},
Err(e) => {
tracing::error!("Unable to obtain node status: {}", e);
}
}
}

View File

@ -0,0 +1,29 @@
use axum::extract::ws::WebSocket;
use pgdb::sqlx::{Pool, Postgres};
use wasm_pipe_types::CircuitList;
use crate::web::wss::send_response;
fn from(circuit: pgdb::CircuitInfo) -> CircuitList {
CircuitList {
circuit_name: circuit.circuit_name,
device_id: circuit.device_id,
device_name: circuit.device_name,
parent_node: circuit.parent_node,
mac: circuit.mac,
download_min_mbps: circuit.download_min_mbps,
download_max_mbps: circuit.download_max_mbps,
upload_min_mbps: circuit.upload_min_mbps,
upload_max_mbps: circuit.upload_max_mbps,
comment: circuit.comment,
ip_range: circuit.ip_range,
subnet: circuit.subnet,
}
}
pub async fn send_circuit_info(cnn: &Pool<Postgres>, socket: &mut WebSocket, key: &str, circuit_id: &str) {
if let Ok(hosts) = pgdb::get_circuit_info(cnn, key, circuit_id).await {
let hosts = hosts.into_iter().map(from).collect::<Vec<_>>();
send_response(socket, wasm_pipe_types::WasmResponse::CircuitInfo { data: hosts }).await;
}
}

View File

@ -0,0 +1,166 @@
use std::collections::HashSet;
use axum::extract::ws::WebSocket;
use chrono::{DateTime, FixedOffset};
use influxdb2::{FromDataPoint, models::Query, Client};
use pgdb::{sqlx::{Pool, Postgres}, organization_cache::get_org_details};
use crate::web::wss::send_response;
use super::time_period::InfluxTimePeriod;
pub async fn send_extended_device_info(
cnn: &Pool<Postgres>,
socket: &mut WebSocket,
key: &str,
circuit_id: &str,
) {
// Get devices for circuit
if let Ok(hosts_list) = pgdb::get_circuit_info(cnn, key, circuit_id).await {
// Get the hosts known to be in this circuit
let mut hosts = HashSet::new();
hosts_list.into_iter().for_each(|h| {
hosts.insert(h.device_id);
});
if hosts.is_empty() {
return;
}
println!("{hosts:?}");
// Get extended data
let mut extended_data = Vec::new();
for host in hosts.iter() {
let ext = pgdb::get_device_info_ext(cnn, key, host).await;
if let Ok(ext) = ext {
let mut ext_wasm = wasm_pipe_types::ExtendedDeviceInfo {
device_id: ext.device_id.clone(),
name: ext.name.clone(),
model: ext.model.clone(),
firmware: ext.firmware.clone(),
status: ext.status.clone(),
mode: ext.mode.clone(),
channel_width: ext.channel_width,
tx_power: ext.tx_power,
interfaces: Vec::new(),
};
if let Ok(interfaces) = pgdb::get_device_interfaces_ext(cnn, key, host).await {
for ed in interfaces {
let edw = wasm_pipe_types::ExtendedDeviceInterface {
name: ed.name,
mac: ed.mac,
status: ed.status,
speed: ed.speed,
ip_list: ed.ip_list,
};
ext_wasm.interfaces.push(edw);
}
}
extended_data.push(ext_wasm);
} else {
tracing::error!("Error getting extended device info: {:?}", ext);
}
}
// If there is any, send it
println!("{extended_data:?}");
if !extended_data.is_empty() {
send_response(socket, wasm_pipe_types::WasmResponse::DeviceExt { data: extended_data }).await;
}
}
}
pub async fn send_extended_device_snr_graph(
cnn: &Pool<Postgres>,
socket: &mut WebSocket,
key: &str,
device_id: &str,
period: InfluxTimePeriod,
) -> anyhow::Result<()> {
if let Some(org) = get_org_details(cnn, key).await {
let influx_url = format!("http://{}:8086", org.influx_host);
let client = Client::new(influx_url, &org.influx_org, &org.influx_token);
let qs = format!(
"from(bucket: \"{}\")
|> {}
|> filter(fn: (r) => r[\"_measurement\"] == \"device_ext\")
|> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
|> filter(fn: (r) => r[\"device_id\"] == \"{}\")
|> filter(fn: (r) => r[\"_field\"] == \"noise_floor\" or r[\"_field\"] == \"rx_signal\")
|> {}
|> yield(name: \"last\")",
org.influx_bucket, period.range(), org.key, device_id, period.aggregate_window()
);
//println!("{qs}");
let query = Query::new(qs);
let rows = client.query::<SnrRow>(Some(query)).await?;
let mut sn = Vec::new();
rows.iter().for_each(|row| {
let snr = wasm_pipe_types::SignalNoiseChartExt {
noise: row.noise_floor,
signal: row.rx_signal,
date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
};
sn.push(snr);
});
send_response(socket, wasm_pipe_types::WasmResponse::DeviceExtSnr { data: sn, device_id: device_id.to_string() }).await;
}
Ok(())
}
#[derive(Debug, FromDataPoint, Default)]
pub struct SnrRow {
pub device_id: String,
pub noise_floor: f64,
pub rx_signal: f64,
pub time: DateTime<FixedOffset>,
}
pub async fn send_extended_device_capacity_graph(
cnn: &Pool<Postgres>,
socket: &mut WebSocket,
key: &str,
device_id: &str,
period: InfluxTimePeriod,
) -> anyhow::Result<()> {
if let Some(org) = get_org_details(cnn, key).await {
let influx_url = format!("http://{}:8086", org.influx_host);
let client = Client::new(influx_url, &org.influx_org, &org.influx_token);
let qs = format!(
"from(bucket: \"{}\")
|> {}
|> filter(fn: (r) => r[\"_measurement\"] == \"device_ext\")
|> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
|> filter(fn: (r) => r[\"device_id\"] == \"{}\")
|> filter(fn: (r) => r[\"_field\"] == \"dl_capacity\" or r[\"_field\"] == \"ul_capacity\")
|> {}
|> yield(name: \"last\")",
org.influx_bucket, period.range(), org.key, device_id, period.aggregate_window()
);
//println!("{qs}");
let query = Query::new(qs);
let rows = client.query::<CapacityRow>(Some(query)).await?;
let mut sn = Vec::new();
rows.iter().for_each(|row| {
let snr = wasm_pipe_types::CapacityChartExt {
dl: row.dl_capacity,
ul: row.ul_capacity,
date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
};
sn.push(snr);
});
send_response(socket, wasm_pipe_types::WasmResponse::DeviceExtCapacity { data: sn, device_id: device_id.to_string() }).await;
}
Ok(())
}
#[derive(Debug, FromDataPoint, Default)]
pub struct CapacityRow {
pub device_id: String,
pub dl_capacity: f64,
pub ul_capacity: f64,
pub time: DateTime<FixedOffset>,
}

View File

@ -0,0 +1,28 @@
//! Provides pre-packaged queries for obtaining data, that will
//! then be used by the web server to respond to requests.
mod circuit_info;
mod node_perf;
mod packet_counts;
mod rtt;
mod search;
mod site_heat_map;
mod site_info;
mod site_parents;
pub mod site_tree;
mod throughput;
pub mod ext_device;
pub mod time_period;
pub use circuit_info::send_circuit_info;
pub use node_perf::send_perf_for_node;
pub use packet_counts::{send_packets_for_all_nodes, send_packets_for_node};
pub use rtt::{send_rtt_for_all_nodes, send_rtt_for_all_nodes_site, send_rtt_for_node, send_rtt_for_all_nodes_circuit};
pub use search::omnisearch;
pub use site_heat_map::{root_heat_map, site_heat_map};
pub use site_info::send_site_info;
pub use site_parents::{send_site_parents, send_circuit_parents, send_root_parents};
pub use throughput::{
send_throughput_for_all_nodes, send_throughput_for_all_nodes_by_circuit,
send_throughput_for_all_nodes_by_site, send_throughput_for_node,
send_site_stack_map,
};

View File

@ -0,0 +1,97 @@
use axum::extract::ws::WebSocket;
use chrono::{DateTime, FixedOffset, Utc};
use influxdb2::{Client, FromDataPoint, models::Query};
use pgdb::{sqlx::{Pool, Postgres}, organization_cache::get_org_details};
use wasm_pipe_types::{PerfHost, Perf};
use crate::web::wss::send_response;
use super::time_period::InfluxTimePeriod;
#[derive(Debug, FromDataPoint)]
pub struct PerfRow {
pub host_id: String,
pub time: DateTime<FixedOffset>,
pub cpu: f64,
pub cpu_max: f64,
pub ram: f64,
}
impl Default for PerfRow {
fn default() -> Self {
Self {
host_id: "".to_string(),
time: DateTime::<Utc>::MIN_UTC.into(),
cpu: 0.0,
cpu_max: 0.0,
ram: 0.0,
}
}
}
pub async fn send_perf_for_node(
cnn: &Pool<Postgres>,
socket: &mut WebSocket,
key: &str,
period: InfluxTimePeriod,
node_id: String,
node_name: String,
) -> anyhow::Result<()> {
let node = get_perf_for_node(cnn, key, node_id, node_name, period).await?;
send_response(socket, wasm_pipe_types::WasmResponse::NodePerfChart { nodes: vec![node] }).await;
Ok(())
}
pub async fn get_perf_for_node(
cnn: &Pool<Postgres>,
key: &str,
node_id: String,
node_name: String,
period: InfluxTimePeriod,
) -> anyhow::Result<PerfHost> {
if let Some(org) = get_org_details(cnn, key).await {
let influx_url = format!("http://{}:8086", org.influx_host);
let client = Client::new(influx_url, &org.influx_org, &org.influx_token);
let qs = format!(
"from(bucket: \"{}\")
|> {}
|> filter(fn: (r) => r[\"_measurement\"] == \"perf\")
|> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
|> filter(fn: (r) => r[\"host_id\"] == \"{}\")
|> {}
|> yield(name: \"last\")",
org.influx_bucket, period.range(), org.key, node_id, period.aggregate_window()
);
let query = Query::new(qs);
let rows = client.query::<PerfRow>(Some(query)).await;
match rows {
Err(e) => {
tracing::error!("Error querying InfluxDB (node-perf): {}", e);
return Err(anyhow::Error::msg("Unable to query influx"));
}
Ok(rows) => {
// Parse and send the data
//println!("{rows:?}");
let mut stats = Vec::new();
// Fill download
for row in rows.iter() {
stats.push(Perf {
date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
cpu: row.cpu,
cpu_max: row.cpu_max,
ram: row.ram,
});
}
return Ok(PerfHost{
node_id,
node_name,
stats,
});
}
}
}
Err(anyhow::Error::msg("Unable to query influx"))
}

View File

@ -0,0 +1,144 @@
//! Packet-per-second data queries
mod packet_row;
use self::packet_row::PacketRow;
use super::time_period::InfluxTimePeriod;
use crate::web::wss::send_response;
use axum::extract::ws::WebSocket;
use futures::future::join_all;
use influxdb2::{models::Query, Client};
use pgdb::{sqlx::{Pool, Postgres}, organization_cache::get_org_details};
use wasm_pipe_types::{PacketHost, Packets};
pub async fn send_packets_for_all_nodes(
cnn: &Pool<Postgres>,
socket: &mut WebSocket,
key: &str,
period: InfluxTimePeriod,
) -> anyhow::Result<()> {
let nodes = get_packets_for_all_nodes(cnn, key, period).await?;
send_response(socket, wasm_pipe_types::WasmResponse::PacketChart { nodes }).await;
Ok(())
}
pub async fn send_packets_for_node(
cnn: &Pool<Postgres>,
socket: &mut WebSocket,
key: &str,
period: InfluxTimePeriod,
node_id: &str,
node_name: &str,
) -> anyhow::Result<()> {
let node =
get_packets_for_node(cnn, key, node_id.to_string(), node_name.to_string(), period).await?;
send_response(
socket,
wasm_pipe_types::WasmResponse::PacketChart { nodes: vec![node] },
)
.await;
Ok(())
}
/// Requests packet-per-second data for all shaper nodes for a given organization
///
/// # Arguments
/// * `cnn` - A connection pool to the database
/// * `key` - The organization's license key
/// * `period` - The InfluxDB time period to query
pub async fn get_packets_for_all_nodes(
cnn: &Pool<Postgres>,
key: &str,
period: InfluxTimePeriod,
) -> anyhow::Result<Vec<PacketHost>> {
let node_status = pgdb::node_status(cnn, key).await?;
let mut futures = Vec::new();
for node in node_status {
futures.push(get_packets_for_node(
cnn,
key,
node.node_id.to_string(),
node.node_name.to_string(),
period.clone(),
));
}
let all_nodes: anyhow::Result<Vec<PacketHost>> = join_all(futures).await.into_iter().collect();
all_nodes
}
/// Requests packet-per-second data for a single shaper node.
///
/// # Arguments
/// * `cnn` - A connection pool to the database
/// * `key` - The organization's license key
/// * `node_id` - The ID of the node to query
/// * `node_name` - The name of the node to query
/// * `period` - The InfluxDB time period to query
pub async fn get_packets_for_node(
cnn: &Pool<Postgres>,
key: &str,
node_id: String,
node_name: String,
period: InfluxTimePeriod,
) -> anyhow::Result<PacketHost> {
if let Some(org) = get_org_details(cnn, key).await {
let influx_url = format!("http://{}:8086", org.influx_host);
let client = Client::new(influx_url, &org.influx_org, &org.influx_token);
let qs = format!(
"from(bucket: \"{}\")
|> {}
|> filter(fn: (r) => r[\"_measurement\"] == \"packets\")
|> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
|> filter(fn: (r) => r[\"host_id\"] == \"{}\")
|> {}
|> yield(name: \"last\")",
org.influx_bucket,
period.range(),
org.key,
node_id,
period.aggregate_window()
);
let query = Query::new(qs);
let rows = client.query::<PacketRow>(Some(query)).await;
match rows {
Err(e) => {
tracing::error!("Error querying InfluxDB (packets by node): {}", e);
return Err(anyhow::Error::msg("Unable to query influx"));
}
Ok(rows) => {
// Parse and send the data
//println!("{rows:?}");
let mut down = Vec::new();
let mut up = Vec::new();
// Fill download
for row in rows.iter().filter(|r| r.direction == "down") {
down.push(Packets {
value: row.avg,
date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
l: row.min,
u: row.max - row.min,
});
}
// Fill upload
for row in rows.iter().filter(|r| r.direction == "up") {
up.push(Packets {
value: row.avg,
date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
l: row.min,
u: row.max - row.min,
});
}
return Ok(PacketHost {
node_id,
node_name,
down,
up,
});
}
}
}
Err(anyhow::Error::msg("Unable to query influx"))
}

View File

@ -0,0 +1,25 @@
use chrono::{DateTime, FixedOffset, Utc};
use influxdb2::FromDataPoint;
#[derive(Debug, FromDataPoint)]
pub struct PacketRow {
pub direction: String,
pub host_id: String,
pub min: f64,
pub max: f64,
pub avg: f64,
pub time: DateTime<FixedOffset>,
}
impl Default for PacketRow {
fn default() -> Self {
Self {
direction: "".to_string(),
host_id: "".to_string(),
min: 0.0,
max: 0.0,
avg: 0.0,
time: DateTime::<Utc>::MIN_UTC.into(),
}
}
}

View File

@ -0,0 +1,314 @@
use axum::extract::ws::WebSocket;
use futures::future::join_all;
use influxdb2::{Client, models::Query};
use pgdb::{sqlx::{Pool, Postgres}, organization_cache::get_org_details};
use wasm_pipe_types::{RttHost, Rtt};
use crate::{web::wss::{queries::rtt::rtt_row::RttCircuitRow, send_response}};
use self::rtt_row::{RttRow, RttSiteRow};
use super::time_period::InfluxTimePeriod;
mod rtt_row;
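/// Sends an RTT chart covering every shaper node for an organization. The
/// per-node series are merged into a single series via `rtt_bucket_merge`, and a
/// 20-bucket histogram (10 ms per bucket, values capped at 200 ms) is built alongside it.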
pub async fn send_rtt_for_all_nodes(cnn: &Pool<Postgres>, socket: &mut WebSocket, key: &str, period: InfluxTimePeriod) -> anyhow::Result<()> {
let nodes = get_rtt_for_all_nodes(cnn, key, period).await?;
let mut histogram = vec![0; 20];
for node in nodes.iter() {
for rtt in node.rtt.iter() {
let bucket = usize::min(19, (rtt.value / 10.0) as usize);
histogram[bucket] += 1;
}
}
let nodes = vec![RttHost { node_id: "".to_string(), node_name: "".to_string(), rtt: rtt_bucket_merge(&nodes) }];
send_response(socket, wasm_pipe_types::WasmResponse::RttChart { nodes, histogram }).await;
Ok(())
}
pub async fn send_rtt_for_all_nodes_site(cnn: &Pool<Postgres>, socket: &mut WebSocket, key: &str, site_id: String, period: InfluxTimePeriod) -> anyhow::Result<()> {
let nodes = get_rtt_for_all_nodes_site(cnn, key, &site_id, period).await?;
let mut histogram = vec![0; 20];
for node in nodes.iter() {
for rtt in node.rtt.iter() {
let bucket = usize::min(19, (rtt.value / 200.0) as usize);
histogram[bucket] += 1;
}
}
send_response(socket, wasm_pipe_types::WasmResponse::RttChartSite { nodes, histogram }).await;
Ok(())
}
pub async fn send_rtt_for_all_nodes_circuit(cnn: &Pool<Postgres>, socket: &mut WebSocket, key: &str, site_id: String, period: InfluxTimePeriod) -> anyhow::Result<()> {
let nodes = get_rtt_for_all_nodes_circuit(cnn, key, &site_id, period).await?;
let mut histogram = vec![0; 20];
for node in nodes.iter() {
for rtt in node.rtt.iter() {
let bucket = usize::min(19, (rtt.value / 200.0) as usize);
histogram[bucket] += 1;
}
}
send_response(socket, wasm_pipe_types::WasmResponse::RttChartCircuit { nodes, histogram }).await;
Ok(())
}
pub async fn send_rtt_for_node(cnn: &Pool<Postgres>, socket: &mut WebSocket, key: &str, period: InfluxTimePeriod, node_id: String, node_name: String) -> anyhow::Result<()> {
let node = get_rtt_for_node(cnn, key, node_id, node_name, period).await?;
let nodes = vec![node];
let mut histogram = vec![0; 20];
for node in nodes.iter() {
for rtt in node.rtt.iter() {
let bucket = usize::min(19, (rtt.value / 200.0) as usize);
histogram[bucket] += 1;
}
}
send_response(socket, wasm_pipe_types::WasmResponse::RttChart { nodes, histogram }).await;
Ok(())
}
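/// Merges per-node RTT series into a single series: entries sharing a timestamp
/// are combined by taking the minimum lower bound and the maximum upper bound.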
fn rtt_bucket_merge(rtt: &[RttHost]) -> Vec<Rtt> {
    let mut entries: Vec<Rtt> = Vec::new();
    for host in rtt.iter() {
        for entry in host.rtt.iter() {
            if let Some(e) = entries.iter().position(|d| d.date == entry.date) {
                entries[e].l = f64::min(entries[e].l, entry.l);
                entries[e].u = f64::max(entries[e].u, entry.u);
            } else {
                entries.push(entry.clone());
            }
        }
    }
    entries
}
pub async fn get_rtt_for_all_nodes(cnn: &Pool<Postgres>, key: &str, period: InfluxTimePeriod) -> anyhow::Result<Vec<RttHost>> {
let node_status = pgdb::node_status(cnn, key).await?;
let mut futures = Vec::new();
for node in node_status {
futures.push(get_rtt_for_node(
cnn,
key,
node.node_id.to_string(),
node.node_name.to_string(),
period.clone(),
));
}
let all_nodes: anyhow::Result<Vec<RttHost>> = join_all(futures).await
.into_iter().collect();
all_nodes
}
pub async fn get_rtt_for_all_nodes_site(cnn: &Pool<Postgres>, key: &str, site_id: &str, period: InfluxTimePeriod) -> anyhow::Result<Vec<RttHost>> {
let node_status = pgdb::node_status(cnn, key).await?;
let mut futures = Vec::new();
for node in node_status {
futures.push(get_rtt_for_node_site(
cnn,
key,
node.node_id.to_string(),
node.node_name.to_string(),
site_id.to_string(),
period.clone(),
));
}
let all_nodes: anyhow::Result<Vec<RttHost>> = join_all(futures).await
.into_iter().collect();
all_nodes
}
pub async fn get_rtt_for_all_nodes_circuit(cnn: &Pool<Postgres>, key: &str, circuit_id: &str, period: InfluxTimePeriod) -> anyhow::Result<Vec<RttHost>> {
let node_status = pgdb::node_status(cnn, key).await?;
let mut futures = Vec::new();
for node in node_status {
futures.push(get_rtt_for_node_circuit(
cnn,
key,
node.node_id.to_string(),
node.node_name.to_string(),
circuit_id.to_string(),
period.clone(),
));
}
let all_nodes: anyhow::Result<Vec<RttHost>> = join_all(futures).await
.into_iter().collect();
all_nodes
}
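/// Queries InfluxDB for the `rtt` measurement of a single shaper node and returns
/// a min/avg/max series for the requested period. Values are clamped to 200 ms.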
pub async fn get_rtt_for_node(
cnn: &Pool<Postgres>,
key: &str,
node_id: String,
node_name: String,
period: InfluxTimePeriod,
) -> anyhow::Result<RttHost> {
if let Some(org) = get_org_details(cnn, key).await {
let influx_url = format!("http://{}:8086", org.influx_host);
let client = Client::new(influx_url, &org.influx_org, &org.influx_token);
let qs = format!(
"from(bucket: \"{}\")
|> {}
|> filter(fn: (r) => r[\"_measurement\"] == \"rtt\")
|> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
|> filter(fn: (r) => r[\"host_id\"] == \"{}\")
|> {}
|> yield(name: \"last\")",
org.influx_bucket, period.range(), org.key, node_id, period.aggregate_window()
);
let query = Query::new(qs);
let rows = client.query::<RttRow>(Some(query)).await;
match rows {
Err(e) => {
tracing::error!("Error querying InfluxDB (rtt node): {}", e);
return Err(anyhow::Error::msg("Unable to query influx"));
}
Ok(rows) => {
// Parse and send the data
//println!("{rows:?}");
let mut rtt = Vec::new();
// Fill RTT
for row in rows.iter() {
rtt.push(Rtt {
value: f64::min(200.0, row.avg),
date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
l: f64::min(200.0, row.min),
u: f64::min(200.0, row.max) - f64::min(200.0, row.min),
});
}
return Ok(RttHost{
node_id,
node_name,
rtt,
});
}
}
}
Err(anyhow::Error::msg("Unable to query influx"))
}
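/// Per-site RTT for a single shaper node, taken from the `tree` measurement.
/// The site identifier is matched against the measurement's `node_name` tag.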
pub async fn get_rtt_for_node_site(
cnn: &Pool<Postgres>,
key: &str,
node_id: String,
node_name: String,
site_id: String,
period: InfluxTimePeriod,
) -> anyhow::Result<RttHost> {
if let Some(org) = get_org_details(cnn, key).await {
let influx_url = format!("http://{}:8086", org.influx_host);
let client = Client::new(influx_url, &org.influx_org, &org.influx_token);
let qs = format!(
"from(bucket: \"{}\")
|> {}
|> filter(fn: (r) => r[\"_measurement\"] == \"tree\")
|> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
|> filter(fn: (r) => r[\"host_id\"] == \"{}\")
|> filter(fn: (r) => r[\"node_name\"] == \"{}\")
|> filter(fn: (r) => r[\"_field\"] == \"rtt_avg\" or r[\"_field\"] == \"rtt_max\" or r[\"_field\"] == \"rtt_min\")
|> {}
|> yield(name: \"last\")",
org.influx_bucket, period.range(), org.key, node_id, site_id, period.aggregate_window()
);
let query = Query::new(qs);
let rows = client.query::<RttSiteRow>(Some(query)).await;
match rows {
Err(e) => {
tracing::error!("Error querying InfluxDB (rtt node site): {}", e);
return Err(anyhow::Error::msg("Unable to query influx"));
}
Ok(rows) => {
// Parse and send the data
//println!("{rows:?}");
let mut rtt = Vec::new();
// Fill download
for row in rows.iter() {
rtt.push(Rtt {
value: row.rtt_avg,
date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
l: row.rtt_min,
u: row.rtt_max - row.rtt_min,
});
}
return Ok(RttHost{
node_id,
node_name,
rtt,
});
}
}
}
Err(anyhow::Error::msg("Unable to query influx"))
}
pub async fn get_rtt_for_node_circuit(
cnn: &Pool<Postgres>,
key: &str,
node_id: String,
node_name: String,
circuit_id: String,
period: InfluxTimePeriod,
) -> anyhow::Result<RttHost> {
if let Some(org) = get_org_details(cnn, key).await {
let influx_url = format!("http://{}:8086", org.influx_host);
let client = Client::new(influx_url, &org.influx_org, &org.influx_token);
let qs = format!(
"from(bucket: \"{}\")
|> {}
|> filter(fn: (r) => r[\"_measurement\"] == \"rtt\")
|> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
|> filter(fn: (r) => r[\"host_id\"] == \"{}\")
|> filter(fn: (r) => r[\"circuit_id\"] == \"{}\")
|> filter(fn: (r) => r[\"_field\"] == \"avg\" or r[\"_field\"] == \"max\" or r[\"_field\"] == \"min\")
|> {}
|> yield(name: \"last\")",
org.influx_bucket, period.range(), org.key, node_id, circuit_id, period.aggregate_window()
);
//log::warn!("{qs}");
let query = Query::new(qs);
let rows = client.query::<RttCircuitRow>(Some(query)).await;
match rows {
Err(e) => {
tracing::error!("Error querying InfluxDB (rtt_node_circuit): {}", e);
return Err(anyhow::Error::msg("Unable to query influx"));
}
Ok(rows) => {
// Parse and send the data
//println!("{rows:?}");
let mut rtt = Vec::new();
// Fill download
for row in rows.iter() {
rtt.push(Rtt {
value: row.avg,
date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
l: row.min,
u: row.max - row.min,
});
}
return Ok(RttHost{
node_id,
node_name,
rtt,
});
}
}
}
Err(anyhow::Error::msg("Unable to query influx"))
}

View File

@ -0,0 +1,65 @@
use chrono::{DateTime, FixedOffset, Utc};
use influxdb2::FromDataPoint;
#[derive(Debug, FromDataPoint)]
pub struct RttRow {
pub host_id: String,
pub min: f64,
pub max: f64,
pub avg: f64,
pub time: DateTime<FixedOffset>,
}
impl Default for RttRow {
fn default() -> Self {
Self {
host_id: "".to_string(),
min: 0.0,
max: 0.0,
avg: 0.0,
time: DateTime::<Utc>::MIN_UTC.into(),
}
}
}
#[derive(Debug, FromDataPoint)]
pub struct RttSiteRow {
pub host_id: String,
pub rtt_min: f64,
pub rtt_max: f64,
pub rtt_avg: f64,
pub time: DateTime<FixedOffset>,
}
impl Default for RttSiteRow {
fn default() -> Self {
Self {
host_id: "".to_string(),
rtt_min: 0.0,
rtt_max: 0.0,
rtt_avg: 0.0,
time: DateTime::<Utc>::MIN_UTC.into(),
}
}
}
#[derive(Debug, FromDataPoint)]
pub struct RttCircuitRow {
pub host_id: String,
pub min: f64,
pub max: f64,
pub avg: f64,
pub time: DateTime<FixedOffset>,
}
impl Default for RttCircuitRow {
fn default() -> Self {
Self {
host_id: "".to_string(),
min: 0.0,
max: 0.0,
avg: 0.0,
time: DateTime::<Utc>::MIN_UTC.into(),
}
}
}

View File

@ -0,0 +1,88 @@
use axum::extract::ws::WebSocket;
use pgdb::sqlx::{Pool, Postgres};
use wasm_pipe_types::SearchResult;
use crate::web::wss::send_response;
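/// Runs a free-text search across circuits (matched by device name and IP address)
/// and sites, de-duplicates the combined hits, orders them by score and sends the
/// result over the WebSocket.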
pub async fn omnisearch(
cnn: &Pool<Postgres>,
socket: &mut WebSocket,
key: &str,
term: &str,
) -> anyhow::Result<()> {
tracing::warn!("Searching for {term}");
let hits = search_devices(cnn, key, term).await;
if let Err(e) = &hits {
tracing::error!("{e:?}");
}
let mut hits = hits.unwrap_or_default();
hits.extend(search_ips(cnn, key, term).await?);
hits.extend(search_sites(cnn, key, term).await?);
hits.sort_by(|a,b| a.name.cmp(&b.name));
hits.dedup_by(|a,b| a.name == b.name && a.url == b.url);
hits.sort_by(|a,b| a.score.partial_cmp(&b.score).unwrap());
send_response(socket, wasm_pipe_types::WasmResponse::SearchResult { hits }).await;
Ok(())
}
async fn search_devices(
cnn: &Pool<Postgres>,
key: &str,
term: &str,
) -> anyhow::Result<Vec<SearchResult>> {
let hits = pgdb::search_devices(cnn, key, term).await?;
Ok(hits
.iter()
.map(|hit| SearchResult {
name: hit.circuit_name.to_string(),
url: format!("circuit:{}", hit.circuit_id),
score: hit.score,
icon: "circuit".to_string(),
})
.collect())
}
async fn search_ips(
cnn: &Pool<Postgres>,
key: &str,
term: &str,
) -> anyhow::Result<Vec<SearchResult>> {
let hits = pgdb::search_ip(cnn, key, term).await?;
Ok(hits
.iter()
.map(|hit| SearchResult {
name: hit.circuit_name.to_string(),
url: format!("circuit:{}", hit.circuit_id),
score: hit.score,
icon: "circuit".to_string(),
})
.collect())
}
async fn search_sites(
cnn: &Pool<Postgres>,
key: &str,
term: &str,
) -> anyhow::Result<Vec<SearchResult>> {
let hits = pgdb::search_sites(cnn, key, term).await?;
Ok(hits
.iter()
.map(|hit| {
let t = if hit.site_type.is_empty() {
"site".to_string()
} else {
hit.site_type.to_string()
};
SearchResult {
name: hit.site_name.to_string(),
url: format!("{t}:{}", hit.site_name),
score: hit.score,
icon: t,
}})
.collect())
}

View File

@ -0,0 +1,282 @@
use super::time_period::InfluxTimePeriod;
use crate::web::wss::send_response;
use axum::extract::ws::WebSocket;
use chrono::{DateTime, FixedOffset, Utc};
use influxdb2::Client;
use influxdb2::{models::Query, FromDataPoint};
use pgdb::organization_cache::get_org_details;
use pgdb::sqlx::{query, Pool, Postgres, Row};
use pgdb::OrganizationDetails;
use serde::Serialize;
use std::collections::HashMap;
use wasm_pipe_types::WasmResponse;
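/// Builds the RTT heat map for the top of the tree: averages the `rtt_avg` field of
/// the `tree` measurement for every site whose parent is the root (parent = 0),
/// grouped by site name, and sends the result as a `RootHeat` response.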
pub async fn root_heat_map(
cnn: &Pool<Postgres>,
socket: &mut WebSocket,
key: &str,
period: InfluxTimePeriod,
) -> anyhow::Result<()> {
if let Some(org) = get_org_details(cnn, key).await {
let influx_url = format!("http://{}:8086", org.influx_host);
let client = Client::new(influx_url, &org.influx_org, &org.influx_token);
// Get sites where parent=0 (for this setup)
let hosts: Vec<String> =
query("SELECT DISTINCT site_name FROM site_tree WHERE key=$1 AND parent=0")
.bind(key)
.fetch_all(cnn)
.await?
.iter()
.map(|row| row.try_get("site_name").unwrap())
.filter(|row| row != "Root")
.collect();
let mut host_filter = "filter(fn: (r) => ".to_string();
for host in hosts.iter() {
host_filter += &format!("r[\"node_name\"] == \"{host}\" or ");
}
host_filter = host_filter[0..host_filter.len() - 4].to_string();
host_filter += ")";
// Query influx for RTT averages
let qs = format!(
"from(bucket: \"{}\")
|> {}
|> filter(fn: (r) => r[\"_measurement\"] == \"tree\")
|> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
|> filter(fn: (r) => r[\"_field\"] == \"rtt_avg\")
|> {}
|> {}
|> yield(name: \"last\")",
org.influx_bucket,
period.range(),
org.key,
host_filter,
period.aggregate_window()
);
//println!("{qs}");
let query = Query::new(qs);
let rows = client.query::<HeatRow>(Some(query)).await;
match rows {
Err(e) => {
tracing::error!("Error querying InfluxDB (root heat map): {}", e);
return Err(anyhow::Error::msg("Unable to query influx"));
}
Ok(rows) => {
let mut sorter: HashMap<String, Vec<(DateTime<FixedOffset>, f64)>> = HashMap::new();
for row in rows.iter() {
if let Some(hat) = sorter.get_mut(&row.node_name) {
hat.push((row.time, row.rtt_avg));
} else {
sorter.insert(row.node_name.clone(), vec![(row.time, row.rtt_avg)]);
}
}
send_response(socket, WasmResponse::RootHeat { data: sorter }).await;
}
}
}
Ok(())
}
async fn site_circuits_heat_map(
cnn: &Pool<Postgres>,
key: &str,
site_name: &str,
period: InfluxTimePeriod,
sorter: &mut HashMap<String, Vec<(DateTime<FixedOffset>, f64)>>,
client: Client,
org: &OrganizationDetails,
) -> anyhow::Result<()> {
// Get sites where parent=site_id (for this setup)
let hosts: Vec<(String, String)> =
query("SELECT DISTINCT circuit_id, circuit_name FROM shaped_devices WHERE key=$1 AND parent_node=$2")
.bind(key)
.bind(site_name)
.fetch_all(cnn)
.await?
.iter()
.map(|row| (row.try_get("circuit_id").unwrap(), row.try_get("circuit_name").unwrap()))
.collect();
let mut circuit_map = HashMap::new();
for (id, name) in hosts.iter() {
circuit_map.insert(id, name);
}
let hosts = hosts.iter().map(|(id, _)| id).collect::<Vec<_>>();
let mut host_filter = "filter(fn: (r) => ".to_string();
for host in hosts.iter() {
host_filter += &format!("r[\"circuit_id\"] == \"{host}\" or ");
}
host_filter = host_filter[0..host_filter.len() - 4].to_string();
host_filter += ")";
// Query influx for RTT averages
let qs = format!(
"from(bucket: \"{}\")
|> {}
|> filter(fn: (r) => r[\"_measurement\"] == \"rtt\")
|> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
|> filter(fn: (r) => r[\"_field\"] == \"avg\")
|> {}
|> {}
|> yield(name: \"last\")",
org.influx_bucket,
period.range(),
org.key,
host_filter,
period.aggregate_window()
);
//println!("{qs}\n\n");
if qs.contains("filter(fn: (r))") {
// No hosts to filter
return Ok(());
}
let query = Query::new(qs);
let rows = client.query::<HeatCircuitRow>(Some(query)).await;
match rows {
Err(e) => {
tracing::error!("Error querying InfluxDB (site_circuits_heat_map): {}", e);
return Err(anyhow::Error::msg("Unable to query influx"));
}
Ok(rows) => {
for row in rows.iter() {
if let Some(name) = circuit_map.get(&row.circuit_id) {
if let Some(hat) = sorter.get_mut(*name) {
hat.push((row.time, row.avg));
} else {
sorter.insert(name.to_string(), vec![(row.time, row.avg)]);
}
}
}
}
}
Ok(())
}
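/// Builds the RTT heat map for one site: child sites come from the `tree`
/// measurement, and the circuits directly attached to the site are merged in via
/// `site_circuits_heat_map` before the `SiteHeat` response is sent.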
pub async fn site_heat_map(
cnn: &Pool<Postgres>,
socket: &mut WebSocket,
key: &str,
site_name: &str,
period: InfluxTimePeriod,
) -> anyhow::Result<()> {
if let Some(org) = get_org_details(cnn, key).await {
let influx_url = format!("http://{}:8086", org.influx_host);
let client = Client::new(influx_url, &org.influx_org, &org.influx_token);
// Get the site index
let site_id = pgdb::get_site_id_from_name(cnn, key, site_name).await?;
// Get sites where parent=site_id (for this setup)
let hosts: Vec<String> =
query("SELECT DISTINCT site_name FROM site_tree WHERE key=$1 AND parent=$2")
.bind(key)
.bind(site_id)
.fetch_all(cnn)
.await?
.iter()
.map(|row| row.try_get("site_name").unwrap())
.collect();
let mut host_filter = "filter(fn: (r) => ".to_string();
for host in hosts.iter() {
host_filter += &format!("r[\"node_name\"] == \"{host}\" or ");
}
host_filter = host_filter[0..host_filter.len() - 4].to_string();
host_filter += ")";
if host_filter.ends_with("(r))") {
host_filter =
"filter(fn: (r) => r[\"node_name\"] == \"bad_sheep_no_data\")".to_string();
}
// Query influx for RTT averages
let qs = format!(
"from(bucket: \"{}\")
|> {}
|> filter(fn: (r) => r[\"_measurement\"] == \"tree\")
|> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
|> filter(fn: (r) => r[\"_field\"] == \"rtt_avg\")
|> {}
|> {}
|> yield(name: \"last\")",
org.influx_bucket,
period.range(),
org.key,
host_filter,
period.aggregate_window()
);
//println!("{qs}\n\n");
let query = Query::new(qs);
let rows = client.query::<HeatRow>(Some(query)).await;
match rows {
Err(e) => {
tracing::error!("Error querying InfluxDB (site-heat-map): {}", e);
return Err(anyhow::Error::msg("Unable to query influx"));
}
Ok(rows) => {
let mut sorter: HashMap<String, Vec<(DateTime<FixedOffset>, f64)>> = HashMap::new();
for row in rows.iter() {
if let Some(hat) = sorter.get_mut(&row.node_name) {
hat.push((row.time, row.rtt_avg));
} else {
sorter.insert(row.node_name.clone(), vec![(row.time, row.rtt_avg)]);
}
}
site_circuits_heat_map(cnn, key, site_name, period, &mut sorter, client, &org)
.await?;
send_response(socket, WasmResponse::SiteHeat { data: sorter }).await;
}
}
}
Ok(())
}
#[derive(Serialize)]
struct HeatMessage {
msg: String,
data: HashMap<String, Vec<(DateTime<FixedOffset>, f64)>>,
}
#[derive(Debug, FromDataPoint)]
pub struct HeatRow {
pub node_name: String,
pub rtt_avg: f64,
pub time: DateTime<FixedOffset>,
}
impl Default for HeatRow {
fn default() -> Self {
Self {
node_name: "".to_string(),
rtt_avg: 0.0,
time: DateTime::<Utc>::MIN_UTC.into(),
}
}
}
#[derive(Debug, FromDataPoint)]
pub struct HeatCircuitRow {
pub circuit_id: String,
pub avg: f64,
pub time: DateTime<FixedOffset>,
}
impl Default for HeatCircuitRow {
fn default() -> Self {
Self {
circuit_id: "".to_string(),
avg: 0.0,
time: DateTime::<Utc>::MIN_UTC.into(),
}
}
}

View File

@ -0,0 +1,20 @@
use axum::extract::ws::WebSocket;
use pgdb::sqlx::{Pool, Postgres};
use serde::Serialize;
use wasm_pipe_types::{SiteTree, WasmResponse};
use crate::web::wss::send_response;
use super::site_tree::tree_to_host;
#[derive(Serialize)]
struct SiteInfoMessage {
msg: String,
data: SiteTree,
}
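/// Looks up a single site and sends its tree entry as a `SiteInfo` response.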
pub async fn send_site_info(cnn: &Pool<Postgres>, socket: &mut WebSocket, key: &str, site_id: &str) {
if let Ok(host) = pgdb::get_site_info(cnn, key, site_id).await {
let host = tree_to_host(host);
send_response(socket, WasmResponse::SiteInfo { data: host }).await;
}
}

View File

@ -0,0 +1,47 @@
use axum::extract::ws::WebSocket;
use pgdb::sqlx::{Pool, Postgres};
use crate::web::wss::send_response;
pub async fn send_site_parents(
cnn: &Pool<Postgres>,
socket: &mut WebSocket,
key: &str,
site_name: &str,
) {
if let Ok(parents) = pgdb::get_parent_list(cnn, key, site_name).await {
send_response(socket, wasm_pipe_types::WasmResponse::SiteParents { data: parents }).await;
}
let child_result = pgdb::get_child_list(cnn, key, site_name).await;
if let Ok(children) = child_result {
send_response(socket, wasm_pipe_types::WasmResponse::SiteChildren { data: children }).await;
} else {
tracing::error!("Error getting children: {:?}", child_result);
}
}
pub async fn send_circuit_parents(
cnn: &Pool<Postgres>,
socket: &mut WebSocket,
key: &str,
circuit_id: &str,
) {
if let Ok(parents) = pgdb::get_circuit_parent_list(cnn, key, circuit_id).await {
send_response(socket, wasm_pipe_types::WasmResponse::SiteParents { data: parents }).await;
}
}
pub async fn send_root_parents(
cnn: &Pool<Postgres>,
socket: &mut WebSocket,
key: &str,
) {
let site_name = "Root";
let child_result = pgdb::get_child_list(cnn, key, site_name).await;
if let Ok(children) = child_result {
send_response(socket, wasm_pipe_types::WasmResponse::SiteChildren { data: children }).await;
} else {
tracing::error!("Error getting children: {:?}", child_result);
}
}

View File

@ -0,0 +1,31 @@
use axum::extract::ws::WebSocket;
use pgdb::{
sqlx::{Pool, Postgres},
TreeNode,
};
use wasm_pipe_types::SiteTree;
use crate::web::wss::send_response;
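/// Sends the immediate children of `parent` in the site tree as a `SiteTree` response.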
pub async fn send_site_tree(cnn: &Pool<Postgres>, socket: &mut WebSocket, key: &str, parent: &str) {
    match pgdb::get_site_tree(cnn, key, parent).await {
        Ok(tree) => {
            let tree = tree
                .into_iter()
                .map(tree_to_host)
                .collect::<Vec<SiteTree>>();
            send_response(socket, wasm_pipe_types::WasmResponse::SiteTree { data: tree }).await;
        }
        Err(e) => tracing::error!("Error fetching site tree: {e:?}"),
    }
}
pub(crate) fn tree_to_host(row: TreeNode) -> SiteTree {
SiteTree {
index: row.index,
site_name: row.site_name,
site_type: row.site_type,
parent: row.parent,
max_down: row.max_down,
max_up: row.max_up,
current_down: row.current_down,
current_up: row.current_up,
current_rtt: row.current_rtt,
}
}

View File

@ -0,0 +1,319 @@
use std::collections::HashMap;
mod site_stack;
use axum::extract::ws::WebSocket;
use futures::future::join_all;
use influxdb2::{Client, models::Query};
use pgdb::{sqlx::{Pool, Postgres}, organization_cache::get_org_details};
use wasm_pipe_types::{ThroughputHost, Throughput};
use crate::web::wss::send_response;
use self::throughput_row::{ThroughputRow, ThroughputRowBySite, ThroughputRowByCircuit};
use super::time_period::InfluxTimePeriod;
mod throughput_row;
pub use site_stack::send_site_stack_map;
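/// Sends a `BitsChart` response containing the throughput series of every shaper
/// node belonging to the organization identified by `key`.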
pub async fn send_throughput_for_all_nodes(cnn: &Pool<Postgres>, socket: &mut WebSocket, key: &str, period: InfluxTimePeriod) -> anyhow::Result<()> {
let nodes = get_throughput_for_all_nodes(cnn, key, period).await?;
send_response(socket, wasm_pipe_types::WasmResponse::BitsChart { nodes }).await;
Ok(())
}
pub async fn send_throughput_for_all_nodes_by_site(cnn: &Pool<Postgres>, socket: &mut WebSocket, key: &str, site_name: String, period: InfluxTimePeriod) -> anyhow::Result<()> {
let nodes = get_throughput_for_all_nodes_by_site(cnn, key, period, &site_name).await?;
send_response(socket, wasm_pipe_types::WasmResponse::BitsChart { nodes }).await;
Ok(())
}
pub async fn send_throughput_for_all_nodes_by_circuit(cnn: &Pool<Postgres>, socket: &mut WebSocket, key: &str, circuit_id: String, period: InfluxTimePeriod) -> anyhow::Result<()> {
let nodes = get_throughput_for_all_nodes_by_circuit(cnn, key, period, &circuit_id).await?;
send_response(socket, wasm_pipe_types::WasmResponse::BitsChart { nodes }).await;
Ok(())
}
pub async fn send_throughput_for_node(cnn: &Pool<Postgres>, socket: &mut WebSocket, key: &str, period: InfluxTimePeriod, node_id: String, node_name: String) -> anyhow::Result<()> {
let node = get_throughput_for_node(cnn, key, node_id, node_name, period).await?;
send_response(socket, wasm_pipe_types::WasmResponse::BitsChart { nodes: vec![node] }).await;
Ok(())
}
pub async fn get_throughput_for_all_nodes(cnn: &Pool<Postgres>, key: &str, period: InfluxTimePeriod) -> anyhow::Result<Vec<ThroughputHost>> {
let node_status = pgdb::node_status(cnn, key).await?;
let mut futures = Vec::new();
for node in node_status {
futures.push(get_throughput_for_node(
cnn,
key,
node.node_id.to_string(),
node.node_name.to_string(),
period.clone(),
));
}
let all_nodes: anyhow::Result<Vec<ThroughputHost>> = join_all(futures).await
.into_iter().collect();
all_nodes
}
pub async fn get_throughput_for_all_nodes_by_site(cnn: &Pool<Postgres>, key: &str, period: InfluxTimePeriod, site_name: &str) -> anyhow::Result<Vec<ThroughputHost>> {
let node_status = pgdb::node_status(cnn, key).await?;
let mut futures = Vec::new();
for node in node_status {
futures.push(get_throughput_for_node_by_site(
cnn,
key,
node.node_id.to_string(),
node.node_name.to_string(),
site_name.to_string(),
period.clone(),
));
}
let all_nodes: anyhow::Result<Vec<ThroughputHost>> = join_all(futures).await
.into_iter().collect();
all_nodes
}
pub async fn get_throughput_for_all_nodes_by_circuit(cnn: &Pool<Postgres>, key: &str, period: InfluxTimePeriod, circuit_id: &str) -> anyhow::Result<Vec<ThroughputHost>> {
let node_status = pgdb::node_status(cnn, key).await?;
let mut futures = Vec::new();
for node in node_status {
futures.push(get_throughput_for_node_by_circuit(
cnn,
key,
node.node_id.to_string(),
node.node_name.to_string(),
circuit_id.to_string(),
period.clone(),
));
}
let mut all_nodes = Vec::new();
for node in (join_all(futures).await).into_iter().flatten() {
all_nodes.extend(node);
}
Ok(all_nodes)
}
pub async fn get_throughput_for_node(
cnn: &Pool<Postgres>,
key: &str,
node_id: String,
node_name: String,
period: InfluxTimePeriod,
) -> anyhow::Result<ThroughputHost> {
if let Some(org) = get_org_details(cnn, key).await {
let influx_url = format!("http://{}:8086", org.influx_host);
let client = Client::new(influx_url, &org.influx_org, &org.influx_token);
let qs = format!(
"from(bucket: \"{}\")
|> {}
|> filter(fn: (r) => r[\"_measurement\"] == \"bits\")
|> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
|> filter(fn: (r) => r[\"host_id\"] == \"{}\")
|> {}
|> yield(name: \"last\")",
org.influx_bucket, period.range(), org.key, node_id, period.aggregate_window()
);
let query = Query::new(qs);
let rows = client.query::<ThroughputRow>(Some(query)).await;
match rows {
Err(e) => {
tracing::error!("Error querying InfluxDB (throughput node): {}", e);
return Err(anyhow::Error::msg("Unable to query influx"));
}
Ok(rows) => {
// Parse and send the data
//println!("{rows:?}");
let mut down = Vec::new();
let mut up = Vec::new();
// Fill download
for row in rows.iter().filter(|r| r.direction == "down") {
down.push(Throughput {
value: row.avg,
date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
l: row.min,
u: row.max - row.min,
});
}
// Fill upload
for row in rows.iter().filter(|r| r.direction == "up") {
up.push(Throughput {
value: row.avg,
date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
l: row.min,
u: row.max - row.min,
});
}
return Ok(ThroughputHost{
node_id,
node_name,
down,
up,
});
}
}
}
Err(anyhow::Error::msg("Unable to query influx"))
}
pub async fn get_throughput_for_node_by_site(
cnn: &Pool<Postgres>,
key: &str,
node_id: String,
node_name: String,
site_name: String,
period: InfluxTimePeriod,
) -> anyhow::Result<ThroughputHost> {
if let Some(org) = get_org_details(cnn, key).await {
let influx_url = format!("http://{}:8086", org.influx_host);
let client = Client::new(influx_url, &org.influx_org, &org.influx_token);
let qs = format!(
"from(bucket: \"{}\")
|> {}
|> filter(fn: (r) => r[\"_measurement\"] == \"tree\")
|> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
|> filter(fn: (r) => r[\"host_id\"] == \"{}\")
|> filter(fn: (r) => r[\"node_name\"] == \"{}\")
|> filter(fn: (r) => r[\"_field\"] == \"bits_avg\" or r[\"_field\"] == \"bits_max\" or r[\"_field\"] == \"bits_min\")
|> {}
|> yield(name: \"last\")",
org.influx_bucket, period.range(), org.key, node_id, site_name, period.aggregate_window()
);
let query = Query::new(qs);
//println!("{:?}", query);
let rows = client.query::<ThroughputRowBySite>(Some(query)).await;
match rows {
Err(e) => {
tracing::error!("Error querying InfluxDB (throughput site): {}", e);
return Err(anyhow::Error::msg("Unable to query influx"));
}
Ok(rows) => {
// Parse and send the data
//println!("{rows:?}");
let mut down = Vec::new();
let mut up = Vec::new();
// Fill download
for row in rows.iter().filter(|r| r.direction == "down") {
down.push(Throughput {
value: row.bits_avg,
date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
l: row.bits_min,
u: row.bits_max - row.bits_min,
});
}
// Fill upload
for row in rows.iter().filter(|r| r.direction == "up") {
up.push(Throughput {
value: row.bits_avg,
date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
l: row.bits_min,
u: row.bits_max - row.bits_min,
});
}
return Ok(ThroughputHost{
node_id,
node_name,
down,
up,
});
}
}
}
Err(anyhow::Error::msg("Unable to query influx"))
}
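/// Per-circuit throughput for a single shaper node, taken from the `host_bits`
/// measurement. Results are grouped by device IP, so one `ThroughputHost` is
/// returned per IP address seen on the circuit.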
pub async fn get_throughput_for_node_by_circuit(
cnn: &Pool<Postgres>,
key: &str,
node_id: String,
node_name: String,
circuit_id: String,
period: InfluxTimePeriod,
) -> anyhow::Result<Vec<ThroughputHost>> {
if let Some(org) = get_org_details(cnn, key).await {
let influx_url = format!("http://{}:8086", org.influx_host);
let client = Client::new(influx_url, &org.influx_org, &org.influx_token);
let qs = format!(
"from(bucket: \"{}\")
|> {}
|> filter(fn: (r) => r[\"_measurement\"] == \"host_bits\")
|> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
|> filter(fn: (r) => r[\"host_id\"] == \"{}\")
|> filter(fn: (r) => r[\"circuit_id\"] == \"{}\")
|> filter(fn: (r) => r[\"_field\"] == \"avg\" or r[\"_field\"] == \"max\" or r[\"_field\"] == \"min\")
|> {}
|> yield(name: \"last\")",
org.influx_bucket, period.range(), org.key, node_id, circuit_id, period.aggregate_window()
);
let query = Query::new(qs);
//println!("{:?}", query);
let rows = client.query::<ThroughputRowByCircuit>(Some(query)).await;
match rows {
Err(e) => {
tracing::error!(" (throughput circuit): {}", e);
return Err(anyhow::Error::msg("Unable to query influx"));
}
Ok(rows) => {
// Parse and send the data
//println!("{rows:?}");
let mut sorter: HashMap<String, (Vec<Throughput>, Vec<Throughput>)> = HashMap::new();
// Fill download
for row in rows.iter().filter(|r| r.direction == "down") {
let tp = Throughput {
value: row.avg,
date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
l: row.min,
u: row.max - row.min,
};
if let Some(hat) = sorter.get_mut(&row.ip) {
hat.0.push(tp);
} else {
sorter.insert(row.ip.clone(), (vec![tp], Vec::new()));
}
}
// Fill upload
for row in rows.iter().filter(|r| r.direction == "up") {
let tp = Throughput {
value: row.avg,
date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
l: row.min,
u: row.max - row.min,
};
if let Some(hat) = sorter.get_mut(&row.ip) {
hat.1.push(tp);
} else {
sorter.insert(row.ip.clone(), (Vec::new(), vec![tp]));
}
}
let mut result = Vec::new();
for (ip, (down, up)) in sorter.iter() {
result.push(ThroughputHost{
node_id: node_id.clone(),
node_name: format!("{ip} {node_name}"),
down: down.clone(),
up: up.clone(),
});
}
return Ok(result);
}
}
}
Err(anyhow::Error::msg("Unable to query influx"))
}

View File

@ -0,0 +1,111 @@
use crate::web::wss::{queries::time_period::InfluxTimePeriod, send_response};
use axum::extract::ws::WebSocket;
use pgdb::sqlx::{Pool, Postgres, Row};
use wasm_pipe_types::Throughput;
use super::{get_throughput_for_all_nodes_by_circuit, get_throughput_for_all_nodes_by_site};
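/// Builds the stacked throughput chart for a site: one series per child site and
/// per directly attached circuit, sorted by total throughput. Anything beyond the
/// nine largest series is collapsed into a single "others" entry.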
pub async fn send_site_stack_map(
cnn: &Pool<Postgres>,
socket: &mut WebSocket,
key: &str,
period: InfluxTimePeriod,
site_id: String,
) -> anyhow::Result<()> {
let site_index = pgdb::get_site_id_from_name(cnn, key, &site_id).await?;
//println!("Site index: {site_index}");
let sites: Vec<String> =
pgdb::sqlx::query("SELECT DISTINCT site_name FROM site_tree WHERE key=$1 AND parent=$2")
.bind(key)
.bind(site_index)
.fetch_all(cnn)
.await?
.iter()
.map(|row| row.try_get("site_name").unwrap())
.collect();
//println!("{sites:?}");
let circuits: Vec<(String, String)> =
pgdb::sqlx::query("SELECT DISTINCT circuit_id, circuit_name FROM shaped_devices WHERE key=$1 AND parent_node=$2")
.bind(key)
.bind(site_id)
.fetch_all(cnn)
.await?
.iter()
.map(|row| (row.try_get("circuit_id").unwrap(), row.try_get("circuit_name").unwrap()))
.collect();
//println!("{circuits:?}");
let mut result = Vec::new();
for site in sites.into_iter() {
let mut throughput =
get_throughput_for_all_nodes_by_site(cnn, key, period.clone(), &site).await?;
throughput
.iter_mut()
.for_each(|row| row.node_name = site.clone());
result.extend(throughput);
}
for circuit in circuits.into_iter() {
let mut throughput =
get_throughput_for_all_nodes_by_circuit(cnn, key, period.clone(), &circuit.0).await?;
throughput
.iter_mut()
.for_each(|row| row.node_name = circuit.1.clone());
result.extend(throughput);
}
//println!("{result:?}");
// Sort by total
result.sort_by(|a, b| {
b.total()
.partial_cmp(&a.total())
.unwrap_or(std::cmp::Ordering::Equal)
});
// If there are more than 9 entries, create an "others" to handle the remainder
if result.len() > 9 {
let mut others = wasm_pipe_types::ThroughputHost {
node_id: "others".to_string(),
node_name: "others".to_string(),
down: Vec::new(),
up: Vec::new(),
};
result[0].down.iter().for_each(|x| {
others.down.push(Throughput {
value: 0.0,
date: x.date.clone(),
l: 0.0,
u: 0.0,
});
});
result[0].up.iter().for_each(|x| {
others.up.push(Throughput {
value: 0.0,
date: x.date.clone(),
l: 0.0,
u: 0.0,
});
});
result.iter().skip(9).for_each(|row| {
row.down.iter().enumerate().for_each(|(i, x)| {
others.down[i].value += x.value;
});
row.up.iter().enumerate().for_each(|(i, x)| {
others.up[i].value += x.value;
});
});
result.truncate(9);
result.push(others);
}
send_response(
socket,
wasm_pipe_types::WasmResponse::SiteStack { nodes: result },
)
.await;
Ok(())
}

View File

@ -0,0 +1,71 @@
use chrono::{DateTime, FixedOffset, Utc};
use influxdb2::FromDataPoint;
#[derive(Debug, FromDataPoint)]
pub struct ThroughputRow {
pub direction: String,
pub host_id: String,
pub min: f64,
pub max: f64,
pub avg: f64,
pub time: DateTime<FixedOffset>,
}
impl Default for ThroughputRow {
fn default() -> Self {
Self {
direction: "".to_string(),
host_id: "".to_string(),
min: 0.0,
max: 0.0,
avg: 0.0,
time: DateTime::<Utc>::MIN_UTC.into(),
}
}
}
#[derive(Debug, FromDataPoint)]
pub struct ThroughputRowBySite {
pub direction: String,
pub host_id: String,
pub bits_min: f64,
pub bits_max: f64,
pub bits_avg: f64,
pub time: DateTime<FixedOffset>,
}
impl Default for ThroughputRowBySite {
fn default() -> Self {
Self {
direction: "".to_string(),
host_id: "".to_string(),
bits_min: 0.0,
bits_max: 0.0,
bits_avg: 0.0,
time: DateTime::<Utc>::MIN_UTC.into(),
}
}
}
#[derive(Debug, FromDataPoint)]
pub struct ThroughputRowByCircuit {
pub direction: String,
pub ip: String,
pub min: f64,
pub max: f64,
pub avg: f64,
pub time: DateTime<FixedOffset>,
}
impl Default for ThroughputRowByCircuit {
fn default() -> Self {
Self {
direction: "".to_string(),
ip: "".to_string(),
min: 0.0,
max: 0.0,
avg: 0.0,
time: DateTime::<Utc>::MIN_UTC.into(),
}
}
}

View File

@ -0,0 +1,55 @@
#[derive(Clone)]
pub struct InfluxTimePeriod {
start: String,
aggregate: String,
}
impl InfluxTimePeriod {
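    /// Maps a UI period string (for example "1h" or "7d") to an InfluxDB `range()`
    /// start and an `aggregateWindow()` size; unrecognised strings fall back to the
    /// five-minute defaults.
    ///
    /// Illustrative sketch (not a doctest):
    ///
    /// ```text
    /// let p = InfluxTimePeriod::new("1h");
    /// p.range();            // "range(start: -60m)"
    /// p.aggregate_window(); // "aggregateWindow(every: 1m, fn: mean, createEmpty: false)"
    /// ```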
pub fn new(period: &str) -> Self {
let start = match period {
"5m" => "-5m",
"15m" => "-15m",
"1h" => "-60m",
"6h" => "-360m",
"12h" => "-720m",
"24h" => "-1440m",
"7d" => "-10080m",
"28d" => "-40320m",
_ => "-5m",
};
let aggregate = match period {
"5m" => "10s",
"15m" => "30s",
"1h" => "1m",
"6h" => "6m",
"12h" => "12m",
"24h" => "24m",
"7d" => "210m",
"28d" => "4h",
_ => "10s",
};
Self {
start: start.to_string(),
aggregate: aggregate.to_string(),
}
}
pub fn range(&self) -> String {
format!("range(start: {})", self.start)
}
pub fn aggregate_window(&self) -> String {
format!(
"aggregateWindow(every: {}, fn: mean, createEmpty: false)",
self.aggregate
)
}
}
impl From<&String> for InfluxTimePeriod {
fn from(period: &String) -> Self {
Self::new(period)
}
}

View File

@ -0,0 +1,20 @@
# Site Build
This folder compiles and packages the website served by `lts_node`. The built
site must be made available to the `lts_node` process.
Steps: TBA
## Requirements
To run the build (as opposed to shipping pre-built files), you need to
install `esbuild` and `npm` (ugh). You can do this with:
```bash
cd site_build
sudo apt-get install npm
npm install
```
You can run the build manually by running `./esbuild.sh` in this
directory.

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,18 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<title>LibreQoS Long-Term Statistics</title>
<link rel="shortcut icon" href="#" />
<script type="module" src="/app.js"></script>
<link rel="stylesheet" href="style.css" />
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css" />
</head>
<body>
<div id="main"></div>
<footer>Copyright &copy; 2023 LibreQoS</footer>
</body>
</html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,18 @@
[package]
name = "pgdb"
version = "0.1.0"
edition = "2021"
[dependencies]
once_cell = "1"
thiserror = "1"
env_logger = "0"
log = "0"
lqos_bus = { path = "../../lqos_bus" }
sqlx = { version = "0.6.3", features = [ "runtime-tokio-rustls", "postgres" ] }
futures = "0"
uuid = { version = "1", features = ["v4", "fast-rng" ] }
influxdb2 = "0"
sha2 = "0"
dashmap = "5"
lqos_utils = { path = "../../lqos_utils" }

View File

@ -0,0 +1,184 @@
-- Creates the initial tables for the license server
-- We're using Trigrams for faster text search
CREATE EXTENSION pg_trgm;
CREATE TABLE public.licenses (
key character varying(254) NOT NULL,
stats_host integer NOT NULL
);
CREATE TABLE public.organizations (
key character varying(254) NOT NULL,
name character varying(254) NOT NULL,
influx_host character varying(254) NOT NULL,
influx_org character varying(254) NOT NULL,
influx_token character varying(254) NOT NULL,
influx_bucket character varying(254) NOT NULL
);
CREATE TABLE public.shaper_nodes (
license_key character varying(254) NOT NULL,
node_id character varying(254) NOT NULL,
node_name character varying(254) NOT NULL,
last_seen timestamp without time zone DEFAULT now() NOT NULL,
public_key bytea
);
CREATE TABLE public.site_tree
(
key character varying(254) NOT NULL,
site_name character varying(254) NOT NULL,
host_id character varying(254) NOT NULL,
index integer NOT NULL,
parent integer NOT NULL,
site_type character varying(32),
max_up integer NOT NULL DEFAULT 0,
max_down integer NOT NULL DEFAULT 0,
current_up integer NOT NULL DEFAULT 0,
current_down integer NOT NULL DEFAULT 0,
current_rtt integer NOT NULL DEFAULT 0,
PRIMARY KEY (key, site_name, host_id)
);
CREATE TABLE public.shaped_devices
(
key character varying(254) NOT NULL,
node_id character varying(254) NOT NULL,
circuit_id character varying(254) NOT NULL,
device_id character varying(254) NOT NULL,
circuit_name character varying(254) NOT NULL DEFAULT '',
device_name character varying(254) NOT NULL DEFAULT '',
parent_node character varying(254) NOT NULL DEFAULT '',
mac character varying(254) NOT NULL DEFAULT '',
download_min_mbps integer NOT NULL DEFAULT 0,
upload_min_mbps integer NOT NULL DEFAULT 0,
download_max_mbps integer NOT NULL DEFAULT 0,
upload_max_mbps integer NOT NULL DEFAULT 0,
comment text,
PRIMARY KEY (key, node_id, circuit_id, device_id)
);
CREATE TABLE public.shaped_device_ip
(
key character varying(254) COLLATE pg_catalog."default" NOT NULL,
node_id character varying(254) COLLATE pg_catalog."default" NOT NULL,
circuit_id character varying(254) COLLATE pg_catalog."default" NOT NULL,
ip_range character varying(254) COLLATE pg_catalog."default" NOT NULL,
subnet integer NOT NULL,
CONSTRAINT shaped_device_ip_pkey PRIMARY KEY (key, node_id, circuit_id, ip_range, subnet)
);
CREATE TABLE public.stats_hosts (
id integer NOT NULL,
ip_address character varying(128) NOT NULL,
can_accept_new_clients boolean NOT NULL DEFAULT true,
influx_host character varying(128) NOT NULL,
api_key character varying(255) NOT NULL
);
CREATE SEQUENCE public.stats_hosts_id_seq
AS integer
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
ALTER TABLE ONLY public.stats_hosts
ALTER COLUMN id SET DEFAULT nextval('public.stats_hosts_id_seq'::regclass);
ALTER TABLE ONLY public.licenses
ADD CONSTRAINT licenses_pkey PRIMARY KEY (key);
ALTER TABLE ONLY public.organizations
ADD CONSTRAINT pk_organizations PRIMARY KEY (key);
ALTER TABLE ONLY public.shaper_nodes
ADD CONSTRAINT shaper_nodes_pk PRIMARY KEY (license_key, node_id);
ALTER TABLE ONLY public.stats_hosts
ADD CONSTRAINT stats_hosts_pkey PRIMARY KEY (id);
ALTER TABLE ONLY public.organizations
ADD CONSTRAINT organizations_license_fk FOREIGN KEY (key) REFERENCES public.licenses(key);
ALTER TABLE ONLY public.licenses
ADD CONSTRAINT stats_host_fk FOREIGN KEY (stats_host) REFERENCES public.stats_hosts(id) NOT VALID;
CREATE TABLE public.logins
(
key character varying(254) NOT NULL,
username character varying(64) NOT NULL,
password_hash character varying(64) NOT NULL,
nicename character varying(64) NOT NULL,
CONSTRAINT pk_logins_licenses PRIMARY KEY (key, username),
CONSTRAINT fk_login_licenses FOREIGN KEY (key)
REFERENCES public.licenses (key) MATCH SIMPLE
ON UPDATE NO ACTION
ON DELETE NO ACTION
NOT VALID
);
CREATE TABLE public.active_tokens
(
key character varying(254) NOT NULL,
token character varying(254) NOT NULL,
username character varying(64) NOT NULL,
expires timestamp without time zone NOT NULL DEFAULT NOW() + interval '2 hours',
PRIMARY KEY (token)
);
CREATE TABLE public.uisp_devices_ext
(
key character varying(254) NOT NULL,
device_id character varying(254) NOT NULL,
name character varying(254) NOT NULL DEFAULT '',
model character varying(254) NOT NULL DEFAULT '',
firmware character varying(64) NOT NULL DEFAULT '',
status character varying(64) NOT NULL DEFAULT '',
mode character varying(64) NOT NULL DEFAULT '',
channel_width integer NOT NULL DEFAULT 0,
tx_power integer NOT NULL DEFAULT 0,
PRIMARY KEY (key, device_id)
);
CREATE TABLE public.uisp_devices_interfaces
(
key character varying(254) NOT NULL,
device_id character varying(254) NOT NULL,
id serial NOT NULL,
name character varying(64) NOT NULL DEFAULT '',
mac character varying(64) NOT NULL DEFAULT '',
status character varying(64) NOT NULL DEFAULT '',
speed character varying(64) NOT NULL DEFAULT '',
ip_list character varying(254) NOT NULL DEFAULT '',
PRIMARY KEY (key, device_id, id)
);
---- Indices
CREATE INDEX site_tree_key
ON public.site_tree USING btree
(key ASC NULLS LAST)
;
CREATE INDEX site_tree_key_parent
ON public.site_tree USING btree
(key ASC NULLS LAST, parent ASC NULLS LAST)
;
CREATE INDEX shaped_devices_key_circuit_id
ON public.shaped_devices USING btree
(key ASC NULLS LAST, circuit_id ASC NULLS LAST)
;
CREATE INDEX stats_host_ip
ON public.stats_hosts USING btree
(ip_address ASC NULLS LAST)
;
CREATE INDEX shaper_nodes_license_key_idx
ON public.shaper_nodes USING btree
(license_key ASC NULLS LAST)
;

View File

@ -0,0 +1,83 @@
use sqlx::{Pool, Postgres, FromRow};
use crate::license::StatsHostError;
#[derive(Debug, FromRow)]
pub struct CircuitInfo {
pub circuit_name: String,
pub device_id: String,
pub device_name: String,
pub parent_node: String,
pub mac: String,
pub download_min_mbps: i32,
pub download_max_mbps: i32,
pub upload_min_mbps: i32,
pub upload_max_mbps: i32,
pub comment: String,
pub ip_range: String,
pub subnet: i32,
}
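/// Returns one row per (device, IP range) for a circuit, joining `shaped_devices`
/// with `shaped_device_ip` on the license key and circuit id.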
pub async fn get_circuit_info(
cnn: &Pool<Postgres>,
key: &str,
circuit_id: &str,
) -> Result<Vec<CircuitInfo>, StatsHostError> {
const SQL: &str = "SELECT circuit_name, device_id, device_name, parent_node, mac, download_min_mbps, download_max_mbps, upload_min_mbps, upload_max_mbps, comment, ip_range, subnet FROM shaped_devices INNER JOIN shaped_device_ip ON shaped_device_ip.key = shaped_devices.key AND shaped_device_ip.circuit_id = shaped_devices.circuit_id WHERE shaped_devices.key=$1 AND shaped_devices.circuit_id=$2";
sqlx::query_as::<_, CircuitInfo>(SQL)
.bind(key)
.bind(circuit_id)
.fetch_all(cnn)
.await
.map_err(|e| StatsHostError::DatabaseError(e.to_string()))
}
#[derive(Debug, FromRow)]
pub struct DeviceInfoExt {
pub device_id: String,
pub name: String,
pub model: String,
pub firmware: String,
pub status: String,
pub mode: String,
pub channel_width: i32,
pub tx_power: i32,
}
pub async fn get_device_info_ext(
cnn: &Pool<Postgres>,
key: &str,
device_id: &str,
) -> Result<DeviceInfoExt, StatsHostError> {
sqlx::query_as::<_, DeviceInfoExt>("SELECT device_id, name, model, firmware, status, mode, channel_width, tx_power FROM uisp_devices_ext WHERE key=$1 AND device_id=$2")
.bind(key)
.bind(device_id)
.fetch_one(cnn)
.await
.map_err(|e| StatsHostError::DatabaseError(e.to_string()))
}
#[derive(Debug, FromRow)]
pub struct DeviceInterfaceExt {
pub name: String,
pub mac: String,
pub status: String,
pub speed: String,
pub ip_list: String,
}
pub async fn get_device_interfaces_ext(
cnn: &Pool<Postgres>,
key: &str,
device_id: &str,
) -> Result<Vec<DeviceInterfaceExt>, StatsHostError>
{
sqlx::query_as::<_, DeviceInterfaceExt>("SELECT name, mac, status, speed, ip_list FROM uisp_devices_interfaces WHERE key=$1 AND device_id=$2")
.bind(key)
.bind(device_id)
.fetch_all(cnn)
.await
.map_err(|e| StatsHostError::DatabaseError(e.to_string()))
}

View File

@ -0,0 +1,37 @@
//! Manages access to the safely stored connection string, in `/etc/lqdb`.
//! Failure to obtain a database connection is a fatal error.
//! The connection string is read once, on the first call to `get_connection_string()`.
//! Please be careful to never include `/etc/lqdb` in any git commits.
use std::path::Path;
use std::fs::File;
use std::io::Read;
use once_cell::sync::Lazy;
pub static CONNECTION_STRING: Lazy<String> = Lazy::new(read_connection_string);
/// Read the connection string from /etc/lqdb
/// Called by the `Lazy` on CONNECTION_STRING
fn read_connection_string() -> String {
let path = Path::new("/etc/lqdb");
if !path.exists() {
log::error!("{} does not exist", path.display());
panic!("{} does not exist", path.display());
}
match File::open(path) {
Ok(mut file) => {
let mut buf = String::new();
if let Ok(_size) = file.read_to_string(&mut buf) {
buf
} else {
log::error!("Could not read {}", path.display());
panic!("Could not read {}", path.display());
}
}
Err(e) => {
log::error!("Could not open {}: {e:?}", path.display());
panic!("Could not open {}: {e:?}", path.display());
}
}
}

View File

@ -0,0 +1,4 @@
mod connection_string;
mod pool;
pub use pool::get_connection_pool;

View File

@ -0,0 +1,13 @@
use sqlx::{postgres::PgPoolOptions, Postgres, Pool};
use super::connection_string::CONNECTION_STRING;
/// Obtain a connection pool to the database.
///
/// # Arguments
/// * `max_connections` - The maximum number of connections to the database.
pub async fn get_connection_pool(max_connections: u32) -> Result<Pool<Postgres>, sqlx::Error> {
PgPoolOptions::new()
.max_connections(max_connections)
.connect(&CONNECTION_STRING)
.await
}

View File

@ -0,0 +1,59 @@
use sqlx::{Pool, Postgres, Row};
use crate::license::StatsHostError;
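/// Registers a new stats host. Fails with `HostAlreadyExists` if the IP address is
/// already present; otherwise allocates the next id from `stats_hosts_id_seq` and
/// inserts the host as able to accept new clients.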
pub async fn add_stats_host(cnn: Pool<Postgres>, hostname: String, influx_host: String, api_key: String) -> Result<i64, StatsHostError> {
// Does the stats host already exist? We don't want duplicates
let row = sqlx::query("SELECT COUNT(*) AS count FROM stats_hosts WHERE ip_address=$1")
.bind(&hostname)
.fetch_one(&cnn)
.await
.map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
let count: i64 = row.try_get("count").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
if count != 0 {
return Err(StatsHostError::HostAlreadyExists);
}
// Get the new primary key
log::info!("Getting new primary key for stats host");
let row = sqlx::query("SELECT NEXTVAL('stats_hosts_id_seq') AS id")
.fetch_one(&cnn)
.await
.map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
let new_id: i64 = row.try_get("id").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
// Insert the stats host
log::info!("Inserting new stats host: {} ({})", hostname, new_id);
sqlx::query("INSERT INTO stats_hosts (id, ip_address, can_accept_new_clients, influx_host, api_key) VALUES ($1, $2, $3, $4, $5)")
.bind(new_id)
.bind(&hostname)
.bind(true)
.bind(&influx_host)
.bind(&api_key)
.execute(&cnn)
.await
.map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
Ok(new_id)
}
const FIND_STATS_HOST: &str = "SELECT a.id AS id, a.influx_host AS influx_host, a.api_key AS api_key
FROM stats_hosts a
WHERE can_accept_new_clients = true
ORDER BY (SELECT COUNT(organizations.\"key\") FROM organizations WHERE a.influx_host = influx_host)
LIMIT 1";
pub async fn find_emptiest_stats_host(cnn: Pool<Postgres>) -> Result<(i32, String, String), StatsHostError> {
let row = sqlx::query(FIND_STATS_HOST)
.fetch_one(&cnn)
.await
.map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
let id: i32 = row.try_get("id").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
let influx_host: String = row.try_get("influx_host").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
let api_key: String = row.try_get("api_key").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
Ok((id, influx_host, api_key))
}

View File

@ -0,0 +1,26 @@
mod connection;
mod license;
mod organization;
mod hosts;
mod orchestrator;
mod logins;
mod nodes;
mod search;
mod tree;
mod circuit;
pub mod organization_cache;
pub mod sqlx {
pub use sqlx::*;
}
pub use connection::get_connection_pool;
pub use license::{get_stats_host_for_key, insert_or_update_node_public_key, fetch_public_key};
pub use organization::{OrganizationDetails, get_organization};
pub use hosts::add_stats_host;
pub use orchestrator::create_free_trial;
pub use logins::{try_login, delete_user, add_user, refresh_token, token_to_credentials};
pub use nodes::{new_stats_arrived, node_status, NodeStatus};
pub use search::*;
pub use tree::*;
pub use circuit::*;

View File

@ -0,0 +1,87 @@
//! Handles license checks from the `license_server`.
use sqlx::{Pool, Postgres, Row};
use thiserror::Error;
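/// Returns the IP address of the stats host assigned to a license key, by joining
/// `licenses` with `stats_hosts`.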
pub async fn get_stats_host_for_key(cnn: Pool<Postgres>, key: &str) -> Result<String, StatsHostError> {
let row = sqlx::query("SELECT ip_address FROM licenses INNER JOIN stats_hosts ON stats_hosts.id = licenses.stats_host WHERE key=$1")
.bind(key)
.fetch_one(&cnn)
.await
.map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
let ip_address: &str = row.try_get("ip_address").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
log::info!("Found stats host for key: {}", ip_address);
Ok(ip_address.to_string())
}
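/// Upserts a shaper node's public key, keyed on (license_key, node_id). An update
/// also refreshes `last_seen` and the node name; more than one matching row is
/// treated as a database error.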
pub async fn insert_or_update_node_public_key(cnn: Pool<Postgres>, node_id: &str, node_name: &str, license_key: &str, public_key: &[u8]) -> Result<(), StatsHostError> {
let row = sqlx::query("SELECT COUNT(*) AS count FROM shaper_nodes WHERE node_id=$1 AND license_key=$2")
.bind(node_id)
.bind(license_key)
.fetch_one(&cnn)
.await
.map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
let count: i64 = row.try_get("count").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
match count {
0 => {
// Insert
log::info!("Inserting new node: {} {}", node_id, license_key);
sqlx::query("INSERT INTO shaper_nodes (license_key, node_id, public_key, node_name) VALUES ($1, $2, $3, $4)")
.bind(license_key)
.bind(node_id)
.bind(public_key)
.bind(node_name)
.execute(&cnn)
.await
.map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
}
1 => {
// Update
log::info!("Updating node: {} {}", node_id, license_key);
sqlx::query("UPDATE shaper_nodes SET public_key=$1, last_seen=NOW(), node_name=$4 WHERE node_id=$2 AND license_key=$3")
.bind(public_key)
.bind(node_id)
.bind(license_key)
.bind(node_name)
.execute(&cnn)
.await
.map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
}
_ => {
log::error!("Found multiple nodes with the same node_id and license_key");
return Err(StatsHostError::DatabaseError("Found multiple nodes with the same node_id and license_key".to_string()));
}
}
Ok(())
}
pub async fn fetch_public_key(cnn: Pool<Postgres>, license_key: &str, node_id: &str) -> Result<Vec<u8>, StatsHostError> {
let row = sqlx::query("SELECT public_key FROM shaper_nodes WHERE license_key=$1 AND node_id=$2")
.bind(license_key)
.bind(node_id)
.fetch_one(&cnn)
.await
.map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
let public_key: Vec<u8> = row.try_get("public_key").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
Ok(public_key)
}
#[derive(Debug, Error)]
pub enum StatsHostError {
#[error("Database error occurred")]
DatabaseError(String),
#[error("Host already exists")]
HostAlreadyExists,
#[error("Organization already exists")]
OrganizationAlreadyExists,
#[error("No available stats hosts")]
NoStatsHostsAvailable,
#[error("InfluxDB Error")]
InfluxError(String),
#[error("No such login")]
InvalidLogin,
}

View File

@ -0,0 +1,28 @@
use sqlx::{Pool, Postgres};
use crate::license::StatsHostError;
use super::hasher::hash_password;
pub async fn delete_user(cnn: Pool<Postgres>, key: &str, username: &str) -> Result<(), StatsHostError> {
sqlx::query("DELETE FROM logins WHERE key = $1 AND username = $2")
.bind(key)
.bind(username)
.execute(&cnn)
.await
.map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
Ok(())
}
pub async fn add_user(cnn: Pool<Postgres>, key: &str, username: &str, password: &str, nicename: &str) -> Result<(), StatsHostError> {
let password = hash_password(password);
sqlx::query("INSERT INTO logins (key, username, password_hash, nicename) VALUES ($1, $2, $3, $4)")
.bind(key)
.bind(username)
.bind(password)
.bind(nicename)
.execute(&cnn)
.await
.map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
Ok(())
}

View File

@ -0,0 +1,9 @@
use sha2::Sha256;
use sha2::Digest;
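/// Hashes a password with a fixed application-wide salt using SHA-256 and returns
/// the uppercase hex digest. Used by both `add_user` and `try_login`.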
pub(crate) fn hash_password(password: &str) -> String {
let salted = format!("!x{password}_SaltIsGoodForYou");
let mut sha256 = Sha256::new();
sha256.update(salted);
format!("{:X}", sha256.finalize())
}

View File

@ -0,0 +1,35 @@
use sqlx::{Pool, Postgres, Row};
use uuid::Uuid;
use crate::license::StatsHostError;
use super::{hasher::hash_password, token_cache::create_token};
#[derive(Debug, Clone)]
pub struct LoginDetails {
pub token: String,
pub license: String,
pub name: String,
}
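/// Verifies a username/password pair for a license key by comparing salted SHA-256
/// hashes, then issues a fresh UUID session token via `create_token`.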
pub async fn try_login(cnn: Pool<Postgres>, key: &str, username: &str, password: &str) -> Result<LoginDetails, StatsHostError> {
let password = hash_password(password);
let row = sqlx::query("SELECT nicename FROM logins WHERE key = $1 AND username = $2 AND password_hash = $3")
.bind(key)
.bind(username)
.bind(password)
.fetch_one(&cnn)
.await
.map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
let nicename: String = row.try_get("nicename").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
let uuid = Uuid::new_v4().to_string();
let details = LoginDetails {
token: uuid,
name: nicename,
license: key.to_string(),
};
create_token(&cnn, &details, key, username).await?;
Ok(details)
}

Some files were not shown because too many files have changed in this diff