Mirror of https://github.com/LibreQoE/LibreQoS.git (synced 2025-02-25 18:55:32 -06:00)
Commit 11e4e06e91

.gitignore (vendored)
@@ -63,6 +63,7 @@ src/rust/long_term_stats/site_build/node_modules
 src/rust/long_term_stats/site_build/output
 src/rust/long_term_stats/site_build/package-lock.json
 src/rust/long_term_stats/wasm_pipe/staging
+src/rust/long_term_stats/lts_node/deploy.sh

 # Ignore Rust build artifacts
 src/rust/target
@@ -18,8 +18,8 @@ Run ```sudo crontab -e``` and remove any entries pertaining to LibreQoS from v1.
 Use the deb package from the [latest v1.4 release](https://github.com/LibreQoE/LibreQoS/releases/).

 ```shell
-sudo echo "deb http://stats.libreqos.io/ubuntu jammy main" > /etc/apt/sources.list.d/libreqos.list
-wget -O - -q http://stats.libreqos.io/repo.asc | apt-key add -
+sudo echo "deb http://stats.libreqos.io/ubuntu jammy main" | sudo tee -a /etc/apt/sources.list.d/libreqos.list
+sudo wget -O - -q http://stats.libreqos.io/repo.asc | sudo apt-key add -
 apt-get update
 apt-get install libreqos
 ```
@@ -100,23 +100,23 @@ Let's attach some access points and point-of-presence sites:
 ```python
 net.addRawNode(NetworkNode(id="AP_A", displayName="AP_A", parentId="Site_1", type=NodeType.ap, download=500, upload=500))
 net.addRawNode(NetworkNode(id="Site_3", displayName="Site_3", parentId="Site_1", type=NodeType.site, download=500, upload=500))
-net.addRawNode(NetworkNode(id="PoP_5", displayName="PoP_5", parentId="Site_3", type=NodeType.site, download=200, upload=200))
-net.addRawNode(NetworkNode(id="AP_9", displayName="AP_9", parentId="PoP_5", type=NodeType.ap, download=120, upload=120))
-net.addRawNode(NetworkNode(id="PoP_6", displayName="PoP_6", parentId="PoP_5", type=NodeType.site, download=60, upload=60))
-net.addRawNode(NetworkNode(id="AP_11", displayName="AP_11", parentId="PoP_6", type=NodeType.ap, download=30, upload=30))
-net.addRawNode(NetworkNode(id="PoP_1", displayName="PoP_1", parentId="Site_2", type=NodeType.site, download=200, upload=200))
-net.addRawNode(NetworkNode(id="AP_7", displayName="AP_7", parentId="PoP_1", type=NodeType.ap, download=100, upload=100))
+net.addRawNode(NetworkNode(id="Site_5", displayName="Site_5", parentId="Site_3", type=NodeType.site, download=200, upload=200))
+net.addRawNode(NetworkNode(id="AP_9", displayName="AP_9", parentId="Site_5", type=NodeType.ap, download=120, upload=120))
+net.addRawNode(NetworkNode(id="Site_6", displayName="Site_6", parentId="Site_5", type=NodeType.site, download=60, upload=60))
+net.addRawNode(NetworkNode(id="AP_11", displayName="AP_11", parentId="Site_6", type=NodeType.ap, download=30, upload=30))
+net.addRawNode(NetworkNode(id="Site_4", displayName="Site_4", parentId="Site_2", type=NodeType.site, download=200, upload=200))
+net.addRawNode(NetworkNode(id="AP_7", displayName="AP_7", parentId="Site_4", type=NodeType.ap, download=100, upload=100))
 net.addRawNode(NetworkNode(id="AP_1", displayName="AP_1", parentId="Site_2", type=NodeType.ap, download=150, upload=150))
 ```

-When you attach a customer, you can specify a tree entry (e.g. `PoP_5`) as a parent:
+When you attach a customer, you can specify a tree entry (e.g. `Site_5`) as a parent:

 ```python
 # Add the customer
 customer = NetworkNode(
     id="Unique Customer ID",
     displayName="The Doe Family",
-    parentId="PoP_5",
+    parentId="Site_5",
     type=NodeType.client,
     download=100, # Download is in Mbit/second
     upload=20, # Upload is in Mbit/second
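For readability, here is the hierarchy those `addRawNode()` calls (new side of the hunk) describe, rebuilt as a small illustrative script; the node IDs come from the example above, everything else is just presentation:

```python
# Illustrative only: the parent/child tree implied by the example above.
links = [
    ("Site_1", "AP_A"), ("Site_1", "Site_3"),
    ("Site_3", "Site_5"), ("Site_5", "AP_9"),
    ("Site_5", "Site_6"), ("Site_6", "AP_11"),
    ("Site_2", "Site_4"), ("Site_4", "AP_7"),
    ("Site_2", "AP_1"),
]

children = {}
for parent, child in links:
    children.setdefault(parent, []).append(child)

def show(node, depth=0):
    print("  " * depth + node)
    for child in children.get(node, []):
        show(child, depth + 1)

for root in ("Site_1", "Site_2"):
    show(root)
```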
@@ -146,7 +146,7 @@ net.createShapedDevices() # Create the `ShapedDevices.csv` file.

 You can also add a call to `net.plotNetworkGraph(False)` (use `True` to also include every customer; this can make for a HUGE file) to create a PDF file (currently named `network.pdf.pdf`) displaying your topology. The example shown here looks like this:

 

 ## Longest Prefix Match Tip
 You could theoretically throttle all unknown IPs until they are associated with a client. For example, you could limit every unknown IP to 1.5x0.5 with a single entry in ShapedDevices.csv, until you associate them with an account. IP entries need to be non-exact matches. So you can't have two 192.168.1.1 entries, but you can have a 192.168.1.0/24 subnet and a 192.168.1.2/32 - they aren't duplicates, and the LPM search is smart enough to pick the most exact match.
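To make the longest-prefix-match behaviour concrete, here is a minimal, hypothetical sketch using Python's standard `ipaddress` module; the addresses mirror the ones in the tip and are not taken from a real ShapedDevices.csv:

```python
# Minimal sketch of longest-prefix-match selection over overlapping entries.
import ipaddress

entries = {
    "192.168.1.0/24": "catch-all, shaped to 1.5 x 0.5 Mbps",
    "192.168.1.2/32": "known customer circuit",
}

def best_match(ip: str) -> str:
    addr = ipaddress.ip_address(ip)
    candidates = [
        net for net in entries
        if addr in ipaddress.ip_network(net)
    ]
    # The longest prefix (most specific subnet) wins.
    winner = max(candidates, key=lambda net: ipaddress.ip_network(net).prefixlen)
    return entries[winner]

print(best_match("192.168.1.2"))   # -> known customer circuit
print(best_match("192.168.1.77"))  # -> catch-all, shaped to 1.5 x 0.5 Mbps
```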
@@ -90,34 +90,34 @@ def tearDown(interfaceA, interfaceB):
     clear_ip_mappings() # Use the bus
     clearPriorSettings(interfaceA, interfaceB)

-def findQueuesAvailable():
+def findQueuesAvailable(interfaceName):
     # Find queues and CPU cores available. Use min between those two as queuesAvailable
     if enableActualShellCommands:
         if queuesAvailableOverride == 0:
             queuesAvailable = 0
-            path = '/sys/class/net/' + interfaceA + '/queues/'
+            path = '/sys/class/net/' + interfaceName + '/queues/'
             directory_contents = os.listdir(path)
             for item in directory_contents:
                 if "tx-" in str(item):
                     queuesAvailable += 1
-            print("NIC queues:\t\t\t" + str(queuesAvailable))
+            print(f"Interface {interfaceName} NIC queues:\t\t\t" + str(queuesAvailable))
         else:
             queuesAvailable = queuesAvailableOverride
-            print("NIC queues (Override):\t\t\t" + str(queuesAvailable))
+            print(f"Interface {interfaceName} NIC queues (Override):\t\t\t" + str(queuesAvailable))
         cpuCount = multiprocessing.cpu_count()
         print("CPU cores:\t\t\t" + str(cpuCount))
         if queuesAvailable < 2:
-            raise SystemError('Only 1 NIC rx/tx queue available. You will need to use a NIC with 2 or more rx/tx queues available.')
+            raise SystemError(f'Only 1 NIC rx/tx queue available for interface {interfaceName}. You will need to use a NIC with 2 or more rx/tx queues available.')
         if queuesAvailable < 2:
             raise SystemError('Only 1 CPU core available. You will need to use a CPU with 2 or more CPU cores.')
         queuesAvailable = min(queuesAvailable,cpuCount)
-        print("queuesAvailable set to:\t" + str(queuesAvailable))
+        print(f"queuesAvailable for interface {interfaceName} set to:\t" + str(queuesAvailable))
     else:
         print("As enableActualShellCommands is False, CPU core / queue count has been set to 16")
-        logging.info("NIC queues:\t\t\t" + str(16))
+        logging.info(f"Interface {interfaceName} NIC queues:\t\t\t" + str(16))
         cpuCount = multiprocessing.cpu_count()
         logging.info("CPU cores:\t\t\t" + str(16))
-        logging.info("queuesAvailable set to:\t" + str(16))
+        logging.info(f"queuesAvailable for interface {interfaceName} set to:\t" + str(16))
         queuesAvailable = 16
     return queuesAvailable
@@ -137,12 +137,28 @@ def validateNetworkAndDevices():
     devicesValidatedOrNot = False
     with open('network.json') as file:
         try:
-            temporaryVariable = json.load(file) # put JSON-data to a variable
+            data = json.load(file) # put JSON-data to a variable
+            if data != {}:
+                # Traverse
+                observedNodes = {} # Will not be used later
+                def traverseToVerifyValidity(data):
+                    for elem in data:
+                        if isinstance(elem, str):
+                            if (isinstance(data[elem], dict)) and (elem != 'children'):
+                                if elem not in observedNodes:
+                                    observedNodes[elem] = {'downloadBandwidthMbps': data[elem]['downloadBandwidthMbps'], 'uploadBandwidthMbps': data[elem]['uploadBandwidthMbps']}
+                                    if 'children' in data[elem]:
+                                        traverseToVerifyValidity(data[elem]['children'])
+                                else:
+                                    warnings.warn("Non-unique Node name in network.json: " + elem, stacklevel=2)
+                                    networkValidatedOrNot = False
+                traverseToVerifyValidity(data)
+                if len(observedNodes) < 1:
+                    warnings.warn("network.json had 0 valid nodes. Only {} is accepted for that scenario.", stacklevel=2)
+                    networkValidatedOrNot = False
         except json.decoder.JSONDecodeError:
             warnings.warn("network.json is an invalid JSON file", stacklevel=2) # in case json is invalid
-            networkValidatedOrNot
-    if networkValidatedOrNot == True:
-        print("network.json passed validation")
+            networkValidatedOrNot = False
     rowNum = 2
     with open('ShapedDevices.csv') as csv_file:
         csv_reader = csv.reader(csv_file, delimiter=',')
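To see what the new traversal is checking, here is a small self-contained sketch with a hypothetical network.json-style dict; the node names and rates are made up and the logic is simplified to the duplicate-name check:

```python
# Hypothetical network.json content, as a dict like json.load() would return.
import warnings

data = {
    "Site_1": {
        "downloadBandwidthMbps": 1000, "uploadBandwidthMbps": 1000,
        "children": {
            "AP_A": {"downloadBandwidthMbps": 500, "uploadBandwidthMbps": 500},
        },
    },
    "Site_2": {"downloadBandwidthMbps": 500, "uploadBandwidthMbps": 500},
}

observedNodes = {}

def traverse(tree):
    # Record every node name once; a repeated name means the tree is ambiguous.
    for name, node in tree.items():
        if not isinstance(node, dict) or name == "children":
            continue
        if name in observedNodes:
            warnings.warn("Non-unique Node name in network.json: " + name)
            continue
        observedNodes[name] = {
            "downloadBandwidthMbps": node["downloadBandwidthMbps"],
            "uploadBandwidthMbps": node["uploadBandwidthMbps"],
        }
        if "children" in node:
            traverse(node["children"])

traverse(data)
print(sorted(observedNodes))  # ['AP_A', 'Site_1', 'Site_2']
```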
@@ -255,8 +271,11 @@ def validateNetworkAndDevices():
         print("ShapedDevices.csv passed validation")
     else:
         print("ShapedDevices.csv failed validation")
-    if (devicesValidatedOrNot == True) and (devicesValidatedOrNot == True):
+    if networkValidatedOrNot == True:
+        print("network.json passed validation")
+    else:
+        print("network.json failed validation")
+    if (devicesValidatedOrNot == True) and (networkValidatedOrNot == True):
         return True
     else:
         return False
@@ -454,7 +473,10 @@ def refreshShapers():


     # Pull rx/tx queues / CPU cores available
-    queuesAvailable = findQueuesAvailable()
+    # Handling the case when the number of queues for interfaces are different
+    InterfaceAQueuesAvailable = findQueuesAvailable(interfaceA)
+    InterfaceBQueuesAvailable = findQueuesAvailable(interfaceB)
+    queuesAvailable = min(InterfaceAQueuesAvailable, InterfaceBQueuesAvailable)
     stickOffset = 0
     if OnAStick:
         print("On-a-stick override dividing queues")
src/VERSION_STRING (new file, 1 line)
@@ -0,0 +1 @@
+1.4-rc10-devel
@@ -60,6 +60,10 @@ for prog in $PROGS
 do
     pushd $prog > /dev/null
     cargo build $BUILD_FLAGS
+    if [ $? -ne 0 ]; then
+        echo "Cargo build failed. Exiting with code 1."
+        exit 1
+    fi
     popd > /dev/null
 done

@@ -356,6 +356,10 @@ class NetworkGraph:
     def createShapedDevices(self):
         import csv
         from ispConfig import bandwidthOverheadFactor
+        try:
+            from ispConfig import committedBandwidthMultiplier
+        except:
+            committedBandwidthMultiplier = 0.98
         # Builds ShapedDevices.csv from the network tree.
         circuits = []
         for (i, node) in enumerate(self.nodes):

@@ -416,8 +420,8 @@ class NetworkGraph:
                     device["mac"],
                     device["ipv4"],
                     device["ipv6"],
-                    int(float(circuit["download"]) * 0.98),
-                    int(float(circuit["upload"]) * 0.98),
+                    int(float(circuit["download"]) * committedBandwidthMultiplier),
+                    int(float(circuit["upload"]) * committedBandwidthMultiplier),
                     int(float(circuit["download"]) * bandwidthOverheadFactor),
                     int(float(circuit["upload"]) * bandwidthOverheadFactor),
                     ""
@@ -115,6 +115,8 @@ findIPv6usingMikrotik = False
 # If you want to provide a safe cushion for speed test results to prevent customer complaints, you can set this to
 # 1.15 (15% above plan rate). If not, you can leave as 1.0
 bandwidthOverheadFactor = 1.0
+# Number to multiply the maximum/ceiling bandwidth with to determine the minimum bandwidth.
+committedBandwidthMultiplier = 0.98
 # For edge cases, set the respective ParentNode for these CPEs
 exceptionCPEs = {}
 # exceptionCPEs = {
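As a rough illustration of how the two ispConfig factors are applied in `createShapedDevices()` (the plan rates below are made up; the multiplier values are the defaults shown above):

```python
# Illustrative sketch: committed (minimum) and ceiling (maximum) rates derived
# from a circuit's plan rate, mirroring the createShapedDevices() change above.
bandwidthOverheadFactor = 1.0        # cushion above the plan rate (e.g. 1.15 for +15%)
committedBandwidthMultiplier = 0.98  # committed rate as a fraction of the plan rate

plan = {"download": 100, "upload": 20}  # hypothetical plan, in Mbps

min_download = int(float(plan["download"]) * committedBandwidthMultiplier)
min_upload = int(float(plan["upload"]) * committedBandwidthMultiplier)
max_download = int(float(plan["download"]) * bandwidthOverheadFactor)
max_upload = int(float(plan["upload"]) * bandwidthOverheadFactor)

print(min_download, max_download)  # 98 100
print(min_upload, max_upload)      # 19 20
```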
src/rust/Cargo.lock (generated, 2152 lines changed): file diff suppressed because it is too large.
@@ -28,14 +28,7 @@ members = [
     "lqos_heimdall", # Library for managing Heimdall flow watching
     "lqos_map_perf", # A CLI tool for testing eBPF map performance
     "lqstats", # A CLI utility for retrieving long-term statistics
-    "long_term_stats/license_server", # Licensing Server for LibreQoS Long-term stats
-    "long_term_stats/lts_node", # Long-term stats cluster node (web interface)
-    "long_term_stats/lts_ingestor", # Long-term stats data ingestor (feeding databases)
-    "long_term_stats/pgdb", # PostgreSQL interface for the LTS system
-    "long_term_stats/licman", # A CLI tool for managing the licensing server
-    "long_term_stats/lts_client", # Shared data and client-side code for long-term stats
-    "long_term_stats/wasm_pipe", # Provides a WebAssembly tight/compressed data pipeline
-    "long_term_stats/wasm_pipe_types", # Common types between the WASM conduit and the WASM server
+    "lts_client", # Shared data and client-side code for long-term stats
     "lqos_map_perf", # A CLI tool for testing eBPF map performance
     "uisp", # REST support for the UISP API
 ]
Deleted file (Long Term Stats README):
@@ -1,23 +0,0 @@
-# Long Term Stats
-
-We'd really rather you let us host your long-term statistics. It's a lot
-of work, and gives us a revenue stream to keep building LibreQoS.
-
-If you really want to self-host, setup is a bit convoluted - but we won't
-stop you.
-
-## PostgreSQL
-
-* Install PostgreSQL somewhere on your network. You only want one PostgreSQL host per long-term node stats cluster.
-* Setup the database schema (TBD).
-* Put the connection string for your database in `/etc/lqdb` on each host.
-* Install the `sqlx` tool with `cargo install sqlx-cli --no-default-features --features rustls,postgres`
-
-## For each stats node in the cluster
-
-* Install InfluxDB.
-* Install lts_node.
-* Setup `/etc/lqdb`.
-* Copy `lts_keys.bin` from the license server to the `lts_node` directory.
-* Run the process.
-* Login to the licensing server, and run `licman host add <ip of the new host>`
Deleted file (license_server Cargo.toml):
@@ -1,14 +0,0 @@
-[package]
-name = "license_server"
-version = "0.1.0"
-edition = "2021"
-
-[dependencies]
-tokio = { version = "1.25.0", features = ["full"] }
-anyhow = "1"
-env_logger = "0"
-log = "0"
-serde = { version = "1.0", features = ["derive"] }
-lts_client = { path = "../lts_client" }
-pgdb = { path = "../pgdb" }
-once_cell = "1"
Deleted file (license_server README):
@@ -1,3 +0,0 @@
-# License Server
-
-Runs at LibreQoS and matches license keys with an "is valid" list. If you're running your very own licensing server, then you will need to set this up on your server to accept your key. Details will be provided later.
Deleted file (license_server main.rs):
@@ -1,14 +0,0 @@
-mod server;
-mod pki;
-
-#[tokio::main]
-async fn main() -> anyhow::Result<()> {
-    // Start the logger
-    env_logger::init_from_env(
-        env_logger::Env::default()
-            .filter_or(env_logger::DEFAULT_FILTER_ENV, "warn"),
-    );
-
-    let _ = server::start().await;
-    Ok(())
-}
Deleted file (license_server pki.rs):
@@ -1,6 +0,0 @@
-use lts_client::{dryoc::dryocbox::*, pki::generate_new_keypair};
-use once_cell::sync::Lazy;
-use tokio::sync::RwLock;
-
-pub(crate) static LIBREQOS_KEYPAIR: Lazy<RwLock<KeyPair>> = Lazy::new(|| RwLock::new(generate_new_keypair(KEY_PATH)));
-const KEY_PATH: &str = "lqkeys.bin"; // Store in the working directory
@ -1,149 +0,0 @@
|
|||||||
use lts_client::transport_data::{LicenseReply, LicenseRequest};
|
|
||||||
use pgdb::sqlx::{Pool, Postgres};
|
|
||||||
use std::net::SocketAddr;
|
|
||||||
use tokio::{
|
|
||||||
io::{AsyncReadExt, AsyncWriteExt},
|
|
||||||
net::TcpListener,
|
|
||||||
spawn,
|
|
||||||
};
|
|
||||||
use crate::pki::LIBREQOS_KEYPAIR;
|
|
||||||
|
|
||||||
pub async fn start() -> anyhow::Result<()> {
|
|
||||||
let listener = TcpListener::bind(":::9126").await?;
|
|
||||||
log::info!("Listening on :::9126");
|
|
||||||
|
|
||||||
let pool = pgdb::get_connection_pool(5).await;
|
|
||||||
if pool.is_err() {
|
|
||||||
log::error!("Unable to connect to the database");
|
|
||||||
log::error!("{pool:?}");
|
|
||||||
return Err(anyhow::Error::msg("Unable to connect to the database"));
|
|
||||||
}
|
|
||||||
let pool = pool.unwrap();
|
|
||||||
|
|
||||||
loop {
|
|
||||||
let (mut socket, address) = listener.accept().await?;
|
|
||||||
log::info!("Connection from {address:?}");
|
|
||||||
let pool = pool.clone();
|
|
||||||
spawn(async move {
|
|
||||||
let mut buf = vec![0u8; 10240];
|
|
||||||
if let Ok(bytes) = socket.read(&mut buf).await {
|
|
||||||
log::info!("Received {bytes} bytes from {address:?}");
|
|
||||||
match decode(&buf, address, pool).await {
|
|
||||||
Err(e) => log::error!("{e:?}"),
|
|
||||||
Ok(reply) => {
|
|
||||||
let bytes = build_reply(&reply);
|
|
||||||
match bytes {
|
|
||||||
Ok(bytes) => {
|
|
||||||
log::info!("Submitting {} bytes to network", bytes.len());
|
|
||||||
if let Err(e) = socket.write_all(&bytes).await {
|
|
||||||
log::error!("Write error: {e:?}");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
log::error!("{e:?}");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn decode(
|
|
||||||
buf: &[u8],
|
|
||||||
address: SocketAddr,
|
|
||||||
pool: Pool<Postgres>,
|
|
||||||
) -> anyhow::Result<LicenseReply> {
|
|
||||||
const U64SIZE: usize = std::mem::size_of::<u64>();
|
|
||||||
let version_buf = &buf[0..2].try_into()?;
|
|
||||||
let version = u16::from_be_bytes(*version_buf);
|
|
||||||
let size_buf = &buf[2..2 + U64SIZE].try_into()?;
|
|
||||||
let size = u64::from_be_bytes(*size_buf);
|
|
||||||
log::info!("Received a version {version} payload of serialized size {size} from {address:?}");
|
|
||||||
|
|
||||||
match version {
|
|
||||||
1 => {
|
|
||||||
let start = 2 + U64SIZE;
|
|
||||||
let end = start + size as usize;
|
|
||||||
let payload: LicenseRequest = lts_client::cbor::from_slice(&buf[start..end])?;
|
|
||||||
let license = check_license(&payload, address, pool).await?;
|
|
||||||
Ok(license)
|
|
||||||
}
|
|
||||||
_ => {
|
|
||||||
log::error!("Unknown version of statistics: {version}, dumped {size} bytes");
|
|
||||||
Err(anyhow::Error::msg("Version error"))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn check_license(
|
|
||||||
request: &LicenseRequest,
|
|
||||||
address: SocketAddr,
|
|
||||||
pool: Pool<Postgres>,
|
|
||||||
) -> anyhow::Result<LicenseReply> {
|
|
||||||
match request {
|
|
||||||
LicenseRequest::LicenseCheck { key } => {
|
|
||||||
log::info!("Checking license from {address:?}, key: {key}");
|
|
||||||
if key == "test" {
|
|
||||||
log::info!("License is valid");
|
|
||||||
Ok(LicenseReply::Valid {
|
|
||||||
expiry: 0, // Temporary value
|
|
||||||
stats_host: "127.0.0.1:9127".to_string(), // Also temporary
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
match pgdb::get_stats_host_for_key(pool, key).await {
|
|
||||||
Ok(host) => {
|
|
||||||
log::info!("License is valid");
|
|
||||||
return Ok(LicenseReply::Valid {
|
|
||||||
expiry: 0, // Temporary value
|
|
||||||
stats_host: host,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
log::warn!("Unable to get stats host for key: {e:?}");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
log::info!("License is denied");
|
|
||||||
Ok(LicenseReply::Denied)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
LicenseRequest::KeyExchange { node_id, node_name, license_key, public_key } => {
|
|
||||||
log::info!("Public key exchange requested by {node_id}");
|
|
||||||
|
|
||||||
// Check if the node_id / license key combination exists
|
|
||||||
// If it does, update it to the current last-seen and the new public key
|
|
||||||
// If it doesn't, insert it
|
|
||||||
let public_key = lts_client::cbor::to_vec(&public_key).unwrap();
|
|
||||||
let result = pgdb::insert_or_update_node_public_key(pool, node_id, node_name, license_key, &public_key).await;
|
|
||||||
if result.is_err() {
|
|
||||||
log::warn!("Unable to insert or update node public key: {result:?}");
|
|
||||||
return Err(anyhow::Error::msg("Unable to insert or update node public key"));
|
|
||||||
}
|
|
||||||
|
|
||||||
let public_key = LIBREQOS_KEYPAIR.read().await.public_key.clone();
|
|
||||||
Ok(LicenseReply::MyPublicKey { public_key })
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn build_reply(reply: &LicenseReply) -> anyhow::Result<Vec<u8>> {
|
|
||||||
let mut result = Vec::new();
|
|
||||||
let payload = lts_client::cbor::to_vec(reply);
|
|
||||||
if let Err(e) = payload {
|
|
||||||
log::warn!("Unable to serialize statistics. Not sending them.");
|
|
||||||
log::warn!("{e:?}");
|
|
||||||
return Err(anyhow::Error::msg("Unable to serialize"));
|
|
||||||
}
|
|
||||||
let payload = payload.unwrap();
|
|
||||||
|
|
||||||
// Store the version as network order
|
|
||||||
result.extend(1u16.to_be_bytes());
|
|
||||||
// Store the payload size as network order
|
|
||||||
result.extend((payload.len() as u64).to_be_bytes());
|
|
||||||
// Store the payload itself
|
|
||||||
result.extend(payload);
|
|
||||||
|
|
||||||
Ok(result)
|
|
||||||
}
|
|
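The deleted `server.rs` above frames every message the same way: a big-endian `u16` protocol version, a big-endian `u64` payload length, then a CBOR payload (see `decode()` and `build_reply()`). A minimal Python sketch of that framing, for illustration only; the real code is Rust and uses `lts_client::cbor`, and the sample payload below is just a stand-in:

```python
# Illustrative framing sketch: 2-byte version, 8-byte length, then the payload,
# all big-endian, mirroring decode()/build_reply() in the deleted server.rs.
import struct

def build_frame(payload: bytes, version: int = 1) -> bytes:
    return struct.pack(">H", version) + struct.pack(">Q", len(payload)) + payload

def parse_frame(buf: bytes) -> tuple[int, bytes]:
    version = struct.unpack_from(">H", buf, 0)[0]
    size = struct.unpack_from(">Q", buf, 2)[0]
    start = 2 + 8
    return version, buf[start:start + size]

frame = build_frame(b"\xa1\x63key\x64test")  # stand-in for a CBOR-encoded request
print(parse_frame(frame))                    # (1, b'\xa1ckeydtest')
```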
Deleted file (licman Cargo.toml):
@@ -1,12 +0,0 @@
-[package]
-name = "licman"
-version = "0.1.0"
-edition = "2021"
-
-[dependencies]
-clap = { version = "4", features = ["derive"] }
-anyhow = "1"
-pgdb = { path = "../pgdb" }
-tokio = { version = "1", features = [ "rt", "macros", "net", "io-util", "time" ] }
-env_logger = "0"
-log = "0"
@ -1,112 +0,0 @@
|
|||||||
use anyhow::Result;
|
|
||||||
use clap::{Parser, Subcommand};
|
|
||||||
use pgdb::create_free_trial;
|
|
||||||
use std::process::exit;
|
|
||||||
|
|
||||||
#[derive(Parser)]
|
|
||||||
#[command()]
|
|
||||||
struct Args {
|
|
||||||
#[command(subcommand)]
|
|
||||||
command: Option<Commands>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Subcommand)]
|
|
||||||
enum Commands {
|
|
||||||
/// Manage stats hosts
|
|
||||||
Hosts {
|
|
||||||
#[command(subcommand)]
|
|
||||||
command: Option<HostsCommands>,
|
|
||||||
},
|
|
||||||
/// Manage licenses
|
|
||||||
License {
|
|
||||||
#[command(subcommand)]
|
|
||||||
command: Option<LicenseCommands>,
|
|
||||||
},
|
|
||||||
/// Manage users
|
|
||||||
Users {
|
|
||||||
#[command(subcommand)]
|
|
||||||
command: Option<UsersCommands>,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Subcommand)]
|
|
||||||
enum HostsCommands {
|
|
||||||
/// Add a host to the list of available stats storing hosts
|
|
||||||
Add { hostname: String, influx_host: String, api_key: String },
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Subcommand)]
|
|
||||||
enum LicenseCommands {
|
|
||||||
/// Create a new free trial license
|
|
||||||
FreeTrial { organization: String },
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Subcommand)]
|
|
||||||
enum UsersCommands {
|
|
||||||
/// Add a new user
|
|
||||||
Add { key: String, username: String, password: String, nicename: String },
|
|
||||||
/// Delete a user
|
|
||||||
Delete { key: String, username: String },
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tokio::main(flavor = "current_thread")]
|
|
||||||
async fn main() -> Result<()> {
|
|
||||||
env_logger::init_from_env(
|
|
||||||
env_logger::Env::default()
|
|
||||||
.filter_or(env_logger::DEFAULT_FILTER_ENV, "warn"),
|
|
||||||
);
|
|
||||||
|
|
||||||
// Get the database connection pool
|
|
||||||
let pool = pgdb::get_connection_pool(5).await;
|
|
||||||
if pool.is_err() {
|
|
||||||
log::error!("Unable to connect to the database");
|
|
||||||
log::error!("{pool:?}");
|
|
||||||
return Err(anyhow::Error::msg("Unable to connect to the database"));
|
|
||||||
}
|
|
||||||
let pool = pool.unwrap();
|
|
||||||
|
|
||||||
let cli = Args::parse();
|
|
||||||
match cli.command {
|
|
||||||
Some(Commands::Hosts {
|
|
||||||
command: Some(HostsCommands::Add { hostname, influx_host, api_key }),
|
|
||||||
}) => {
|
|
||||||
match pgdb::add_stats_host(pool, hostname, influx_host, api_key).await {
|
|
||||||
Err(e) => {
|
|
||||||
log::error!("Unable to add stats host: {e:?}");
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
Ok(new_id) => {
|
|
||||||
println!("Added stats host with id {}", new_id);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Some(Commands::License{command: Some(LicenseCommands::FreeTrial { organization })}) => {
|
|
||||||
match create_free_trial(pool, &organization).await {
|
|
||||||
Err(e) => {
|
|
||||||
log::error!("Unable to create free trial: {e:?}");
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
Ok(key) => {
|
|
||||||
println!("Your new license key is: {}", key);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Some(Commands::Users{command: Some(UsersCommands::Add { key, username, password, nicename })}) => {
|
|
||||||
match pgdb::add_user(pool, &key, &username, &password, &nicename).await {
|
|
||||||
Err(e) => {
|
|
||||||
log::error!("Unable to add user: {e:?}");
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
Ok(_) => {
|
|
||||||
println!("Added user {}", username);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_ => {
|
|
||||||
println!("Run with --help to see instructions");
|
|
||||||
exit(0);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
@ -1,60 +0,0 @@
|
|||||||
use dryoc::{dryocbox::{Nonce, DryocBox}, types::{NewByteArray, ByteArray}};
|
|
||||||
use lqos_config::EtcLqos;
|
|
||||||
use crate::{transport_data::{LtsCommand, NodeIdAndLicense}, submission_queue::queue::QueueError};
|
|
||||||
use super::keys::{SERVER_PUBLIC_KEY, KEYPAIR};
|
|
||||||
|
|
||||||
pub(crate) async fn encode_submission(submission: &LtsCommand) -> Result<Vec<u8>, QueueError> {
|
|
||||||
let nonce = Nonce::gen();
|
|
||||||
let mut result = Vec::new();
|
|
||||||
|
|
||||||
// Store the version as network order
|
|
||||||
result.extend(1u16.to_be_bytes());
|
|
||||||
|
|
||||||
// Pack the license key and node id into a header
|
|
||||||
let header = get_license_key_and_node_id(&nonce)?;
|
|
||||||
let header_bytes = serde_cbor::to_vec(&header).map_err(|_| QueueError::SendFail)?;
|
|
||||||
|
|
||||||
// Store the size of the header and the header
|
|
||||||
result.extend((header_bytes.len() as u64).to_be_bytes());
|
|
||||||
result.extend(header_bytes);
|
|
||||||
|
|
||||||
// Pack the submission body into bytes
|
|
||||||
let payload_bytes = serde_cbor::to_vec(&submission).map_err(|_| QueueError::SendFail)?;
|
|
||||||
|
|
||||||
// TODO: Compress it?
|
|
||||||
let payload_bytes = miniz_oxide::deflate::compress_to_vec(&payload_bytes, 8);
|
|
||||||
|
|
||||||
// Encrypt it
|
|
||||||
let remote_public = SERVER_PUBLIC_KEY.read().await.clone().unwrap();
|
|
||||||
let my_private = KEYPAIR.read().await.secret_key.clone();
|
|
||||||
let dryocbox = DryocBox::encrypt_to_vecbox(
|
|
||||||
&payload_bytes,
|
|
||||||
&nonce,
|
|
||||||
&remote_public,
|
|
||||||
&my_private,
|
|
||||||
).map_err(|_| QueueError::SendFail)?;
|
|
||||||
let encrypted_bytes = dryocbox.to_vec();
|
|
||||||
|
|
||||||
// Store the size of the submission
|
|
||||||
result.extend((encrypted_bytes.len() as u64).to_be_bytes());
|
|
||||||
result.extend(encrypted_bytes);
|
|
||||||
|
|
||||||
// Store the encrypted, zipped submission itself
|
|
||||||
Ok(result)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_license_key_and_node_id(nonce: &Nonce) -> Result<NodeIdAndLicense, QueueError> {
|
|
||||||
let cfg = EtcLqos::load().map_err(|_| QueueError::SendFail)?;
|
|
||||||
if let Some(node_id) = cfg.node_id {
|
|
||||||
if let Some(lts) = &cfg.long_term_stats {
|
|
||||||
if let Some(license_key) = <s.license_key {
|
|
||||||
return Ok(NodeIdAndLicense {
|
|
||||||
node_id,
|
|
||||||
license_key: license_key.clone(),
|
|
||||||
nonce: *nonce.as_array(),
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(QueueError::SendFail)
|
|
||||||
}
|
|
@ -1,83 +0,0 @@
|
|||||||
use std::time::Duration;
|
|
||||||
use tokio::{sync::mpsc::Receiver, time::sleep, net::TcpStream};
|
|
||||||
use self::keys::key_exchange;
|
|
||||||
use super::{licensing::{get_license_status, LicenseState}, queue::send_queue};
|
|
||||||
mod keys;
|
|
||||||
mod encode;
|
|
||||||
pub(crate) use encode::encode_submission;
|
|
||||||
|
|
||||||
pub(crate) enum SenderChannelMessage {
|
|
||||||
QueueReady,
|
|
||||||
Quit,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) async fn start_communication_channel(mut rx: Receiver<SenderChannelMessage>) {
|
|
||||||
let mut connected = false;
|
|
||||||
let mut stream: Option<TcpStream> = None;
|
|
||||||
loop {
|
|
||||||
match rx.try_recv() {
|
|
||||||
Ok(SenderChannelMessage::QueueReady) => {
|
|
||||||
// If not connected, see if we are allowed to connect and get a target
|
|
||||||
if !connected || stream.is_none() {
|
|
||||||
log::info!("Establishing LTS TCP channel.");
|
|
||||||
stream = connect_if_permitted().await;
|
|
||||||
if stream.is_some() {
|
|
||||||
connected = true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we're still not connected, skip - otherwise, send the
|
|
||||||
// queued data
|
|
||||||
if let Some(tcpstream) = &mut stream {
|
|
||||||
if connected && tcpstream.writable().await.is_ok() {
|
|
||||||
// Send the data
|
|
||||||
let all_good = send_queue(tcpstream).await;
|
|
||||||
if all_good.is_err() {
|
|
||||||
log::error!("Stream fail during send. Will re-send");
|
|
||||||
connected = false;
|
|
||||||
stream = None;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
stream = None;
|
|
||||||
connected = false;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
connected = false;
|
|
||||||
stream = None;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(SenderChannelMessage::Quit) => {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
_ => {}
|
|
||||||
}
|
|
||||||
|
|
||||||
sleep(Duration::from_secs(10)).await;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn connect_if_permitted() -> Option<TcpStream> {
|
|
||||||
let license = get_license_status().await;
|
|
||||||
if let LicenseState::Valid { stats_host, .. } = license {
|
|
||||||
if !key_exchange().await {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
|
|
||||||
let host = format!("{stats_host}:9128");
|
|
||||||
let stream = TcpStream::connect(&host).await;
|
|
||||||
match stream {
|
|
||||||
Err(e) => {
|
|
||||||
log::error!("Unable to connect to {host}: {e}");
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
Ok(stream) => {
|
|
||||||
if stream.writable().await.is_err() {
|
|
||||||
log::error!("Unable to write to {host}");
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
return Some(stream);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
None
|
|
||||||
}
|
|
@ -1,20 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "lts_ingestor"
|
|
||||||
version = "0.1.0"
|
|
||||||
edition = "2021"
|
|
||||||
|
|
||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
pgdb = { path = "../pgdb" }
|
|
||||||
lts_client = { path = "../lts_client" }
|
|
||||||
lqos_config = { path = "../../lqos_config" }
|
|
||||||
tokio = { version = "1.25.0", features = ["full"] }
|
|
||||||
tracing = "0.1"
|
|
||||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
|
||||||
anyhow = "1"
|
|
||||||
influxdb2 = "0"
|
|
||||||
influxdb2-structmap = "0"
|
|
||||||
futures = "0"
|
|
||||||
once_cell = "1"
|
|
||||||
miniz_oxide = "0.7.1"
|
|
Binary file not shown.
@ -1,33 +0,0 @@
|
|||||||
use tracing::{error, info};
|
|
||||||
mod submissions;
|
|
||||||
mod pki;
|
|
||||||
|
|
||||||
#[tokio::main]
|
|
||||||
async fn main() -> anyhow::Result<()> {
|
|
||||||
// install global collector configured based on RUST_LOG env var.
|
|
||||||
tracing_subscriber::fmt::init();
|
|
||||||
|
|
||||||
// Get the database connection pool
|
|
||||||
let pool = pgdb::get_connection_pool(5).await;
|
|
||||||
if pool.is_err() {
|
|
||||||
error!("Unable to connect to the database");
|
|
||||||
error!("{pool:?}");
|
|
||||||
return Err(anyhow::Error::msg("Unable to connect to the database"));
|
|
||||||
}
|
|
||||||
let pool = pool.unwrap();
|
|
||||||
|
|
||||||
// Start the submission queue
|
|
||||||
let submission_sender = {
|
|
||||||
info!("Starting the submission queue");
|
|
||||||
submissions::submissions_queue(pool.clone()).await?
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
// Start the submissions serer
|
|
||||||
info!("Starting the submissions server");
|
|
||||||
if let Err(e) = tokio::spawn(submissions::submissions_server(pool.clone(), submission_sender)).await {
|
|
||||||
error!("Server exited with error: {}", e);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
@ -1,6 +0,0 @@
|
|||||||
use std::sync::RwLock;
|
|
||||||
use once_cell::sync::Lazy;
|
|
||||||
use lts_client::{pki::generate_new_keypair, dryoc::dryocbox::KeyPair};
|
|
||||||
|
|
||||||
pub(crate) static LIBREQOS_KEYPAIR: Lazy<RwLock<KeyPair>> = Lazy::new(|| RwLock::new(generate_new_keypair(KEY_PATH)));
|
|
||||||
const KEY_PATH: &str = "lqkeys.bin"; // Store in the working directory
|
|
@ -1,5 +0,0 @@
|
|||||||
mod submission_server;
|
|
||||||
mod submission_queue;
|
|
||||||
pub use submission_server::submissions_server;
|
|
||||||
pub use submission_queue::submissions_queue;
|
|
||||||
pub use submission_queue::get_org_details;
|
|
@ -1,85 +0,0 @@
|
|||||||
use lqos_config::ShapedDevice;
|
|
||||||
use pgdb::{OrganizationDetails, sqlx::{Pool, Postgres}};
|
|
||||||
use tracing::{warn, error};
|
|
||||||
|
|
||||||
pub async fn ingest_shaped_devices(
|
|
||||||
cnn: Pool<Postgres>,
|
|
||||||
org: &OrganizationDetails,
|
|
||||||
node_id: &str,
|
|
||||||
devices: &[ShapedDevice],
|
|
||||||
) -> anyhow::Result<()> {
|
|
||||||
let mut trans = cnn.begin().await?;
|
|
||||||
|
|
||||||
// Clear existing data from shaped devices
|
|
||||||
pgdb::sqlx::query("DELETE FROM shaped_devices WHERE key=$1 AND node_id=$2")
|
|
||||||
.bind(org.key.to_string())
|
|
||||||
.bind(node_id)
|
|
||||||
.execute(&mut trans)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
// Clear existing data from shaped devices IP lists
|
|
||||||
pgdb::sqlx::query("DELETE FROM shaped_device_ip WHERE key=$1 AND node_id=$2")
|
|
||||||
.bind(org.key.to_string())
|
|
||||||
.bind(node_id)
|
|
||||||
.execute(&mut trans)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
const SQL_INSERT: &str = "INSERT INTO shaped_devices
|
|
||||||
(key, node_id, circuit_id, device_id, circuit_name, device_name, parent_node, mac, download_min_mbps, upload_min_mbps, download_max_mbps, upload_max_mbps, comment)
|
|
||||||
VALUES
|
|
||||||
($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)";
|
|
||||||
|
|
||||||
const SQL_IP_INSERT: &str = "INSERT INTO public.shaped_device_ip
|
|
||||||
(key, node_id, circuit_id, ip_range, subnet)
|
|
||||||
VALUES
|
|
||||||
($1, $2, $3, $4, $5)
|
|
||||||
ON CONFLICT (key, node_id, circuit_id, ip_range, subnet) DO NOTHING;";
|
|
||||||
|
|
||||||
for device in devices.iter() {
|
|
||||||
pgdb::sqlx::query(SQL_INSERT)
|
|
||||||
.bind(org.key.to_string())
|
|
||||||
.bind(node_id)
|
|
||||||
.bind(device.circuit_id.clone())
|
|
||||||
.bind(device.device_id.clone())
|
|
||||||
.bind(device.circuit_name.clone())
|
|
||||||
.bind(device.device_name.clone())
|
|
||||||
.bind(device.parent_node.clone())
|
|
||||||
.bind(device.mac.clone())
|
|
||||||
.bind(device.download_min_mbps as i32)
|
|
||||||
.bind(device.upload_min_mbps as i32)
|
|
||||||
.bind(device.download_max_mbps as i32)
|
|
||||||
.bind(device.upload_max_mbps as i32)
|
|
||||||
.bind(device.comment.clone())
|
|
||||||
.execute(&mut trans)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
for ip in device.ipv4.iter() {
|
|
||||||
pgdb::sqlx::query(SQL_IP_INSERT)
|
|
||||||
.bind(org.key.to_string())
|
|
||||||
.bind(node_id)
|
|
||||||
.bind(device.circuit_id.clone())
|
|
||||||
.bind(ip.0.to_string())
|
|
||||||
.bind(ip.1 as i32)
|
|
||||||
.execute(&mut trans)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
for ip in device.ipv6.iter() {
|
|
||||||
pgdb::sqlx::query(SQL_IP_INSERT)
|
|
||||||
.bind(org.key.to_string())
|
|
||||||
.bind(node_id)
|
|
||||||
.bind(device.circuit_id.clone())
|
|
||||||
.bind(ip.0.to_string())
|
|
||||||
.bind(ip.1 as i32)
|
|
||||||
.execute(&mut trans)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let result = trans.commit().await;
|
|
||||||
warn!("Transaction committed");
|
|
||||||
if let Err(e) = result {
|
|
||||||
error!("Error committing transaction: {}", e);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
@ -1,83 +0,0 @@
|
|||||||
use futures::prelude::*;
|
|
||||||
use influxdb2::models::DataPoint;
|
|
||||||
use influxdb2::Client;
|
|
||||||
use lts_client::transport_data::StatsTotals;
|
|
||||||
use pgdb::OrganizationDetails;
|
|
||||||
|
|
||||||
pub async fn collect_host_totals(
|
|
||||||
org: &OrganizationDetails,
|
|
||||||
node_id: &str,
|
|
||||||
timestamp: i64,
|
|
||||||
totals: &Option<StatsTotals>,
|
|
||||||
) -> anyhow::Result<()> {
|
|
||||||
if let Some(totals) = totals {
|
|
||||||
let influx_url = format!("http://{}:8086", org.influx_host);
|
|
||||||
let client = Client::new(&influx_url, &org.influx_org, &org.influx_token);
|
|
||||||
let points = vec![
|
|
||||||
DataPoint::builder("packets")
|
|
||||||
.tag("host_id", node_id.to_string())
|
|
||||||
.tag("organization_id", org.key.to_string())
|
|
||||||
.tag("direction", "down".to_string())
|
|
||||||
.timestamp(timestamp)
|
|
||||||
.field("min", totals.packets.min.0 as i64)
|
|
||||||
.field("max", totals.packets.max.0 as i64)
|
|
||||||
.field("avg", totals.packets.avg.0 as i64)
|
|
||||||
.build()?,
|
|
||||||
DataPoint::builder("packets")
|
|
||||||
.tag("host_id", node_id.to_string())
|
|
||||||
.tag("organization_id", org.key.to_string())
|
|
||||||
.tag("direction", "up".to_string())
|
|
||||||
.timestamp(timestamp)
|
|
||||||
.field("min", totals.packets.min.1 as i64)
|
|
||||||
.field("max", totals.packets.max.1 as i64)
|
|
||||||
.field("avg", totals.packets.avg.1 as i64)
|
|
||||||
.build()?,
|
|
||||||
DataPoint::builder("bits")
|
|
||||||
.tag("host_id", node_id.to_string())
|
|
||||||
.tag("organization_id", org.key.to_string())
|
|
||||||
.tag("direction", "down".to_string())
|
|
||||||
.timestamp(timestamp)
|
|
||||||
.field("min", totals.bits.min.0 as i64)
|
|
||||||
.field("max", totals.bits.max.0 as i64)
|
|
||||||
.field("avg", totals.bits.avg.0 as i64)
|
|
||||||
.build()?,
|
|
||||||
DataPoint::builder("bits")
|
|
||||||
.tag("host_id", node_id.to_string())
|
|
||||||
.tag("organization_id", org.key.to_string())
|
|
||||||
.tag("direction", "up".to_string())
|
|
||||||
.timestamp(timestamp)
|
|
||||||
.field("min", totals.bits.min.1 as i64)
|
|
||||||
.field("max", totals.bits.max.1 as i64)
|
|
||||||
.field("avg", totals.bits.avg.1 as i64)
|
|
||||||
.build()?,
|
|
||||||
DataPoint::builder("shaped_bits")
|
|
||||||
.tag("host_id", node_id.to_string())
|
|
||||||
.tag("organization_id", org.key.to_string())
|
|
||||||
.tag("direction", "down".to_string())
|
|
||||||
.timestamp(timestamp)
|
|
||||||
.field("min", totals.shaped_bits.min.0 as i64)
|
|
||||||
.field("max", totals.shaped_bits.max.0 as i64)
|
|
||||||
.field("avg", totals.shaped_bits.avg.0 as i64)
|
|
||||||
.build()?,
|
|
||||||
DataPoint::builder("shaped_bits")
|
|
||||||
.tag("host_id", node_id.to_string())
|
|
||||||
.tag("organization_id", org.key.to_string())
|
|
||||||
.tag("direction", "up".to_string())
|
|
||||||
.timestamp(timestamp)
|
|
||||||
.field("min", totals.shaped_bits.min.1 as i64)
|
|
||||||
.field("max", totals.shaped_bits.max.1 as i64)
|
|
||||||
.field("avg", totals.shaped_bits.avg.1 as i64)
|
|
||||||
.build()?,
|
|
||||||
];
|
|
||||||
|
|
||||||
//client.write(&org.influx_bucket, stream::iter(points)).await?;
|
|
||||||
client
|
|
||||||
.write_with_precision(
|
|
||||||
&org.influx_bucket,
|
|
||||||
stream::iter(points),
|
|
||||||
influxdb2::api::write::TimestampPrecision::Seconds,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
@ -1,10 +0,0 @@
|
|||||||
mod queue;
|
|
||||||
mod devices;
|
|
||||||
mod host_totals;
|
|
||||||
mod organization_cache;
|
|
||||||
mod per_host;
|
|
||||||
mod tree;
|
|
||||||
mod node_perf;
|
|
||||||
mod uisp_devices;
|
|
||||||
pub use queue::{submissions_queue, SubmissionType};
|
|
||||||
pub use organization_cache::get_org_details;
|
|
@ -1,35 +0,0 @@
|
|||||||
use futures::prelude::*;
|
|
||||||
use influxdb2::{models::DataPoint, Client};
|
|
||||||
use pgdb::OrganizationDetails;
|
|
||||||
|
|
||||||
pub async fn collect_node_perf(
|
|
||||||
org: &OrganizationDetails,
|
|
||||||
node_id: &str,
|
|
||||||
timestamp: i64,
|
|
||||||
cpu: &Option<Vec<u32>>,
|
|
||||||
ram: &Option<u32>,
|
|
||||||
) -> anyhow::Result<()> {
|
|
||||||
if let (Some(cpu), Some(ram)) = (cpu, ram) {
|
|
||||||
let influx_url = format!("http://{}:8086", org.influx_host);
|
|
||||||
let client = Client::new(&influx_url, &org.influx_org, &org.influx_token);
|
|
||||||
let cpu_sum = cpu.iter().sum::<u32>();
|
|
||||||
let cpu_avg = cpu_sum / cpu.len() as u32;
|
|
||||||
let cpu_max = *cpu.iter().max().unwrap();
|
|
||||||
let points = vec![DataPoint::builder("perf")
|
|
||||||
.tag("host_id", node_id.to_string())
|
|
||||||
.tag("organization_id", org.key.to_string())
|
|
||||||
.timestamp(timestamp)
|
|
||||||
.field("ram", *ram as i64)
|
|
||||||
.field("cpu", cpu_avg as i64)
|
|
||||||
.field("cpu_max", cpu_max as i64)
|
|
||||||
.build()?];
|
|
||||||
client
|
|
||||||
.write_with_precision(
|
|
||||||
&org.influx_bucket,
|
|
||||||
stream::iter(points),
|
|
||||||
influxdb2::api::write::TimestampPrecision::Seconds,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
@ -1,25 +0,0 @@
|
|||||||
use std::collections::HashMap;
|
|
||||||
use once_cell::sync::Lazy;
|
|
||||||
use pgdb::{OrganizationDetails, sqlx::{Pool, Postgres}};
|
|
||||||
use tokio::sync::RwLock;
|
|
||||||
|
|
||||||
static ORG_CACHE: Lazy<RwLock<HashMap<String, OrganizationDetails>>> = Lazy::new(|| {
|
|
||||||
RwLock::new(HashMap::new())
|
|
||||||
});
|
|
||||||
|
|
||||||
pub async fn get_org_details(cnn: &Pool<Postgres>, key: &str) -> Option<OrganizationDetails> {
|
|
||||||
{ // Safety scope - lock is dropped on exit
|
|
||||||
let cache = ORG_CACHE.read().await;
|
|
||||||
if let Some(org) = cache.get(key) {
|
|
||||||
return Some(org.clone());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// We can be certain that we don't have a dangling lock now.
|
|
||||||
// Upgrade to a write lock and try to fetch the org details.
|
|
||||||
let mut cache = ORG_CACHE.write().await;
|
|
||||||
if let Ok(org) = pgdb::get_organization(cnn, key).await {
|
|
||||||
cache.insert(key.to_string(), org.clone());
|
|
||||||
return Some(org);
|
|
||||||
}
|
|
||||||
None
|
|
||||||
}
|
|
@ -1,68 +0,0 @@
|
|||||||
use influxdb2::{Client, models::DataPoint};
|
|
||||||
use lts_client::transport_data::StatsHost;
|
|
||||||
use pgdb::OrganizationDetails;
|
|
||||||
use futures::prelude::*;
|
|
||||||
use tracing::info;
|
|
||||||
|
|
||||||
pub async fn collect_per_host(
|
|
||||||
org: &OrganizationDetails,
|
|
||||||
node_id: &str,
|
|
||||||
timestamp: i64,
|
|
||||||
totals: &Option<Vec<StatsHost>>,
|
|
||||||
) -> anyhow::Result<()> {
|
|
||||||
if let Some(hosts) = totals {
|
|
||||||
let influx_url = format!("http://{}:8086", org.influx_host);
|
|
||||||
let client = Client::new(&influx_url, &org.influx_org, &org.influx_token);
|
|
||||||
let mut points: Vec<DataPoint> = Vec::new();
|
|
||||||
info!("Received per-host stats, {} hosts", hosts.len());
|
|
||||||
|
|
||||||
for host in hosts.iter() {
|
|
||||||
let circuit_id = if let Some(cid) = &host.circuit_id {
|
|
||||||
cid.clone()
|
|
||||||
} else {
|
|
||||||
"unknown".to_string()
|
|
||||||
};
|
|
||||||
points.push(DataPoint::builder("host_bits")
|
|
||||||
.tag("host_id", node_id.to_string())
|
|
||||||
.tag("organization_id", org.key.to_string())
|
|
||||||
.tag("direction", "down".to_string())
|
|
||||||
.tag("circuit_id", &circuit_id)
|
|
||||||
.tag("ip", host.ip_address.to_string())
|
|
||||||
.timestamp(timestamp)
|
|
||||||
.field("min", host.bits.min.0 as i64)
|
|
||||||
.field("max", host.bits.max.0 as i64)
|
|
||||||
.field("avg", host.bits.avg.0 as i64)
|
|
||||||
.build()?);
|
|
||||||
points.push(DataPoint::builder("host_bits")
|
|
||||||
.tag("host_id", node_id.to_string())
|
|
||||||
.tag("organization_id", org.key.to_string())
|
|
||||||
.tag("direction", "up".to_string())
|
|
||||||
.tag("circuit_id", &circuit_id)
|
|
||||||
.tag("ip", host.ip_address.to_string())
|
|
||||||
.timestamp(timestamp)
|
|
||||||
.field("min", host.bits.min.1 as i64)
|
|
||||||
.field("max", host.bits.max.1 as i64)
|
|
||||||
.field("avg", host.bits.avg.1 as i64)
|
|
||||||
.build()?);
|
|
||||||
points.push(DataPoint::builder("rtt")
|
|
||||||
.tag("host_id", node_id.to_string())
|
|
||||||
.tag("organization_id", org.key.to_string())
|
|
||||||
.tag("circuit_id", &circuit_id)
|
|
||||||
.tag("ip", host.ip_address.to_string())
|
|
||||||
.timestamp(timestamp)
|
|
||||||
.field("min", host.rtt.avg as f64 / 100.0)
|
|
||||||
.field("max", host.rtt.max as f64 / 100.0)
|
|
||||||
.field("avg", host.rtt.avg as f64 / 100.0)
|
|
||||||
.build()?);
|
|
||||||
}
|
|
||||||
|
|
||||||
client
|
|
||||||
.write_with_precision(
|
|
||||||
&org.influx_bucket,
|
|
||||||
stream::iter(points),
|
|
||||||
influxdb2::api::write::TimestampPrecision::Seconds,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
@ -1,91 +0,0 @@
|
|||||||
//! Provides a queue of submissions to be processed by the long-term storage.
|
|
||||||
//! This is a "fan in" pattern: multi-producer, single-consumer messages
|
|
||||||
//! send data into the queue, which is managed by a single consumer
|
|
||||||
//! thread. The consumer thread spawns tokio tasks to actually
|
|
||||||
//! perform the processing.
|
|
||||||
|
|
||||||
use crate::submissions::submission_queue::{
|
|
||||||
devices::ingest_shaped_devices, host_totals::collect_host_totals, node_perf::collect_node_perf,
|
|
||||||
organization_cache::get_org_details, tree::collect_tree, per_host::collect_per_host, uisp_devices::collect_uisp_devices,
|
|
||||||
};
|
|
||||||
use lts_client::transport_data::{LtsCommand, NodeIdAndLicense};
|
|
||||||
use pgdb::sqlx::{Pool, Postgres};
|
|
||||||
use tokio::sync::mpsc::{Receiver, Sender};
|
|
||||||
use tracing::{info, error, warn};
|
|
||||||
|
|
||||||
const SUBMISSION_QUEUE_SIZE: usize = 100;
|
|
||||||
pub type SubmissionType = (NodeIdAndLicense, LtsCommand);
|
|
||||||
|
|
||||||
pub async fn submissions_queue(cnn: Pool<Postgres>) -> anyhow::Result<Sender<SubmissionType>> {
|
|
||||||
// Create a channel to send data to the consumer thread
|
|
||||||
let (tx, rx) = tokio::sync::mpsc::channel::<SubmissionType>(SUBMISSION_QUEUE_SIZE);
|
|
||||||
tokio::spawn(run_queue(cnn, rx)); // Note that'we *moving* rx into the spawned task
|
|
||||||
Ok(tx)
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn run_queue(cnn: Pool<Postgres>, mut rx: Receiver<SubmissionType>) -> anyhow::Result<()> {
|
|
||||||
while let Some(message) = rx.recv().await {
|
|
||||||
info!("Received a message from the submission queue");
|
|
||||||
let (node_id, command) = message;
|
|
||||||
tokio::spawn(ingest_stats(cnn.clone(), node_id, command));
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
//#[tracing::instrument]
|
|
||||||
async fn ingest_stats(
|
|
||||||
cnn: Pool<Postgres>,
|
|
||||||
node_id: NodeIdAndLicense,
|
|
||||||
command: LtsCommand,
|
|
||||||
) -> anyhow::Result<()> {
|
|
||||||
info!("Ingesting stats for node {}", node_id.node_id);
|
|
||||||
|
|
||||||
if let Some(org) = get_org_details(&cnn, &node_id.license_key).await {
|
|
||||||
//println!("{:?}", command);
|
|
||||||
match command {
|
|
||||||
LtsCommand::Devices(devices) => {
|
|
||||||
info!("Ingesting Shaped Devices");
|
|
||||||
update_last_seen(cnn.clone(), &node_id).await;
|
|
||||||
if let Err(e) = ingest_shaped_devices(cnn, &org, &node_id.node_id, &devices).await {
|
|
||||||
error!("Error ingesting shaped devices: {}", e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
LtsCommand::Submit(stats) => {
|
|
||||||
//println!("Submission: {:?}", submission);
|
|
||||||
info!("Ingesting statistics dump");
|
|
||||||
let ts = stats.timestamp as i64;
|
|
||||||
let _ = tokio::join!(
|
|
||||||
update_last_seen(cnn.clone(), &node_id),
|
|
||||||
collect_host_totals(&org, &node_id.node_id, ts, &stats.totals),
|
|
||||||
collect_node_perf(
|
|
||||||
&org,
|
|
||||||
&node_id.node_id,
|
|
||||||
ts,
|
|
||||||
&stats.cpu_usage,
|
|
||||||
&stats.ram_percent
|
|
||||||
),
|
|
||||||
collect_tree(cnn.clone(), &org, &node_id.node_id, ts, &stats.tree),
|
|
||||||
collect_per_host(&org, &node_id.node_id, ts, &stats.hosts),
|
|
||||||
collect_uisp_devices(cnn.clone(), &org, &stats.uisp_devices, ts),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
warn!(
|
|
||||||
"Unable to find organization for license {}",
|
|
||||||
node_id.license_key
|
|
||||||
);
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn update_last_seen(cnn: Pool<Postgres>, details: &NodeIdAndLicense) {
|
|
||||||
let res = pgdb::new_stats_arrived(cnn, &details.license_key, &details.node_id).await;
|
|
||||||
if res.is_err() {
|
|
||||||
error!(
|
|
||||||
"Unable to update last seen for node {}: {}",
|
|
||||||
details.node_id,
|
|
||||||
res.unwrap_err()
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
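The deleted `queue.rs` above describes a fan-in submission queue: many producers send into one channel, and a single consumer receives each message and spawns a task to process it. A tiny illustrative sketch of the same pattern using Python's asyncio (names and payloads are made up; the real code uses tokio's mpsc channel):

```python
# Illustrative fan-in sketch: many producers, one consumer draining a queue
# and spawning a task per message, as described in the deleted queue.rs.
import asyncio

async def producer(queue: asyncio.Queue, node_id: str):
    await queue.put((node_id, {"stats": "payload"}))

async def ingest(node_id: str, payload: dict):
    print(f"ingesting stats for node {node_id}: {payload}")

async def consumer(queue: asyncio.Queue, expected: int):
    tasks = []
    for _ in range(expected):
        node_id, payload = await queue.get()
        tasks.append(asyncio.create_task(ingest(node_id, payload)))  # fan back out
    await asyncio.gather(*tasks)  # the real consumer loops until the channel closes

async def main():
    queue = asyncio.Queue(maxsize=100)
    nodes = ["node-a", "node-b", "node-c"]
    await asyncio.gather(
        consumer(queue, expected=len(nodes)),
        *(producer(queue, n) for n in nodes),
    )

asyncio.run(main())
```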
@ -1,105 +0,0 @@
use futures::prelude::*;
use influxdb2::{models::DataPoint, Client};
use lts_client::transport_data::StatsTreeNode;
use pgdb::{
    sqlx::{Pool, Postgres},
    OrganizationDetails,
};
use tracing::{info, error};

const SQL: &str = "INSERT INTO site_tree (key, host_id, site_name, index, parent, site_type, max_up, max_down, current_up, current_down, current_rtt) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) ON CONFLICT (key, host_id, site_name) DO NOTHING";

pub async fn collect_tree(
    cnn: Pool<Postgres>,
    org: &OrganizationDetails,
    node_id: &str,
    timestamp: i64,
    totals: &Option<Vec<StatsTreeNode>>,
) -> anyhow::Result<()> {
    if let Some(tree) = totals {
        //println!("{tree:?}");
        let influx_url = format!("http://{}:8086", org.influx_host);
        let client = Client::new(&influx_url, &org.influx_org, &org.influx_token);
        let mut points: Vec<DataPoint> = Vec::new();

        let mut trans = cnn.begin().await?;

        pgdb::sqlx::query("DELETE FROM site_tree WHERE key=$1 AND host_id=$2")
            .bind(org.key.to_string())
            .bind(node_id)
            .execute(&mut trans)
            .await?;

        for node in tree.iter() {
            points.push(
                DataPoint::builder("tree")
                    .tag("host_id", node_id.to_string())
                    .tag("organization_id", org.key.to_string())
                    .tag("node_name", node.name.to_string())
                    .tag("direction", "down".to_string())
                    .timestamp(timestamp)
                    .field("bits_min", node.current_throughput.min.0 as i64)
                    .field("bits_max", node.current_throughput.max.0 as i64)
                    .field("bits_avg", node.current_throughput.avg.0 as i64)
                    .build()?,
            );
            points.push(
                DataPoint::builder("tree")
                    .tag("host_id", node_id.to_string())
                    .tag("organization_id", org.key.to_string())
                    .tag("node_name", node.name.to_string())
                    .tag("direction", "up".to_string())
                    .timestamp(timestamp)
                    .field("bits_min", node.current_throughput.min.1 as i64)
                    .field("bits_max", node.current_throughput.max.1 as i64)
                    .field("bits_avg", node.current_throughput.avg.1 as i64)
                    .build()?,
            );
            points.push(
                DataPoint::builder("tree")
                    .tag("host_id", node_id.to_string())
                    .tag("organization_id", org.key.to_string())
                    .tag("node_name", node.name.to_string())
                    .timestamp(timestamp)
                    .field("rtt_min", node.rtt.min as i64 / 100)
                    .field("rtt_max", node.rtt.max as i64 / 100)
                    .field("rtt_avg", node.rtt.avg as i64 / 100)
                    .build()?,
            );

            let result = pgdb::sqlx::query(SQL)
                .bind(org.key.to_string())
                .bind(node_id)
                .bind(&node.name)
                .bind(node.index as i32)
                .bind(node.immediate_parent.unwrap_or(0) as i32)
                .bind(node.node_type.as_ref().unwrap_or(&String::new()).clone())
                .bind(node.max_throughput.1 as i64)
                .bind(node.max_throughput.0 as i64)
                .bind(node.current_throughput.max.1 as i64)
                .bind(node.current_throughput.max.0 as i64)
                .bind(node.rtt.avg as i64)
                .execute(&mut trans)
                .await;
            if let Err(e) = result {
                error!("Error inserting tree node: {}", e);
            }
        }

        let result = trans.commit().await;
        info!("Transaction committed");
        if let Err(e) = result {
            error!("Error committing transaction: {}", e);
        }

        client
            .write_with_precision(
                &org.influx_bucket,
                stream::iter(points),
                influxdb2::api::write::TimestampPrecision::Seconds,
            )
            .await?;
    }
    Ok(())
}
@ -1,115 +0,0 @@
use futures::prelude::*;
use influxdb2::{models::DataPoint, Client};
use lts_client::transport_data::UispExtDevice;
use pgdb::{
    sqlx::{Pool, Postgres},
    OrganizationDetails,
};

pub async fn collect_uisp_devices(
    cnn: Pool<Postgres>,
    org: &OrganizationDetails,
    devices: &Option<Vec<UispExtDevice>>,
    ts: i64,
) {
    let (sql, influx) = tokio::join!(uisp_sql(cnn, org, devices), uisp_influx(org, devices, ts),);

    if let Err(e) = sql {
        tracing::error!("Error writing uisp sql: {:?}", e);
    }
    if let Err(e) = influx {
        tracing::error!("Error writing uisp influx: {:?}", e);
    }
}

async fn uisp_sql(
    cnn: Pool<Postgres>,
    org: &OrganizationDetails,
    devices: &Option<Vec<UispExtDevice>>,
) -> anyhow::Result<()> {
    if let Some(devices) = devices {
        let mut trans = cnn.begin().await.unwrap();

        // Handle the SQL portion (things that don't need to be graphed, just displayed)

        pgdb::sqlx::query("DELETE FROM uisp_devices_ext WHERE key=$1")
            .bind(org.key.to_string())
            .execute(&mut trans)
            .await?;

        pgdb::sqlx::query("DELETE FROM uisp_devices_interfaces WHERE key=$1")
            .bind(org.key.to_string())
            .execute(&mut trans)
            .await?;

        for device in devices.iter() {
            pgdb::sqlx::query("INSERT INTO uisp_devices_ext (key, device_id, name, model, firmware, status, mode) VALUES ($1, $2, $3, $4, $5, $6, $7)")
                .bind(org.key.to_string())
                .bind(&device.device_id)
                .bind(&device.name)
                .bind(&device.model)
                .bind(&device.firmware)
                .bind(&device.status)
                .bind(&device.mode)
                .execute(&mut trans)
                .await?;

            for interface in device.interfaces.iter() {
                let mut ip_list = String::new();
                for ip in interface.ip.iter() {
                    ip_list.push_str(&format!("{} ", ip));
                }
                pgdb::sqlx::query("INSERT INTO uisp_devices_interfaces (key, device_id, name, mac, status, speed, ip_list) VALUES ($1, $2, $3, $4, $5, $6, $7)")
                    .bind(org.key.to_string())
                    .bind(&device.device_id)
                    .bind(&interface.name)
                    .bind(&interface.mac)
                    .bind(&interface.status)
                    .bind(&interface.speed)
                    .bind(ip_list)
                    .execute(&mut trans)
                    .await?;
            }
        }

        trans.commit().await?;
    }
    Ok(())
}

async fn uisp_influx(
    org: &OrganizationDetails,
    devices: &Option<Vec<UispExtDevice>>,
    timestamp: i64,
) -> anyhow::Result<()> {
    if let Some(devices) = devices {
        let influx_url = format!("http://{}:8086", org.influx_host);
        let client = Client::new(&influx_url, &org.influx_org, &org.influx_token);
        let mut points: Vec<DataPoint> = Vec::new();

        for device in devices.iter() {
            points.push(
                DataPoint::builder("device_ext")
                    .tag("device_id", &device.device_id)
                    .tag("organization_id", org.key.to_string())
                    .tag("direction", "down".to_string())
                    .timestamp(timestamp)
                    .field("rx_signal", device.rx_signal as i64)
                    .field("noise_floor", device.noise_floor as i64)
                    .field("dl_capacity", device.downlink_capacity_mbps as i64)
                    .field("ul_capacity", device.uplink_capacity_mbps as i64)
                    .build()?,
            );
        }

        client
            .write_with_precision(
                &org.influx_bucket,
                stream::iter(points),
                influxdb2::api::write::TimestampPrecision::Seconds,
            )
            .await?;
    }

    Ok(())
}
@ -1,102 +0,0 @@
//! Provides a TCP handler server, listening on port 9128. Connections
//! are expected in the encrypted LTS format (see the `lq_bus` crate).
//! If everything checks out, they are sent to the submission queue
//! for storage.

use super::submission_queue::SubmissionType;
use crate::pki::LIBREQOS_KEYPAIR;
use lts_client::{
    dryoc::dryocbox::{DryocBox, PublicKey},
    transport_data::{LtsCommand, NodeIdAndLicense},
};
use pgdb::sqlx::{Pool, Postgres};
use tokio::{io::AsyncReadExt, net::{TcpListener, TcpStream}, spawn, sync::mpsc::Sender};
use tracing::{info, error, warn};

/// Starts the submission server, listening on port 9128.
/// The server runs in the background.
pub async fn submissions_server(
    cnn: Pool<Postgres>,
    sender: Sender<SubmissionType>,
) -> anyhow::Result<()> {
    let listener = TcpListener::bind(":::9128").await?;
    info!("Listening for stats submissions on :::9128");

    loop {
        let (mut socket, address) = listener.accept().await?;
        info!("Connection from {address:?}");
        let pool = cnn.clone();
        let my_sender = sender.clone();
        spawn(async move {
            loop {
                if let Ok(message) = read_message(&mut socket, pool.clone()).await {
                    my_sender.send(message).await.unwrap();
                } else {
                    error!("Read failed. Dropping socket.");
                    std::mem::drop(socket);
                    break;
                }
            }
        });
    }
}

#[tracing::instrument]
async fn read_message(socket: &mut TcpStream, pool: Pool<Postgres>) -> anyhow::Result<SubmissionType> {
    read_version(socket).await?;
    let header_size = read_size(socket).await?;
    let header = read_header(socket, header_size as usize).await?;
    let body_size = read_size(socket).await?;
    let message = read_body(socket, pool.clone(), body_size as usize, &header).await?;
    Ok((header, message))
}

async fn read_version(stream: &mut TcpStream) -> anyhow::Result<()> {
    let version = stream.read_u16().await?;
    if version != 1 {
        warn!("Received a version {version} header.");
        return Err(anyhow::Error::msg("Received an unknown version header"));
    }
    Ok(())
}

async fn read_size(stream: &mut TcpStream) -> anyhow::Result<u64> {
    let size = stream.read_u64().await?;
    Ok(size)
}

async fn read_header(stream: &mut TcpStream, size: usize) -> anyhow::Result<NodeIdAndLicense> {
    let mut buffer = vec![0u8; size];
    let _bytes_read = stream.read(&mut buffer).await?;
    let header: NodeIdAndLicense = lts_client::cbor::from_slice(&buffer)?;
    Ok(header)
}

async fn read_body(stream: &mut TcpStream, pool: Pool<Postgres>, size: usize, header: &NodeIdAndLicense) -> anyhow::Result<LtsCommand> {
    info!("Reading body of size {size}");
    info!("{header:?}");

    let mut buffer = vec![0u8; size];
    let bytes_read = stream.read_exact(&mut buffer).await?;
    if bytes_read != size {
        warn!("Received a body of size {bytes_read}, expected {size}");
        return Err(anyhow::Error::msg("Received a body of unexpected size"));
    }

    // Check the header against the database and retrieve the current
    // public key
    let public_key = pgdb::fetch_public_key(pool, &header.license_key, &header.node_id).await?;
    let public_key: PublicKey = lts_client::cbor::from_slice(&public_key)?;
    let private_key = LIBREQOS_KEYPAIR.read().unwrap().secret_key.clone();

    // Decrypt
    let dryocbox = DryocBox::from_bytes(&buffer).expect("failed to read box");
    let decrypted = dryocbox
        .decrypt_to_vec(&header.nonce.into(), &public_key, &private_key)?;

    let decrypted = miniz_oxide::inflate::decompress_to_vec(&decrypted).expect("failed to decompress");

    // Try to deserialize
    let payload = lts_client::cbor::from_slice(&decrypted)?;
    Ok(payload)
}
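Read back to back, `read_message`, `read_version`, `read_size`, `read_header` and `read_body` define the wire format this listener expects. The sketch below is not part of this commit; it is a minimal client-side illustration of that framing, assuming `header_bytes` is already a CBOR-encoded `NodeIdAndLicense` and `body_bytes` is the compressed, encrypted payload produced on the `lts_client` side.

```rust
use tokio::io::AsyncWriteExt;
use tokio::net::TcpStream;

/// Hypothetical helper mirroring the server's read order:
/// version (u16), header length (u64), header bytes,
/// body length (u64), body bytes. Integers are big-endian,
/// matching read_u16/read_u64 on the server side.
pub async fn send_submission(
    stream: &mut TcpStream,
    header_bytes: &[u8], // assumed: CBOR-encoded NodeIdAndLicense
    body_bytes: &[u8],   // assumed: compressed, encrypted LtsCommand
) -> anyhow::Result<()> {
    stream.write_u16(1).await?; // protocol version checked by read_version
    stream.write_u64(header_bytes.len() as u64).await?;
    stream.write_all(header_bytes).await?;
    stream.write_u64(body_bytes.len() as u64).await?;
    stream.write_all(body_bytes).await?;
    Ok(())
}
```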
@ -1,28 +0,0 @@
[package]
name = "lts_node"
version = "0.1.0"
edition = "2021"
license = "GPL-2.0-only"

[dependencies]
tokio = { version = "1.25.0", features = ["full"] }
anyhow = "1"
serde = { version = "1.0", features = ["derive"] }
axum = { version = "0.6", features = ["ws", "headers"] }
lts_client = { path = "../lts_client" }
lqos_config = { path = "../../lqos_config" }
serde_json = "1"
pgdb = { path = "../pgdb" }
once_cell = "1"
influxdb2 = "0"
influxdb2-structmap = "0"
num-traits = "0"
futures = "0"
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
tower = { version = "0.4", features = ["util"] }
tower-http = { version = "0.4.0", features = ["fs", "trace"] }
chrono = "0"
miniz_oxide = "0.7.1"
tokio-util = { version = "0.7.8", features = ["io"] }
wasm_pipe_types = { path = "../wasm_pipe_types" }
@ -1,13 +0,0 @@
#!/bin/bash
pushd ../wasm_pipe
./build.sh
popd
pushd ../site_build
./esbuild.mjs
popd
pushd web
cp ../../site_build/output/* .
cp ../../site_build/src/main.html .
cp ../../site_build/wasm/wasm_pipe_bg.wasm .
popd
RUST_LOG=info RUST_BACKTRACE=1 cargo run
@ -1,23 +0,0 @@
mod web;
use tracing::{info, error};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // install global collector configured based on RUST_LOG env var.
    tracing_subscriber::fmt::init();

    // Get the database connection pool
    let pool = pgdb::get_connection_pool(5).await;
    if pool.is_err() {
        error!("Unable to connect to the database");
        error!("{pool:?}");
        return Err(anyhow::Error::msg("Unable to connect to the database"));
    }
    let pool = pool.unwrap();

    // Start the webserver
    info!("Starting the webserver");
    let _ = tokio::spawn(web::webserver(pool)).await;

    Ok(())
}
@ -1,92 +0,0 @@
//! The webserver listens on port 9127, but it is intended that this only
//! listen on localhost and have a reverse proxy in front of it. The proxy
//! should provide HTTPS.
mod wss;
use crate::web::wss::ws_handler;
use axum::body::StreamBody;
use axum::http::{header, HeaderMap};
use axum::response::IntoResponse;
use axum::{response::Html, routing::get, Router};
use pgdb::sqlx::Pool;
use pgdb::sqlx::Postgres;
use tokio_util::io::ReaderStream;
use tower_http::trace::TraceLayer;
use tower_http::trace::DefaultMakeSpan;

const JS_BUNDLE: &str = include_str!("../../web/app.js");
const JS_MAP: &str = include_str!("../../web/app.js.map");
const CSS: &str = include_str!("../../web/style.css");
const CSS_MAP: &str = include_str!("../../web/style.css.map");
const HTML_MAIN: &str = include_str!("../../web/main.html");
const WASM_BODY: &[u8] = include_bytes!("../../web/wasm_pipe_bg.wasm");

pub async fn webserver(cnn: Pool<Postgres>) {
    let app = Router::new()
        .route("/", get(index_page))
        .route("/app.js", get(js_bundle))
        .route("/app.js.map", get(js_map))
        .route("/style.css", get(css))
        .route("/style.css.map", get(css_map))
        .route("/ws", get(ws_handler))
        .route("/wasm_pipe_bg.wasm", get(wasm_file))
        .with_state(cnn)
        .layer(
            TraceLayer::new_for_http()
                .make_span_with(DefaultMakeSpan::default().include_headers(true)),
        );

    tracing::info!("Listening for web traffic on 0.0.0.0:9127");
    axum::Server::bind(&"0.0.0.0:9127".parse().unwrap())
        .serve(app.into_make_service())
        .await
        .unwrap();
}

async fn index_page() -> Html<String> {
    Html(HTML_MAIN.to_string())
}

async fn js_bundle() -> axum::response::Response<String> {
    axum::response::Response::builder()
        .header("Content-Type", "text/javascript")
        .body(JS_BUNDLE.to_string())
        .unwrap()
}

async fn js_map() -> axum::response::Response<String> {
    axum::response::Response::builder()
        .header("Content-Type", "text/json")
        .body(JS_MAP.to_string())
        .unwrap()
}

async fn css() -> axum::response::Response<String> {
    axum::response::Response::builder()
        .header("Content-Type", "text/css")
        .body(CSS.to_string())
        .unwrap()
}

async fn css_map() -> axum::response::Response<String> {
    axum::response::Response::builder()
        .header("Content-Type", "text/json")
        .body(CSS_MAP.to_string())
        .unwrap()
}

async fn wasm_file() -> impl IntoResponse {
    let mut headers = HeaderMap::new();
    headers.insert(
        header::CONTENT_TYPE,
        header::HeaderValue::from_static("application/wasm"),
    );
    headers.insert(
        header::CONTENT_DISPOSITION,
        header::HeaderValue::from_static("attachment; filename=wasm_pipe_bg.wasm"),
    );
    axum::response::Response::builder()
        .header(header::CONTENT_TYPE, header::HeaderValue::from_static("application/wasm"))
        .header(header::CONTENT_DISPOSITION, header::HeaderValue::from_static("attachment; filename=wasm_pipe_bg.wasm"))
        .body(StreamBody::new(ReaderStream::new(WASM_BODY)))
        .unwrap()
}
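The module comment above says the listener is meant to stay on localhost behind an HTTPS reverse proxy, yet `webserver` binds `0.0.0.0:9127`. As a hypothetical hardening sketch only (not the shipped configuration), a loopback-only bind with the same axum 0.6 API would look like this:

```rust
use axum::{routing::get, Router};

// Illustrative only: a stand-alone server bound to loopback, so that only a
// local reverse proxy (which terminates HTTPS) can reach it.
#[tokio::main]
async fn main() {
    let app = Router::new().route("/", get(|| async { "ok" }));
    axum::Server::bind(&"127.0.0.1:9127".parse().unwrap())
        .serve(app.into_make_service())
        .await
        .unwrap();
}
```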
@ -1,57 +0,0 @@
use axum::extract::ws::WebSocket;
use pgdb::sqlx::{Pool, Postgres};
use serde::Serialize;
use wasm_pipe_types::WasmResponse;

use super::send_response;

#[derive(Debug, Serialize)]
pub struct LoginResult {
    pub msg: String,
    pub token: String,
    pub name: String,
    pub license_key: String,
}

pub async fn on_login(license: &str, username: &str, password: &str, socket: &mut WebSocket, cnn: Pool<Postgres>) -> Option<LoginResult> {
    let login = pgdb::try_login(cnn, license, username, password).await;
    if let Ok(login) = login {
        let lr = WasmResponse::LoginOk {
            token: login.token.clone(),
            name: login.name.clone(),
            license_key: license.to_string(),
        };
        send_response(socket, lr).await;
        return Some(LoginResult {
            msg: "Login Ok".to_string(),
            token: login.token.to_string(),
            name: login.name.to_string(),
            license_key: license.to_string(),
        });
    } else {
        let lr = WasmResponse::LoginFail;
        send_response(socket, lr).await;
    }
    None
}

pub async fn on_token_auth(token_id: &str, socket: &mut WebSocket, cnn: Pool<Postgres>) -> Option<LoginResult> {
    let login = pgdb::token_to_credentials(cnn, token_id).await;
    if let Ok(login) = login {
        let lr = WasmResponse::AuthOk {
            token: login.token.clone(),
            name: login.name.clone(),
            license_key: login.license.clone(),
        };
        send_response(socket, lr).await;
        return Some(LoginResult {
            msg: "Login Ok".to_string(),
            token: login.token.to_string(),
            name: login.name.to_string(),
            license_key: login.license.to_string(),
        });
    } else {
        send_response(socket, WasmResponse::AuthFail).await;
    }
    None
}
@ -1,316 +0,0 @@
|
|||||||
use crate::web::wss::{
|
|
||||||
nodes::node_status,
|
|
||||||
queries::{
|
|
||||||
ext_device::{
|
|
||||||
send_extended_device_capacity_graph, send_extended_device_info,
|
|
||||||
send_extended_device_snr_graph,
|
|
||||||
},
|
|
||||||
omnisearch, root_heat_map, send_circuit_info, send_packets_for_all_nodes,
|
|
||||||
send_packets_for_node, send_perf_for_node, send_rtt_for_all_nodes,
|
|
||||||
send_rtt_for_all_nodes_circuit, send_rtt_for_all_nodes_site, send_rtt_for_node,
|
|
||||||
send_site_info, send_site_parents, send_site_stack_map, send_throughput_for_all_nodes,
|
|
||||||
send_throughput_for_all_nodes_by_circuit, send_throughput_for_all_nodes_by_site,
|
|
||||||
send_throughput_for_node, site_heat_map,
|
|
||||||
site_tree::send_site_tree,
|
|
||||||
time_period::InfluxTimePeriod,
|
|
||||||
send_circuit_parents, send_root_parents,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
use axum::{
|
|
||||||
extract::{
|
|
||||||
ws::{Message, WebSocket, WebSocketUpgrade},
|
|
||||||
State,
|
|
||||||
},
|
|
||||||
response::IntoResponse,
|
|
||||||
};
|
|
||||||
use pgdb::sqlx::{Pool, Postgres};
|
|
||||||
use wasm_pipe_types::{WasmRequest, WasmResponse};
|
|
||||||
mod login;
|
|
||||||
mod nodes;
|
|
||||||
mod queries;
|
|
||||||
|
|
||||||
pub async fn ws_handler(
|
|
||||||
ws: WebSocketUpgrade,
|
|
||||||
State(state): State<Pool<Postgres>>,
|
|
||||||
) -> impl IntoResponse {
|
|
||||||
ws.on_upgrade(move |sock| handle_socket(sock, state))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn handle_socket(mut socket: WebSocket, cnn: Pool<Postgres>) {
|
|
||||||
tracing::info!("WebSocket Connected");
|
|
||||||
let mut credentials: Option<login::LoginResult> = None;
|
|
||||||
while let Some(msg) = socket.recv().await {
|
|
||||||
let cnn = cnn.clone();
|
|
||||||
let msg = msg.unwrap();
|
|
||||||
|
|
||||||
// Get the binary message and decompress it
|
|
||||||
tracing::info!("Received a message: {:?}", msg);
|
|
||||||
let raw = msg.into_data();
|
|
||||||
let uncompressed = miniz_oxide::inflate::decompress_to_vec(&raw).unwrap();
|
|
||||||
let msg = lts_client::cbor::from_slice::<WasmRequest>(&uncompressed).unwrap();
|
|
||||||
tracing::info!("{msg:?}");
|
|
||||||
|
|
||||||
// Update the token credentials (if there are any)
|
|
||||||
if let Some(credentials) = &credentials {
|
|
||||||
let _ = pgdb::refresh_token(cnn.clone(), &credentials.token).await;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle the message by type
|
|
||||||
let matcher = (&msg, &mut credentials);
|
|
||||||
let wss = &mut socket;
|
|
||||||
match matcher {
|
|
||||||
// Handle login with just a token
|
|
||||||
(WasmRequest::Auth { token }, _) => {
|
|
||||||
let result = login::on_token_auth(token, &mut socket, cnn).await;
|
|
||||||
if let Some(result) = result {
|
|
||||||
credentials = Some(result);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Handle login with a username and password
|
|
||||||
(
|
|
||||||
WasmRequest::Login {
|
|
||||||
license,
|
|
||||||
username,
|
|
||||||
password,
|
|
||||||
},
|
|
||||||
_,
|
|
||||||
) => {
|
|
||||||
let result = login::on_login(license, username, password, &mut socket, cnn).await;
|
|
||||||
if let Some(result) = result {
|
|
||||||
credentials = Some(result);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Node status for dashboard
|
|
||||||
(WasmRequest::GetNodeStatus, Some(credentials)) => {
|
|
||||||
node_status(&cnn, wss, &credentials.license_key).await;
|
|
||||||
}
|
|
||||||
// Packet chart for dashboard
|
|
||||||
(WasmRequest::PacketChart { period }, Some(credentials)) => {
|
|
||||||
let _ =
|
|
||||||
send_packets_for_all_nodes(&cnn, wss, &credentials.license_key, period.into())
|
|
||||||
.await;
|
|
||||||
}
|
|
||||||
// Packet chart for individual node
|
|
||||||
(
|
|
||||||
WasmRequest::PacketChartSingle {
|
|
||||||
period,
|
|
||||||
node_id,
|
|
||||||
node_name,
|
|
||||||
},
|
|
||||||
Some(credentials),
|
|
||||||
) => {
|
|
||||||
let _ = send_packets_for_node(
|
|
||||||
&cnn,
|
|
||||||
wss,
|
|
||||||
&credentials.license_key,
|
|
||||||
period.into(),
|
|
||||||
node_id,
|
|
||||||
node_name,
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
}
|
|
||||||
// Throughput chart for the dashboard
|
|
||||||
(WasmRequest::ThroughputChart { period }, Some(credentials)) => {
|
|
||||||
let _ = send_throughput_for_all_nodes(
|
|
||||||
&cnn,
|
|
||||||
wss,
|
|
||||||
&credentials.license_key,
|
|
||||||
InfluxTimePeriod::new(period),
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
}
|
|
||||||
// Throughput chart for a single shaper node
|
|
||||||
(
|
|
||||||
WasmRequest::ThroughputChartSingle {
|
|
||||||
period,
|
|
||||||
node_id,
|
|
||||||
node_name,
|
|
||||||
},
|
|
||||||
Some(credentials),
|
|
||||||
) => {
|
|
||||||
let _ = send_throughput_for_node(
|
|
||||||
&cnn,
|
|
||||||
wss,
|
|
||||||
&credentials.license_key,
|
|
||||||
InfluxTimePeriod::new(period),
|
|
||||||
node_id.to_string(),
|
|
||||||
node_name.to_string(),
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
}
|
|
||||||
(WasmRequest::ThroughputChartSite { period, site_id }, Some(credentials)) => {
|
|
||||||
let _ = send_throughput_for_all_nodes_by_site(
|
|
||||||
&cnn,
|
|
||||||
wss,
|
|
||||||
&credentials.license_key,
|
|
||||||
site_id.to_string(),
|
|
||||||
InfluxTimePeriod::new(period),
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
}
|
|
||||||
(WasmRequest::ThroughputChartCircuit { period, circuit_id }, Some(credentials)) => {
|
|
||||||
let _ = send_throughput_for_all_nodes_by_circuit(
|
|
||||||
&cnn,
|
|
||||||
wss,
|
|
||||||
&credentials.license_key,
|
|
||||||
circuit_id.to_string(),
|
|
||||||
InfluxTimePeriod::new(period),
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
}
|
|
||||||
// Rtt Chart
|
|
||||||
(WasmRequest::RttChart { period }, Some(credentials)) => {
|
|
||||||
let _ = send_rtt_for_all_nodes(
|
|
||||||
&cnn,
|
|
||||||
wss,
|
|
||||||
&credentials.license_key,
|
|
||||||
InfluxTimePeriod::new(period),
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
}
|
|
||||||
(WasmRequest::RttChartSite { period, site_id }, Some(credentials)) => {
|
|
||||||
let _ = send_rtt_for_all_nodes_site(
|
|
||||||
&cnn,
|
|
||||||
wss,
|
|
||||||
&credentials.license_key,
|
|
||||||
site_id.to_string(),
|
|
||||||
InfluxTimePeriod::new(period),
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
}
|
|
||||||
(
|
|
||||||
WasmRequest::RttChartSingle {
|
|
||||||
period,
|
|
||||||
node_id,
|
|
||||||
node_name,
|
|
||||||
},
|
|
||||||
Some(credentials),
|
|
||||||
) => {
|
|
||||||
let _ = send_rtt_for_node(
|
|
||||||
&cnn,
|
|
||||||
wss,
|
|
||||||
&credentials.license_key,
|
|
||||||
InfluxTimePeriod::new(period),
|
|
||||||
node_id.to_string(),
|
|
||||||
node_name.to_string(),
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
}
|
|
||||||
(WasmRequest::RttChartCircuit { period, circuit_id }, Some(credentials)) => {
|
|
||||||
let _ = send_rtt_for_all_nodes_circuit(
|
|
||||||
&cnn,
|
|
||||||
wss,
|
|
||||||
&credentials.license_key,
|
|
||||||
circuit_id.to_string(),
|
|
||||||
InfluxTimePeriod::new(period),
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
}
|
|
||||||
// Site Stack
|
|
||||||
(WasmRequest::SiteStack { period, site_id }, Some(credentials)) => {
|
|
||||||
let _ = send_site_stack_map(
|
|
||||||
&cnn,
|
|
||||||
wss,
|
|
||||||
&credentials.license_key,
|
|
||||||
InfluxTimePeriod::new(period),
|
|
||||||
site_id.to_string(),
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
}
|
|
||||||
(WasmRequest::RootHeat { period }, Some(credentials)) => {
|
|
||||||
let _ = root_heat_map(
|
|
||||||
&cnn,
|
|
||||||
wss,
|
|
||||||
&credentials.license_key,
|
|
||||||
InfluxTimePeriod::new(period),
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
}
|
|
||||||
(WasmRequest::SiteHeat { period, site_id }, Some(credentials)) => {
|
|
||||||
let _ = site_heat_map(
|
|
||||||
&cnn,
|
|
||||||
wss,
|
|
||||||
&credentials.license_key,
|
|
||||||
site_id,
|
|
||||||
InfluxTimePeriod::new(period),
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
}
|
|
||||||
(
|
|
||||||
WasmRequest::NodePerfChart {
|
|
||||||
period,
|
|
||||||
node_id,
|
|
||||||
node_name,
|
|
||||||
},
|
|
||||||
Some(credentials),
|
|
||||||
) => {
|
|
||||||
let _ = send_perf_for_node(
|
|
||||||
&cnn,
|
|
||||||
wss,
|
|
||||||
&credentials.license_key,
|
|
||||||
InfluxTimePeriod::new(period),
|
|
||||||
node_id.to_string(),
|
|
||||||
node_name.to_string(),
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
}
|
|
||||||
(WasmRequest::Tree { parent }, Some(credentials)) => {
|
|
||||||
send_site_tree(&cnn, wss, &credentials.license_key, parent).await;
|
|
||||||
}
|
|
||||||
(WasmRequest::SiteInfo { site_id }, Some(credentials)) => {
|
|
||||||
send_site_info(&cnn, wss, &credentials.license_key, site_id).await;
|
|
||||||
}
|
|
||||||
(WasmRequest::SiteParents { site_id }, Some(credentials)) => {
|
|
||||||
send_site_parents(&cnn, wss, &credentials.license_key, site_id).await;
|
|
||||||
}
|
|
||||||
(WasmRequest::CircuitParents { circuit_id }, Some(credentials)) => {
|
|
||||||
send_circuit_parents(&cnn, wss, &credentials.license_key, circuit_id).await;
|
|
||||||
}
|
|
||||||
(WasmRequest::RootParents, Some(credentials)) => {
|
|
||||||
send_root_parents(&cnn, wss, &credentials.license_key).await;
|
|
||||||
}
|
|
||||||
(WasmRequest::Search { term }, Some(credentials)) => {
|
|
||||||
let _ = omnisearch(&cnn, wss, &credentials.license_key, term).await;
|
|
||||||
}
|
|
||||||
(WasmRequest::CircuitInfo { circuit_id }, Some(credentials)) => {
|
|
||||||
send_circuit_info(&cnn, wss, &credentials.license_key, circuit_id).await;
|
|
||||||
}
|
|
||||||
(WasmRequest::ExtendedDeviceInfo { circuit_id }, Some(credentials)) => {
|
|
||||||
send_extended_device_info(&cnn, wss, &credentials.license_key, circuit_id).await;
|
|
||||||
}
|
|
||||||
(WasmRequest::SignalNoiseChartExt { period, device_id }, Some(credentials)) => {
|
|
||||||
let _ = send_extended_device_snr_graph(
|
|
||||||
&cnn,
|
|
||||||
wss,
|
|
||||||
&credentials.license_key,
|
|
||||||
device_id,
|
|
||||||
InfluxTimePeriod::new(period),
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
}
|
|
||||||
(WasmRequest::DeviceCapacityChartExt { period, device_id }, Some(credentials)) => {
|
|
||||||
let _ = send_extended_device_capacity_graph(
|
|
||||||
&cnn,
|
|
||||||
wss,
|
|
||||||
&credentials.license_key,
|
|
||||||
device_id,
|
|
||||||
InfluxTimePeriod::new(period),
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
}
|
|
||||||
(_, None) => {
|
|
||||||
tracing::error!("No credentials");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn serialize_response(response: WasmResponse) -> Vec<u8> {
|
|
||||||
let cbor = lts_client::cbor::to_vec(&response).unwrap();
|
|
||||||
miniz_oxide::deflate::compress_to_vec(&cbor, 8)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn send_response(socket: &mut WebSocket, response: WasmResponse) {
|
|
||||||
let serialized = serialize_response(response);
|
|
||||||
socket.send(Message::Binary(serialized)).await.unwrap();
|
|
||||||
}
|
|
@ -1,27 +0,0 @@
use axum::extract::ws::WebSocket;
use pgdb::sqlx::{Pool, Postgres};
use wasm_pipe_types::Node;

use crate::web::wss::send_response;

fn convert(ns: pgdb::NodeStatus) -> Node {
    Node {
        node_id: ns.node_id,
        node_name: ns.node_name,
        last_seen: ns.last_seen,
    }
}

pub async fn node_status(cnn: &Pool<Postgres>, socket: &mut WebSocket, key: &str) {
    tracing::info!("Fetching node status, {key}");
    let nodes = pgdb::node_status(cnn, key).await;
    match nodes {
        Ok(nodes) => {
            let nodes: Vec<Node> = nodes.into_iter().map(convert).collect();
            send_response(socket, wasm_pipe_types::WasmResponse::NodeStatus { nodes }).await;
        },
        Err(e) => {
            tracing::error!("Unable to obtain node status: {}", e);
        }
    }
}
@ -1,29 +0,0 @@
use axum::extract::ws::WebSocket;
use pgdb::sqlx::{Pool, Postgres};
use wasm_pipe_types::CircuitList;

use crate::web::wss::send_response;

fn from(circuit: pgdb::CircuitInfo) -> CircuitList {
    CircuitList {
        circuit_name: circuit.circuit_name,
        device_id: circuit.device_id,
        device_name: circuit.device_name,
        parent_node: circuit.parent_node,
        mac: circuit.mac,
        download_min_mbps: circuit.download_min_mbps,
        download_max_mbps: circuit.download_max_mbps,
        upload_min_mbps: circuit.upload_min_mbps,
        upload_max_mbps: circuit.upload_max_mbps,
        comment: circuit.comment,
        ip_range: circuit.ip_range,
        subnet: circuit.subnet,
    }
}

pub async fn send_circuit_info(cnn: &Pool<Postgres>, socket: &mut WebSocket, key: &str, circuit_id: &str) {
    if let Ok(hosts) = pgdb::get_circuit_info(cnn, key, circuit_id).await {
        let hosts = hosts.into_iter().map(from).collect::<Vec<_>>();
        send_response(socket, wasm_pipe_types::WasmResponse::CircuitInfo { data: hosts }).await;
    }
}
@ -1,166 +0,0 @@
|
|||||||
use std::collections::HashSet;
|
|
||||||
use axum::extract::ws::WebSocket;
|
|
||||||
use chrono::{DateTime, FixedOffset};
|
|
||||||
use influxdb2::{FromDataPoint, models::Query, Client};
|
|
||||||
use pgdb::{sqlx::{Pool, Postgres}, organization_cache::get_org_details};
|
|
||||||
|
|
||||||
use crate::web::wss::send_response;
|
|
||||||
|
|
||||||
use super::time_period::InfluxTimePeriod;
|
|
||||||
|
|
||||||
pub async fn send_extended_device_info(
|
|
||||||
cnn: &Pool<Postgres>,
|
|
||||||
socket: &mut WebSocket,
|
|
||||||
key: &str,
|
|
||||||
circuit_id: &str,
|
|
||||||
) {
|
|
||||||
// Get devices for circuit
|
|
||||||
if let Ok(hosts_list) = pgdb::get_circuit_info(cnn, key, circuit_id).await {
|
|
||||||
// Get the hosts known to be in this circuit
|
|
||||||
let mut hosts = HashSet::new();
|
|
||||||
hosts_list.into_iter().for_each(|h| {
|
|
||||||
hosts.insert(h.device_id);
|
|
||||||
});
|
|
||||||
if hosts.is_empty() {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
println!("{hosts:?}");
|
|
||||||
|
|
||||||
// Get extended data
|
|
||||||
let mut extended_data = Vec::new();
|
|
||||||
for host in hosts.iter() {
|
|
||||||
let ext = pgdb::get_device_info_ext(cnn, key, host).await;
|
|
||||||
if let Ok(ext) = ext {
|
|
||||||
let mut ext_wasm = wasm_pipe_types::ExtendedDeviceInfo {
|
|
||||||
device_id: ext.device_id.clone(),
|
|
||||||
name: ext.name.clone(),
|
|
||||||
model: ext.model.clone(),
|
|
||||||
firmware: ext.firmware.clone(),
|
|
||||||
status: ext.status.clone(),
|
|
||||||
mode: ext.mode.clone(),
|
|
||||||
channel_width: ext.channel_width,
|
|
||||||
tx_power: ext.tx_power,
|
|
||||||
interfaces: Vec::new(),
|
|
||||||
};
|
|
||||||
if let Ok(interfaces) = pgdb::get_device_interfaces_ext(cnn, key, host).await {
|
|
||||||
for ed in interfaces {
|
|
||||||
let edw = wasm_pipe_types::ExtendedDeviceInterface {
|
|
||||||
name: ed.name,
|
|
||||||
mac: ed.mac,
|
|
||||||
status: ed.status,
|
|
||||||
speed: ed.speed,
|
|
||||||
ip_list: ed.ip_list,
|
|
||||||
};
|
|
||||||
ext_wasm.interfaces.push(edw);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
extended_data.push(ext_wasm);
|
|
||||||
} else {
|
|
||||||
tracing::error!("Error getting extended device info: {:?}", ext);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// If there is any, send it
|
|
||||||
println!("{extended_data:?}");
|
|
||||||
if !extended_data.is_empty() {
|
|
||||||
send_response(socket, wasm_pipe_types::WasmResponse::DeviceExt { data: extended_data }).await;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn send_extended_device_snr_graph(
|
|
||||||
cnn: &Pool<Postgres>,
|
|
||||||
socket: &mut WebSocket,
|
|
||||||
key: &str,
|
|
||||||
device_id: &str,
|
|
||||||
period: InfluxTimePeriod,
|
|
||||||
) -> anyhow::Result<()> {
|
|
||||||
if let Some(org) = get_org_details(cnn, key).await {
|
|
||||||
let influx_url = format!("http://{}:8086", org.influx_host);
|
|
||||||
let client = Client::new(influx_url, &org.influx_org, &org.influx_token);
|
|
||||||
|
|
||||||
let qs = format!(
|
|
||||||
"from(bucket: \"{}\")
|
|
||||||
|> {}
|
|
||||||
|> filter(fn: (r) => r[\"_measurement\"] == \"device_ext\")
|
|
||||||
|> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
|
|
||||||
|> filter(fn: (r) => r[\"device_id\"] == \"{}\")
|
|
||||||
|> filter(fn: (r) => r[\"_field\"] == \"noise_floor\" or r[\"_field\"] == \"rx_signal\")
|
|
||||||
|> {}
|
|
||||||
|> yield(name: \"last\")",
|
|
||||||
org.influx_bucket, period.range(), org.key, device_id, period.aggregate_window()
|
|
||||||
);
|
|
||||||
//println!("{qs}");
|
|
||||||
|
|
||||||
let query = Query::new(qs);
|
|
||||||
let rows = client.query::<SnrRow>(Some(query)).await?;
|
|
||||||
|
|
||||||
let mut sn = Vec::new();
|
|
||||||
rows.iter().for_each(|row| {
|
|
||||||
let snr = wasm_pipe_types::SignalNoiseChartExt {
|
|
||||||
noise: row.noise_floor,
|
|
||||||
signal: row.rx_signal,
|
|
||||||
date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
|
|
||||||
};
|
|
||||||
sn.push(snr);
|
|
||||||
});
|
|
||||||
send_response(socket, wasm_pipe_types::WasmResponse::DeviceExtSnr { data: sn, device_id: device_id.to_string() }).await;
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, FromDataPoint, Default)]
|
|
||||||
pub struct SnrRow {
|
|
||||||
pub device_id: String,
|
|
||||||
pub noise_floor: f64,
|
|
||||||
pub rx_signal: f64,
|
|
||||||
pub time: DateTime<FixedOffset>,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn send_extended_device_capacity_graph(
|
|
||||||
cnn: &Pool<Postgres>,
|
|
||||||
socket: &mut WebSocket,
|
|
||||||
key: &str,
|
|
||||||
device_id: &str,
|
|
||||||
period: InfluxTimePeriod,
|
|
||||||
) -> anyhow::Result<()> {
|
|
||||||
if let Some(org) = get_org_details(cnn, key).await {
|
|
||||||
let influx_url = format!("http://{}:8086", org.influx_host);
|
|
||||||
let client = Client::new(influx_url, &org.influx_org, &org.influx_token);
|
|
||||||
|
|
||||||
let qs = format!(
|
|
||||||
"from(bucket: \"{}\")
|
|
||||||
|> {}
|
|
||||||
|> filter(fn: (r) => r[\"_measurement\"] == \"device_ext\")
|
|
||||||
|> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
|
|
||||||
|> filter(fn: (r) => r[\"device_id\"] == \"{}\")
|
|
||||||
|> filter(fn: (r) => r[\"_field\"] == \"dl_capacity\" or r[\"_field\"] == \"ul_capacity\")
|
|
||||||
|> {}
|
|
||||||
|> yield(name: \"last\")",
|
|
||||||
org.influx_bucket, period.range(), org.key, device_id, period.aggregate_window()
|
|
||||||
);
|
|
||||||
//println!("{qs}");
|
|
||||||
|
|
||||||
let query = Query::new(qs);
|
|
||||||
let rows = client.query::<CapacityRow>(Some(query)).await?;
|
|
||||||
|
|
||||||
let mut sn = Vec::new();
|
|
||||||
rows.iter().for_each(|row| {
|
|
||||||
let snr = wasm_pipe_types::CapacityChartExt {
|
|
||||||
dl: row.dl_capacity,
|
|
||||||
ul: row.ul_capacity,
|
|
||||||
date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
|
|
||||||
};
|
|
||||||
sn.push(snr);
|
|
||||||
});
|
|
||||||
send_response(socket, wasm_pipe_types::WasmResponse::DeviceExtCapacity { data: sn, device_id: device_id.to_string() }).await;
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, FromDataPoint, Default)]
|
|
||||||
pub struct CapacityRow {
|
|
||||||
pub device_id: String,
|
|
||||||
pub dl_capacity: f64,
|
|
||||||
pub ul_capacity: f64,
|
|
||||||
pub time: DateTime<FixedOffset>,
|
|
||||||
}
|
|
@ -1,28 +0,0 @@
//! Provides pre-packaged queries for obtaining data, that will
//! then be used by the web server to respond to requests.

mod circuit_info;
mod node_perf;
mod packet_counts;
mod rtt;
mod search;
mod site_heat_map;
mod site_info;
mod site_parents;
pub mod site_tree;
mod throughput;
pub mod ext_device;
pub mod time_period;
pub use circuit_info::send_circuit_info;
pub use node_perf::send_perf_for_node;
pub use packet_counts::{send_packets_for_all_nodes, send_packets_for_node};
pub use rtt::{send_rtt_for_all_nodes, send_rtt_for_all_nodes_site, send_rtt_for_node, send_rtt_for_all_nodes_circuit};
pub use search::omnisearch;
pub use site_heat_map::{root_heat_map, site_heat_map};
pub use site_info::send_site_info;
pub use site_parents::{send_site_parents, send_circuit_parents, send_root_parents};
pub use throughput::{
    send_throughput_for_all_nodes, send_throughput_for_all_nodes_by_circuit,
    send_throughput_for_all_nodes_by_site, send_throughput_for_node,
    send_site_stack_map,
};
@ -1,97 +0,0 @@
use axum::extract::ws::WebSocket;
use chrono::{DateTime, FixedOffset, Utc};
use influxdb2::{Client, FromDataPoint, models::Query};
use pgdb::{sqlx::{Pool, Postgres}, organization_cache::get_org_details};
use wasm_pipe_types::{PerfHost, Perf};
use crate::web::wss::send_response;
use super::time_period::InfluxTimePeriod;

#[derive(Debug, FromDataPoint)]
pub struct PerfRow {
    pub host_id: String,
    pub time: DateTime<FixedOffset>,
    pub cpu: f64,
    pub cpu_max: f64,
    pub ram: f64,
}

impl Default for PerfRow {
    fn default() -> Self {
        Self {
            host_id: "".to_string(),
            time: DateTime::<Utc>::MIN_UTC.into(),
            cpu: 0.0,
            cpu_max: 0.0,
            ram: 0.0,
        }
    }
}

pub async fn send_perf_for_node(
    cnn: &Pool<Postgres>,
    socket: &mut WebSocket,
    key: &str,
    period: InfluxTimePeriod,
    node_id: String,
    node_name: String,
) -> anyhow::Result<()> {
    let node = get_perf_for_node(cnn, key, node_id, node_name, period).await?;
    send_response(socket, wasm_pipe_types::WasmResponse::NodePerfChart { nodes: vec![node] }).await;
    Ok(())
}

pub async fn get_perf_for_node(
    cnn: &Pool<Postgres>,
    key: &str,
    node_id: String,
    node_name: String,
    period: InfluxTimePeriod,
) -> anyhow::Result<PerfHost> {
    if let Some(org) = get_org_details(cnn, key).await {
        let influx_url = format!("http://{}:8086", org.influx_host);
        let client = Client::new(influx_url, &org.influx_org, &org.influx_token);

        let qs = format!(
            "from(bucket: \"{}\")
            |> {}
            |> filter(fn: (r) => r[\"_measurement\"] == \"perf\")
            |> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
            |> filter(fn: (r) => r[\"host_id\"] == \"{}\")
            |> {}
            |> yield(name: \"last\")",
            org.influx_bucket, period.range(), org.key, node_id, period.aggregate_window()
        );

        let query = Query::new(qs);
        let rows = client.query::<PerfRow>(Some(query)).await;
        match rows {
            Err(e) => {
                tracing::error!("Error querying InfluxDB (node-perf): {}", e);
                return Err(anyhow::Error::msg("Unable to query influx"));
            }
            Ok(rows) => {
                // Parse and send the data
                //println!("{rows:?}");

                let mut stats = Vec::new();

                // Fill download
                for row in rows.iter() {
                    stats.push(Perf {
                        date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
                        cpu: row.cpu,
                        cpu_max: row.cpu_max,
                        ram: row.ram,
                    });
                }

                return Ok(PerfHost{
                    node_id,
                    node_name,
                    stats,
                });
            }
        }
    }
    Err(anyhow::Error::msg("Unable to query influx"))
}
@ -1,144 +0,0 @@
|
|||||||
//! Packet-per-second data queries
|
|
||||||
mod packet_row;
|
|
||||||
use self::packet_row::PacketRow;
|
|
||||||
use super::time_period::InfluxTimePeriod;
|
|
||||||
use crate::web::wss::send_response;
|
|
||||||
use axum::extract::ws::WebSocket;
|
|
||||||
use futures::future::join_all;
|
|
||||||
use influxdb2::{models::Query, Client};
|
|
||||||
use pgdb::{sqlx::{Pool, Postgres}, organization_cache::get_org_details};
|
|
||||||
use wasm_pipe_types::{PacketHost, Packets};
|
|
||||||
|
|
||||||
pub async fn send_packets_for_all_nodes(
|
|
||||||
cnn: &Pool<Postgres>,
|
|
||||||
socket: &mut WebSocket,
|
|
||||||
key: &str,
|
|
||||||
period: InfluxTimePeriod,
|
|
||||||
) -> anyhow::Result<()> {
|
|
||||||
let nodes = get_packets_for_all_nodes(cnn, key, period).await?;
|
|
||||||
send_response(socket, wasm_pipe_types::WasmResponse::PacketChart { nodes }).await;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn send_packets_for_node(
|
|
||||||
cnn: &Pool<Postgres>,
|
|
||||||
socket: &mut WebSocket,
|
|
||||||
key: &str,
|
|
||||||
period: InfluxTimePeriod,
|
|
||||||
node_id: &str,
|
|
||||||
node_name: &str,
|
|
||||||
) -> anyhow::Result<()> {
|
|
||||||
let node =
|
|
||||||
get_packets_for_node(cnn, key, node_id.to_string(), node_name.to_string(), period).await?;
|
|
||||||
|
|
||||||
send_response(
|
|
||||||
socket,
|
|
||||||
wasm_pipe_types::WasmResponse::PacketChart { nodes: vec![node] },
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Requests packet-per-second data for all shaper nodes for a given organization
|
|
||||||
///
|
|
||||||
/// # Arguments
|
|
||||||
/// * `cnn` - A connection pool to the database
|
|
||||||
/// * `key` - The organization's license key
|
|
||||||
pub async fn get_packets_for_all_nodes(
|
|
||||||
cnn: &Pool<Postgres>,
|
|
||||||
key: &str,
|
|
||||||
period: InfluxTimePeriod,
|
|
||||||
) -> anyhow::Result<Vec<PacketHost>> {
|
|
||||||
let node_status = pgdb::node_status(cnn, key).await?;
|
|
||||||
let mut futures = Vec::new();
|
|
||||||
for node in node_status {
|
|
||||||
futures.push(get_packets_for_node(
|
|
||||||
cnn,
|
|
||||||
key,
|
|
||||||
node.node_id.to_string(),
|
|
||||||
node.node_name.to_string(),
|
|
||||||
period.clone(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
let all_nodes: anyhow::Result<Vec<PacketHost>> = join_all(futures).await.into_iter().collect();
|
|
||||||
all_nodes
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Requests packet-per-second data for a single shaper node.
|
|
||||||
///
|
|
||||||
/// # Arguments
|
|
||||||
/// * `cnn` - A connection pool to the database
|
|
||||||
/// * `key` - The organization's license key
|
|
||||||
/// * `node_id` - The ID of the node to query
|
|
||||||
/// * `node_name` - The name of the node to query
|
|
||||||
pub async fn get_packets_for_node(
|
|
||||||
cnn: &Pool<Postgres>,
|
|
||||||
key: &str,
|
|
||||||
node_id: String,
|
|
||||||
node_name: String,
|
|
||||||
period: InfluxTimePeriod,
|
|
||||||
) -> anyhow::Result<PacketHost> {
|
|
||||||
if let Some(org) = get_org_details(cnn, key).await {
|
|
||||||
let influx_url = format!("http://{}:8086", org.influx_host);
|
|
||||||
let client = Client::new(influx_url, &org.influx_org, &org.influx_token);
|
|
||||||
|
|
||||||
let qs = format!(
|
|
||||||
"from(bucket: \"{}\")
|
|
||||||
|> {}
|
|
||||||
|> filter(fn: (r) => r[\"_measurement\"] == \"packets\")
|
|
||||||
|> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
|
|
||||||
|> filter(fn: (r) => r[\"host_id\"] == \"{}\")
|
|
||||||
|> {}
|
|
||||||
|> yield(name: \"last\")",
|
|
||||||
org.influx_bucket,
|
|
||||||
period.range(),
|
|
||||||
org.key,
|
|
||||||
node_id,
|
|
||||||
period.aggregate_window()
|
|
||||||
);
|
|
||||||
|
|
||||||
let query = Query::new(qs);
|
|
||||||
let rows = client.query::<PacketRow>(Some(query)).await;
|
|
||||||
match rows {
|
|
||||||
Err(e) => {
|
|
||||||
tracing::error!("Error querying InfluxDB (packets by node): {}", e);
|
|
||||||
return Err(anyhow::Error::msg("Unable to query influx"));
|
|
||||||
}
|
|
||||||
Ok(rows) => {
|
|
||||||
// Parse and send the data
|
|
||||||
//println!("{rows:?}");
|
|
||||||
|
|
||||||
let mut down = Vec::new();
|
|
||||||
let mut up = Vec::new();
|
|
||||||
|
|
||||||
// Fill download
|
|
||||||
for row in rows.iter().filter(|r| r.direction == "down") {
|
|
||||||
down.push(Packets {
|
|
||||||
value: row.avg,
|
|
||||||
date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
|
|
||||||
l: row.min,
|
|
||||||
u: row.max - row.min,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fill upload
|
|
||||||
for row in rows.iter().filter(|r| r.direction == "up") {
|
|
||||||
up.push(Packets {
|
|
||||||
value: row.avg,
|
|
||||||
date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
|
|
||||||
l: row.min,
|
|
||||||
u: row.max - row.min,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
return Ok(PacketHost {
|
|
||||||
node_id,
|
|
||||||
node_name,
|
|
||||||
down,
|
|
||||||
up,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(anyhow::Error::msg("Unable to query influx"))
|
|
||||||
}
|
|
@ -1,25 +0,0 @@
use chrono::{DateTime, FixedOffset, Utc};
use influxdb2::FromDataPoint;

#[derive(Debug, FromDataPoint)]
pub struct PacketRow {
    pub direction: String,
    pub host_id: String,
    pub min: f64,
    pub max: f64,
    pub avg: f64,
    pub time: DateTime<FixedOffset>,
}

impl Default for PacketRow {
    fn default() -> Self {
        Self {
            direction: "".to_string(),
            host_id: "".to_string(),
            min: 0.0,
            max: 0.0,
            avg: 0.0,
            time: DateTime::<Utc>::MIN_UTC.into(),
        }
    }
}
@ -1,314 +0,0 @@
|
|||||||
use axum::extract::ws::WebSocket;
|
|
||||||
use futures::future::join_all;
|
|
||||||
use influxdb2::{Client, models::Query};
|
|
||||||
use pgdb::{sqlx::{Pool, Postgres}, organization_cache::get_org_details};
|
|
||||||
use wasm_pipe_types::{RttHost, Rtt};
|
|
||||||
use crate::web::wss::{queries::rtt::rtt_row::RttCircuitRow, send_response};
|
|
||||||
use self::rtt_row::{RttRow, RttSiteRow};
|
|
||||||
|
|
||||||
use super::time_period::InfluxTimePeriod;
|
|
||||||
mod rtt_row;
|
|
||||||
|
|
||||||
pub async fn send_rtt_for_all_nodes(cnn: &Pool<Postgres>, socket: &mut WebSocket, key: &str, period: InfluxTimePeriod) -> anyhow::Result<()> {
|
|
||||||
let nodes = get_rtt_for_all_nodes(cnn, key, period).await?;
|
|
||||||
|
|
||||||
let mut histogram = vec![0; 20];
|
|
||||||
for node in nodes.iter() {
|
|
||||||
for rtt in node.rtt.iter() {
|
|
||||||
let bucket = usize::min(19, (rtt.value / 10.0) as usize);
|
|
||||||
histogram[bucket] += 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
let nodes = vec![RttHost { node_id: "".to_string(), node_name: "".to_string(), rtt: rtt_bucket_merge(&nodes) }];
|
|
||||||
send_response(socket, wasm_pipe_types::WasmResponse::RttChart { nodes, histogram }).await;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn send_rtt_for_all_nodes_site(cnn: &Pool<Postgres>, socket: &mut WebSocket, key: &str, site_id: String, period: InfluxTimePeriod) -> anyhow::Result<()> {
|
|
||||||
let nodes = get_rtt_for_all_nodes_site(cnn, key, &site_id, period).await?;
|
|
||||||
|
|
||||||
let mut histogram = vec![0; 20];
|
|
||||||
for node in nodes.iter() {
|
|
||||||
for rtt in node.rtt.iter() {
|
|
||||||
let bucket = usize::min(19, (rtt.value / 200.0) as usize);
|
|
||||||
histogram[bucket] += 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
send_response(socket, wasm_pipe_types::WasmResponse::RttChartSite { nodes, histogram }).await;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn send_rtt_for_all_nodes_circuit(cnn: &Pool<Postgres>, socket: &mut WebSocket, key: &str, site_id: String, period: InfluxTimePeriod) -> anyhow::Result<()> {
|
|
||||||
let nodes = get_rtt_for_all_nodes_circuit(cnn, key, &site_id, period).await?;
|
|
||||||
|
|
||||||
let mut histogram = vec![0; 20];
|
|
||||||
for node in nodes.iter() {
|
|
||||||
for rtt in node.rtt.iter() {
|
|
||||||
let bucket = usize::min(19, (rtt.value / 200.0) as usize);
|
|
||||||
histogram[bucket] += 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
send_response(socket, wasm_pipe_types::WasmResponse::RttChartCircuit { nodes, histogram }).await;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn send_rtt_for_node(cnn: &Pool<Postgres>, socket: &mut WebSocket, key: &str, period: InfluxTimePeriod, node_id: String, node_name: String) -> anyhow::Result<()> {
|
|
||||||
let node = get_rtt_for_node(cnn, key, node_id, node_name, period).await?;
|
|
||||||
let nodes = vec![node];
|
|
||||||
|
|
||||||
let mut histogram = vec![0; 20];
|
|
||||||
for node in nodes.iter() {
|
|
||||||
for rtt in node.rtt.iter() {
|
|
||||||
let bucket = usize::min(19, (rtt.value / 200.0) as usize);
|
|
||||||
histogram[bucket] += 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
send_response(socket, wasm_pipe_types::WasmResponse::RttChart { nodes, histogram }).await;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn rtt_bucket_merge(rtt: &[RttHost]) -> Vec<Rtt> {
|
|
||||||
let mut entries: Vec<Rtt> = Vec::new();
|
|
||||||
for entry in rtt.iter() {
|
|
||||||
for entry in entry.rtt.iter() {
|
|
||||||
if let Some(e) = entries.iter().position(|d| d.date == entry.date) {
|
|
||||||
entries[e].l = f64::min(entries[e].l, entry.l);
|
|
||||||
entries[e].u = f64::max(entries[e].u, entry.u);
|
|
||||||
} else {
|
|
||||||
entries.push(entry.clone());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
entries
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn get_rtt_for_all_nodes(cnn: &Pool<Postgres>, key: &str, period: InfluxTimePeriod) -> anyhow::Result<Vec<RttHost>> {
|
|
||||||
let node_status = pgdb::node_status(cnn, key).await?;
|
|
||||||
let mut futures = Vec::new();
|
|
||||||
for node in node_status {
|
|
||||||
futures.push(get_rtt_for_node(
|
|
||||||
cnn,
|
|
||||||
key,
|
|
||||||
node.node_id.to_string(),
|
|
||||||
node.node_name.to_string(),
|
|
||||||
period.clone(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
let all_nodes: anyhow::Result<Vec<RttHost>> = join_all(futures).await
|
|
||||||
.into_iter().collect();
|
|
||||||
all_nodes
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn get_rtt_for_all_nodes_site(cnn: &Pool<Postgres>, key: &str, site_id: &str, period: InfluxTimePeriod) -> anyhow::Result<Vec<RttHost>> {
|
|
||||||
let node_status = pgdb::node_status(cnn, key).await?;
|
|
||||||
let mut futures = Vec::new();
|
|
||||||
for node in node_status {
|
|
||||||
futures.push(get_rtt_for_node_site(
|
|
||||||
cnn,
|
|
||||||
key,
|
|
||||||
node.node_id.to_string(),
|
|
||||||
node.node_name.to_string(),
|
|
||||||
site_id.to_string(),
|
|
||||||
period.clone(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
let all_nodes: anyhow::Result<Vec<RttHost>> = join_all(futures).await
|
|
||||||
.into_iter().collect();
|
|
||||||
all_nodes
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn get_rtt_for_all_nodes_circuit(cnn: &Pool<Postgres>, key: &str, circuit_id: &str, period: InfluxTimePeriod) -> anyhow::Result<Vec<RttHost>> {
|
|
||||||
let node_status = pgdb::node_status(cnn, key).await?;
|
|
||||||
let mut futures = Vec::new();
|
|
||||||
for node in node_status {
|
|
||||||
futures.push(get_rtt_for_node_circuit(
|
|
||||||
cnn,
|
|
||||||
key,
|
|
||||||
node.node_id.to_string(),
|
|
||||||
node.node_name.to_string(),
|
|
||||||
circuit_id.to_string(),
|
|
||||||
period.clone(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
let all_nodes: anyhow::Result<Vec<RttHost>> = join_all(futures).await
|
|
||||||
.into_iter().collect();
|
|
||||||
all_nodes
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn get_rtt_for_node(
    cnn: &Pool<Postgres>,
    key: &str,
    node_id: String,
    node_name: String,
    period: InfluxTimePeriod,
) -> anyhow::Result<RttHost> {
    if let Some(org) = get_org_details(cnn, key).await {
        let influx_url = format!("http://{}:8086", org.influx_host);
        let client = Client::new(influx_url, &org.influx_org, &org.influx_token);

        let qs = format!(
            "from(bucket: \"{}\")
            |> {}
            |> filter(fn: (r) => r[\"_measurement\"] == \"rtt\")
            |> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
            |> filter(fn: (r) => r[\"host_id\"] == \"{}\")
            |> {}
            |> yield(name: \"last\")",
            org.influx_bucket, period.range(), org.key, node_id, period.aggregate_window()
        );

        let query = Query::new(qs);
        let rows = client.query::<RttRow>(Some(query)).await;
        match rows {
            Err(e) => {
                tracing::error!("Error querying InfluxDB (rtt node): {}", e);
                return Err(anyhow::Error::msg("Unable to query influx"));
            }
            Ok(rows) => {
                // Parse and send the data
                //println!("{rows:?}");

                let mut rtt = Vec::new();

                // Fill RTT
                for row in rows.iter() {
                    rtt.push(Rtt {
                        value: f64::min(200.0, row.avg),
                        date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
                        l: f64::min(200.0, row.min),
                        u: f64::min(200.0, row.max) - f64::min(200.0, row.min),
                    });
                }

                return Ok(RttHost {
                    node_id,
                    node_name,
                    rtt,
                });
            }
        }
    }
    Err(anyhow::Error::msg("Unable to query influx"))
}

pub async fn get_rtt_for_node_site(
    cnn: &Pool<Postgres>,
    key: &str,
    node_id: String,
    node_name: String,
    site_id: String,
    period: InfluxTimePeriod,
) -> anyhow::Result<RttHost> {
    if let Some(org) = get_org_details(cnn, key).await {
        let influx_url = format!("http://{}:8086", org.influx_host);
        let client = Client::new(influx_url, &org.influx_org, &org.influx_token);

        let qs = format!(
            "from(bucket: \"{}\")
            |> {}
            |> filter(fn: (r) => r[\"_measurement\"] == \"tree\")
            |> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
            |> filter(fn: (r) => r[\"host_id\"] == \"{}\")
            |> filter(fn: (r) => r[\"node_name\"] == \"{}\")
            |> filter(fn: (r) => r[\"_field\"] == \"rtt_avg\" or r[\"_field\"] == \"rtt_max\" or r[\"_field\"] == \"rtt_min\")
            |> {}
            |> yield(name: \"last\")",
            org.influx_bucket, period.range(), org.key, node_id, site_id, period.aggregate_window()
        );

        let query = Query::new(qs);
        let rows = client.query::<RttSiteRow>(Some(query)).await;
        match rows {
            Err(e) => {
                tracing::error!("Error querying InfluxDB (rtt node site): {}", e);
                return Err(anyhow::Error::msg("Unable to query influx"));
            }
            Ok(rows) => {
                // Parse and send the data
                //println!("{rows:?}");

                let mut rtt = Vec::new();

                // Fill RTT
                for row in rows.iter() {
                    rtt.push(Rtt {
                        value: row.rtt_avg,
                        date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
                        l: row.rtt_min,
                        u: row.rtt_max - row.rtt_min,
                    });
                }

                return Ok(RttHost {
                    node_id,
                    node_name,
                    rtt,
                });
            }
        }
    }
    Err(anyhow::Error::msg("Unable to query influx"))
}

pub async fn get_rtt_for_node_circuit(
    cnn: &Pool<Postgres>,
    key: &str,
    node_id: String,
    node_name: String,
    circuit_id: String,
    period: InfluxTimePeriod,
) -> anyhow::Result<RttHost> {
    if let Some(org) = get_org_details(cnn, key).await {
        let influx_url = format!("http://{}:8086", org.influx_host);
        let client = Client::new(influx_url, &org.influx_org, &org.influx_token);

        let qs = format!(
            "from(bucket: \"{}\")
            |> {}
            |> filter(fn: (r) => r[\"_measurement\"] == \"rtt\")
            |> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
            |> filter(fn: (r) => r[\"host_id\"] == \"{}\")
            |> filter(fn: (r) => r[\"circuit_id\"] == \"{}\")
            |> filter(fn: (r) => r[\"_field\"] == \"avg\" or r[\"_field\"] == \"max\" or r[\"_field\"] == \"min\")
            |> {}
            |> yield(name: \"last\")",
            org.influx_bucket, period.range(), org.key, node_id, circuit_id, period.aggregate_window()
        );
        //log::warn!("{qs}");
        let query = Query::new(qs);
        let rows = client.query::<RttCircuitRow>(Some(query)).await;
        match rows {
            Err(e) => {
                tracing::error!("Error querying InfluxDB (rtt_node_circuit): {}", e);
                return Err(anyhow::Error::msg("Unable to query influx"));
            }
            Ok(rows) => {
                // Parse and send the data
                //println!("{rows:?}");

                let mut rtt = Vec::new();

                // Fill RTT
                for row in rows.iter() {
                    rtt.push(Rtt {
                        value: row.avg,
                        date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
                        l: row.min,
                        u: row.max - row.min,
                    });
                }

                return Ok(RttHost {
                    node_id,
                    node_name,
                    rtt,
                });
            }
        }
    }
    Err(anyhow::Error::msg("Unable to query influx"))
}
@ -1,65 +0,0 @@
use chrono::{DateTime, FixedOffset, Utc};
use influxdb2::FromDataPoint;

#[derive(Debug, FromDataPoint)]
pub struct RttRow {
    pub host_id: String,
    pub min: f64,
    pub max: f64,
    pub avg: f64,
    pub time: DateTime<FixedOffset>,
}

impl Default for RttRow {
    fn default() -> Self {
        Self {
            host_id: "".to_string(),
            min: 0.0,
            max: 0.0,
            avg: 0.0,
            time: DateTime::<Utc>::MIN_UTC.into(),
        }
    }
}

#[derive(Debug, FromDataPoint)]
pub struct RttSiteRow {
    pub host_id: String,
    pub rtt_min: f64,
    pub rtt_max: f64,
    pub rtt_avg: f64,
    pub time: DateTime<FixedOffset>,
}

impl Default for RttSiteRow {
    fn default() -> Self {
        Self {
            host_id: "".to_string(),
            rtt_min: 0.0,
            rtt_max: 0.0,
            rtt_avg: 0.0,
            time: DateTime::<Utc>::MIN_UTC.into(),
        }
    }
}

#[derive(Debug, FromDataPoint)]
pub struct RttCircuitRow {
    pub host_id: String,
    pub min: f64,
    pub max: f64,
    pub avg: f64,
    pub time: DateTime<FixedOffset>,
}

impl Default for RttCircuitRow {
    fn default() -> Self {
        Self {
            host_id: "".to_string(),
            min: 0.0,
            max: 0.0,
            avg: 0.0,
            time: DateTime::<Utc>::MIN_UTC.into(),
        }
    }
}
@ -1,88 +0,0 @@
use axum::extract::ws::WebSocket;
use pgdb::sqlx::{Pool, Postgres};
use wasm_pipe_types::SearchResult;

use crate::web::wss::send_response;

pub async fn omnisearch(
    cnn: &Pool<Postgres>,
    socket: &mut WebSocket,
    key: &str,
    term: &str,
) -> anyhow::Result<()> {
    tracing::warn!("Searching for {term}");

    let hits = search_devices(cnn, key, term).await;
    if let Err(e) = &hits {
        tracing::error!("{e:?}");
    }
    // Propagate the error rather than unwrapping, so a failed device search
    // cannot panic the socket handler.
    let mut hits = hits?;

    hits.extend(search_ips(cnn, key, term).await?);
    hits.extend(search_sites(cnn, key, term).await?);

    hits.sort_by(|a, b| a.name.cmp(&b.name));
    hits.dedup_by(|a, b| a.name == b.name && a.url == b.url);
    hits.sort_by(|a, b| a.score.partial_cmp(&b.score).unwrap_or(std::cmp::Ordering::Equal));

    send_response(socket, wasm_pipe_types::WasmResponse::SearchResult { hits }).await;

    Ok(())
}

async fn search_devices(
    cnn: &Pool<Postgres>,
    key: &str,
    term: &str,
) -> anyhow::Result<Vec<SearchResult>> {
    let hits = pgdb::search_devices(cnn, key, term).await?;
    Ok(hits
        .iter()
        .map(|hit| SearchResult {
            name: hit.circuit_name.to_string(),
            url: format!("circuit:{}", hit.circuit_id),
            score: hit.score,
            icon: "circuit".to_string(),
        })
        .collect())
}

async fn search_ips(
    cnn: &Pool<Postgres>,
    key: &str,
    term: &str,
) -> anyhow::Result<Vec<SearchResult>> {
    let hits = pgdb::search_ip(cnn, key, term).await?;
    Ok(hits
        .iter()
        .map(|hit| SearchResult {
            name: hit.circuit_name.to_string(),
            url: format!("circuit:{}", hit.circuit_id),
            score: hit.score,
            icon: "circuit".to_string(),
        })
        .collect())
}

async fn search_sites(
    cnn: &Pool<Postgres>,
    key: &str,
    term: &str,
) -> anyhow::Result<Vec<SearchResult>> {
    let hits = pgdb::search_sites(cnn, key, term).await?;
    Ok(hits
        .iter()
        .map(|hit| {
            let t = if hit.site_type.is_empty() {
                "site".to_string()
            } else {
                hit.site_type.to_string()
            };
            SearchResult {
                name: hit.site_name.to_string(),
                url: format!("{t}:{}", hit.site_name),
                score: hit.score,
                icon: t,
            }
        })
        .collect())
}
@ -1,282 +0,0 @@
|
|||||||
use super::time_period::InfluxTimePeriod;
|
|
||||||
use crate::web::wss::send_response;
|
|
||||||
use axum::extract::ws::WebSocket;
|
|
||||||
use chrono::{DateTime, FixedOffset, Utc};
|
|
||||||
use influxdb2::Client;
|
|
||||||
use influxdb2::{models::Query, FromDataPoint};
|
|
||||||
use pgdb::organization_cache::get_org_details;
|
|
||||||
use pgdb::sqlx::{query, Pool, Postgres, Row};
|
|
||||||
use pgdb::OrganizationDetails;
|
|
||||||
use serde::Serialize;
|
|
||||||
use std::collections::HashMap;
|
|
||||||
use wasm_pipe_types::WasmResponse;
|
|
||||||
|
|
||||||
pub async fn root_heat_map(
|
|
||||||
cnn: &Pool<Postgres>,
|
|
||||||
socket: &mut WebSocket,
|
|
||||||
key: &str,
|
|
||||||
period: InfluxTimePeriod,
|
|
||||||
) -> anyhow::Result<()> {
|
|
||||||
if let Some(org) = get_org_details(cnn, key).await {
|
|
||||||
let influx_url = format!("http://{}:8086", org.influx_host);
|
|
||||||
let client = Client::new(influx_url, &org.influx_org, &org.influx_token);
|
|
||||||
|
|
||||||
// Get sites where parent=0 (for this setup)
|
|
||||||
let hosts: Vec<String> =
|
|
||||||
query("SELECT DISTINCT site_name FROM site_tree WHERE key=$1 AND parent=0")
|
|
||||||
.bind(key)
|
|
||||||
.fetch_all(cnn)
|
|
||||||
.await?
|
|
||||||
.iter()
|
|
||||||
.map(|row| row.try_get("site_name").unwrap())
|
|
||||||
.filter(|row| row != "Root")
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let mut host_filter = "filter(fn: (r) => ".to_string();
|
|
||||||
for host in hosts.iter() {
|
|
||||||
host_filter += &format!("r[\"node_name\"] == \"{host}\" or ");
|
|
||||||
}
|
|
||||||
host_filter = host_filter[0..host_filter.len() - 4].to_string();
|
|
||||||
host_filter += ")";
|
|
||||||
|
|
||||||
// Query influx for RTT averages
|
|
||||||
let qs = format!(
|
|
||||||
"from(bucket: \"{}\")
|
|
||||||
|> {}
|
|
||||||
|> filter(fn: (r) => r[\"_measurement\"] == \"tree\")
|
|
||||||
|> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
|
|
||||||
|> filter(fn: (r) => r[\"_field\"] == \"rtt_avg\")
|
|
||||||
|> {}
|
|
||||||
|> {}
|
|
||||||
|> yield(name: \"last\")",
|
|
||||||
org.influx_bucket,
|
|
||||||
period.range(),
|
|
||||||
org.key,
|
|
||||||
host_filter,
|
|
||||||
period.aggregate_window()
|
|
||||||
);
|
|
||||||
//println!("{qs}");
|
|
||||||
|
|
||||||
let query = Query::new(qs);
|
|
||||||
let rows = client.query::<HeatRow>(Some(query)).await;
|
|
||||||
match rows {
|
|
||||||
Err(e) => {
|
|
||||||
tracing::error!("Error querying InfluxDB (root heat map): {}", e);
|
|
||||||
return Err(anyhow::Error::msg("Unable to query influx"));
|
|
||||||
}
|
|
||||||
Ok(rows) => {
|
|
||||||
let mut sorter: HashMap<String, Vec<(DateTime<FixedOffset>, f64)>> = HashMap::new();
|
|
||||||
for row in rows.iter() {
|
|
||||||
if let Some(hat) = sorter.get_mut(&row.node_name) {
|
|
||||||
hat.push((row.time, row.rtt_avg));
|
|
||||||
} else {
|
|
||||||
sorter.insert(row.node_name.clone(), vec![(row.time, row.rtt_avg)]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
send_response(socket, WasmResponse::RootHeat { data: sorter }).await;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn site_circuits_heat_map(
|
|
||||||
cnn: &Pool<Postgres>,
|
|
||||||
key: &str,
|
|
||||||
site_name: &str,
|
|
||||||
period: InfluxTimePeriod,
|
|
||||||
sorter: &mut HashMap<String, Vec<(DateTime<FixedOffset>, f64)>>,
|
|
||||||
client: Client,
|
|
||||||
org: &OrganizationDetails,
|
|
||||||
) -> anyhow::Result<()> {
|
|
||||||
// Get sites where parent=site_id (for this setup)
|
|
||||||
let hosts: Vec<(String, String)> =
|
|
||||||
query("SELECT DISTINCT circuit_id, circuit_name FROM shaped_devices WHERE key=$1 AND parent_node=$2")
|
|
||||||
.bind(key)
|
|
||||||
.bind(site_name)
|
|
||||||
.fetch_all(cnn)
|
|
||||||
.await?
|
|
||||||
.iter()
|
|
||||||
.map(|row| (row.try_get("circuit_id").unwrap(), row.try_get("circuit_name").unwrap()))
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let mut circuit_map = HashMap::new();
|
|
||||||
for (id, name) in hosts.iter() {
|
|
||||||
circuit_map.insert(id, name);
|
|
||||||
}
|
|
||||||
let hosts = hosts.iter().map(|(id, _)| id).collect::<Vec<_>>();
|
|
||||||
|
|
||||||
let mut host_filter = "filter(fn: (r) => ".to_string();
|
|
||||||
for host in hosts.iter() {
|
|
||||||
host_filter += &format!("r[\"circuit_id\"] == \"{host}\" or ");
|
|
||||||
}
|
|
||||||
host_filter = host_filter[0..host_filter.len() - 4].to_string();
|
|
||||||
host_filter += ")";
|
|
||||||
|
|
||||||
// Query influx for RTT averages
|
|
||||||
let qs = format!(
|
|
||||||
"from(bucket: \"{}\")
|
|
||||||
|> {}
|
|
||||||
|> filter(fn: (r) => r[\"_measurement\"] == \"rtt\")
|
|
||||||
|> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
|
|
||||||
|> filter(fn: (r) => r[\"_field\"] == \"avg\")
|
|
||||||
|> {}
|
|
||||||
|> {}
|
|
||||||
|> yield(name: \"last\")",
|
|
||||||
org.influx_bucket,
|
|
||||||
period.range(),
|
|
||||||
org.key,
|
|
||||||
host_filter,
|
|
||||||
period.aggregate_window()
|
|
||||||
);
|
|
||||||
//println!("{qs}\n\n");
|
|
||||||
if qs.contains("filter(fn: (r))") {
|
|
||||||
// No hosts to filter
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
|
|
||||||
let query = Query::new(qs);
|
|
||||||
let rows = client.query::<HeatCircuitRow>(Some(query)).await;
|
|
||||||
match rows {
|
|
||||||
Err(e) => {
|
|
||||||
tracing::error!("Error querying InfluxDB (site_circuits_heat_map): {}", e);
|
|
||||||
return Err(anyhow::Error::msg("Unable to query influx"));
|
|
||||||
}
|
|
||||||
Ok(rows) => {
|
|
||||||
for row in rows.iter() {
|
|
||||||
if let Some(name) = circuit_map.get(&row.circuit_id) {
|
|
||||||
if let Some(hat) = sorter.get_mut(*name) {
|
|
||||||
hat.push((row.time, row.avg));
|
|
||||||
} else {
|
|
||||||
sorter.insert(name.to_string(), vec![(row.time, row.avg)]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn site_heat_map(
|
|
||||||
cnn: &Pool<Postgres>,
|
|
||||||
socket: &mut WebSocket,
|
|
||||||
key: &str,
|
|
||||||
site_name: &str,
|
|
||||||
period: InfluxTimePeriod,
|
|
||||||
) -> anyhow::Result<()> {
|
|
||||||
if let Some(org) = get_org_details(cnn, key).await {
|
|
||||||
let influx_url = format!("http://{}:8086", org.influx_host);
|
|
||||||
let client = Client::new(influx_url, &org.influx_org, &org.influx_token);
|
|
||||||
|
|
||||||
// Get the site index
|
|
||||||
let site_id = pgdb::get_site_id_from_name(cnn, key, site_name).await?;
|
|
||||||
|
|
||||||
// Get sites where parent=site_id (for this setup)
|
|
||||||
let hosts: Vec<String> =
|
|
||||||
query("SELECT DISTINCT site_name FROM site_tree WHERE key=$1 AND parent=$2")
|
|
||||||
.bind(key)
|
|
||||||
.bind(site_id)
|
|
||||||
.fetch_all(cnn)
|
|
||||||
.await?
|
|
||||||
.iter()
|
|
||||||
.map(|row| row.try_get("site_name").unwrap())
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let mut host_filter = "filter(fn: (r) => ".to_string();
|
|
||||||
for host in hosts.iter() {
|
|
||||||
host_filter += &format!("r[\"node_name\"] == \"{host}\" or ");
|
|
||||||
}
|
|
||||||
host_filter = host_filter[0..host_filter.len() - 4].to_string();
|
|
||||||
host_filter += ")";
|
|
||||||
|
|
||||||
if host_filter.ends_with("(r))") {
|
|
||||||
host_filter =
|
|
||||||
"filter(fn: (r) => r[\"node_name\"] == \"bad_sheep_no_data\")".to_string();
|
|
||||||
}
|
|
||||||
|
|
||||||
// Query influx for RTT averages
|
|
||||||
let qs = format!(
|
|
||||||
"from(bucket: \"{}\")
|
|
||||||
|> {}
|
|
||||||
|> filter(fn: (r) => r[\"_measurement\"] == \"tree\")
|
|
||||||
|> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
|
|
||||||
|> filter(fn: (r) => r[\"_field\"] == \"rtt_avg\")
|
|
||||||
|> {}
|
|
||||||
|> {}
|
|
||||||
|> yield(name: \"last\")",
|
|
||||||
org.influx_bucket,
|
|
||||||
period.range(),
|
|
||||||
org.key,
|
|
||||||
host_filter,
|
|
||||||
period.aggregate_window()
|
|
||||||
);
|
|
||||||
//println!("{qs}\n\n");
|
|
||||||
|
|
||||||
let query = Query::new(qs);
|
|
||||||
let rows = client.query::<HeatRow>(Some(query)).await;
|
|
||||||
match rows {
|
|
||||||
Err(e) => {
|
|
||||||
tracing::error!("Error querying InfluxDB (site-heat-map): {}", e);
|
|
||||||
return Err(anyhow::Error::msg("Unable to query influx"));
|
|
||||||
}
|
|
||||||
Ok(rows) => {
|
|
||||||
let mut sorter: HashMap<String, Vec<(DateTime<FixedOffset>, f64)>> = HashMap::new();
|
|
||||||
for row in rows.iter() {
|
|
||||||
if let Some(hat) = sorter.get_mut(&row.node_name) {
|
|
||||||
hat.push((row.time, row.rtt_avg));
|
|
||||||
} else {
|
|
||||||
sorter.insert(row.node_name.clone(), vec![(row.time, row.rtt_avg)]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
site_circuits_heat_map(cnn, key, site_name, period, &mut sorter, client, &org)
|
|
||||||
.await?;
|
|
||||||
send_response(socket, WasmResponse::SiteHeat { data: sorter }).await;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize)]
|
|
||||||
struct HeatMessage {
|
|
||||||
msg: String,
|
|
||||||
data: HashMap<String, Vec<(DateTime<FixedOffset>, f64)>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, FromDataPoint)]
|
|
||||||
pub struct HeatRow {
|
|
||||||
pub node_name: String,
|
|
||||||
pub rtt_avg: f64,
|
|
||||||
pub time: DateTime<FixedOffset>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for HeatRow {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self {
|
|
||||||
node_name: "".to_string(),
|
|
||||||
rtt_avg: 0.0,
|
|
||||||
time: DateTime::<Utc>::MIN_UTC.into(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, FromDataPoint)]
|
|
||||||
pub struct HeatCircuitRow {
|
|
||||||
pub circuit_id: String,
|
|
||||||
pub avg: f64,
|
|
||||||
pub time: DateTime<FixedOffset>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for HeatCircuitRow {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self {
|
|
||||||
circuit_id: "".to_string(),
|
|
||||||
avg: 0.0,
|
|
||||||
time: DateTime::<Utc>::MIN_UTC.into(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,20 +0,0 @@
|
|||||||
use axum::extract::ws::WebSocket;
|
|
||||||
use pgdb::sqlx::{Pool, Postgres};
|
|
||||||
use serde::Serialize;
|
|
||||||
use wasm_pipe_types::{SiteTree, WasmResponse};
|
|
||||||
use crate::web::wss::send_response;
|
|
||||||
use super::site_tree::tree_to_host;
|
|
||||||
|
|
||||||
#[derive(Serialize)]
|
|
||||||
struct SiteInfoMessage {
|
|
||||||
msg: String,
|
|
||||||
data: SiteTree,
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
pub async fn send_site_info(cnn: &Pool<Postgres>, socket: &mut WebSocket, key: &str, site_id: &str) {
|
|
||||||
if let Ok(host) = pgdb::get_site_info(cnn, key, site_id).await {
|
|
||||||
let host = tree_to_host(host);
|
|
||||||
send_response(socket, WasmResponse::SiteInfo { data: host }).await;
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,47 +0,0 @@
|
|||||||
use axum::extract::ws::WebSocket;
|
|
||||||
use pgdb::sqlx::{Pool, Postgres};
|
|
||||||
|
|
||||||
use crate::web::wss::send_response;
|
|
||||||
|
|
||||||
pub async fn send_site_parents(
|
|
||||||
cnn: &Pool<Postgres>,
|
|
||||||
socket: &mut WebSocket,
|
|
||||||
key: &str,
|
|
||||||
site_name: &str,
|
|
||||||
) {
|
|
||||||
if let Ok(parents) = pgdb::get_parent_list(cnn, key, site_name).await {
|
|
||||||
send_response(socket, wasm_pipe_types::WasmResponse::SiteParents { data: parents }).await;
|
|
||||||
}
|
|
||||||
|
|
||||||
let child_result = pgdb::get_child_list(cnn, key, site_name).await;
|
|
||||||
if let Ok(children) = child_result {
|
|
||||||
send_response(socket, wasm_pipe_types::WasmResponse::SiteChildren { data: children }).await;
|
|
||||||
} else {
|
|
||||||
tracing::error!("Error getting children: {:?}", child_result);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn send_circuit_parents(
|
|
||||||
cnn: &Pool<Postgres>,
|
|
||||||
socket: &mut WebSocket,
|
|
||||||
key: &str,
|
|
||||||
circuit_id: &str,
|
|
||||||
) {
|
|
||||||
if let Ok(parents) = pgdb::get_circuit_parent_list(cnn, key, circuit_id).await {
|
|
||||||
send_response(socket, wasm_pipe_types::WasmResponse::SiteParents { data: parents }).await;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn send_root_parents(
|
|
||||||
cnn: &Pool<Postgres>,
|
|
||||||
socket: &mut WebSocket,
|
|
||||||
key: &str,
|
|
||||||
) {
|
|
||||||
let site_name = "Root";
|
|
||||||
let child_result = pgdb::get_child_list(cnn, key, site_name).await;
|
|
||||||
if let Ok(children) = child_result {
|
|
||||||
send_response(socket, wasm_pipe_types::WasmResponse::SiteChildren { data: children }).await;
|
|
||||||
} else {
|
|
||||||
tracing::error!("Error getting children: {:?}", child_result);
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,31 +0,0 @@
|
|||||||
use axum::extract::ws::WebSocket;
|
|
||||||
use pgdb::{
|
|
||||||
sqlx::{Pool, Postgres},
|
|
||||||
TreeNode,
|
|
||||||
};
|
|
||||||
use wasm_pipe_types::SiteTree;
|
|
||||||
use crate::web::wss::send_response;
|
|
||||||
|
|
||||||
pub async fn send_site_tree(cnn: &Pool<Postgres>, socket: &mut WebSocket, key: &str, parent: &str) {
|
|
||||||
let tree = pgdb::get_site_tree(cnn, key, parent).await.unwrap();
|
|
||||||
let tree = tree
|
|
||||||
.into_iter()
|
|
||||||
.map(tree_to_host)
|
|
||||||
.collect::<Vec<SiteTree>>();
|
|
||||||
|
|
||||||
send_response(socket, wasm_pipe_types::WasmResponse::SiteTree { data: tree }).await;
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn tree_to_host(row: TreeNode) -> SiteTree {
|
|
||||||
SiteTree {
|
|
||||||
index: row.index,
|
|
||||||
site_name: row.site_name,
|
|
||||||
site_type: row.site_type,
|
|
||||||
parent: row.parent,
|
|
||||||
max_down: row.max_down,
|
|
||||||
max_up: row.max_up,
|
|
||||||
current_down: row.current_down,
|
|
||||||
current_up: row.current_up,
|
|
||||||
current_rtt: row.current_rtt,
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,319 +0,0 @@
|
|||||||
use std::collections::HashMap;
|
|
||||||
mod site_stack;
|
|
||||||
use axum::extract::ws::WebSocket;
|
|
||||||
use futures::future::join_all;
|
|
||||||
use influxdb2::{Client, models::Query};
|
|
||||||
use pgdb::{sqlx::{Pool, Postgres}, organization_cache::get_org_details};
|
|
||||||
use wasm_pipe_types::{ThroughputHost, Throughput};
|
|
||||||
use crate::web::wss::send_response;
|
|
||||||
use self::throughput_row::{ThroughputRow, ThroughputRowBySite, ThroughputRowByCircuit};
|
|
||||||
use super::time_period::InfluxTimePeriod;
|
|
||||||
mod throughput_row;
|
|
||||||
pub use site_stack::send_site_stack_map;
|
|
||||||
|
|
||||||
pub async fn send_throughput_for_all_nodes(cnn: &Pool<Postgres>, socket: &mut WebSocket, key: &str, period: InfluxTimePeriod) -> anyhow::Result<()> {
|
|
||||||
let nodes = get_throughput_for_all_nodes(cnn, key, period).await?;
|
|
||||||
send_response(socket, wasm_pipe_types::WasmResponse::BitsChart { nodes }).await;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn send_throughput_for_all_nodes_by_site(cnn: &Pool<Postgres>, socket: &mut WebSocket, key: &str, site_name: String, period: InfluxTimePeriod) -> anyhow::Result<()> {
|
|
||||||
let nodes = get_throughput_for_all_nodes_by_site(cnn, key, period, &site_name).await?;
|
|
||||||
|
|
||||||
send_response(socket, wasm_pipe_types::WasmResponse::BitsChart { nodes }).await;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn send_throughput_for_all_nodes_by_circuit(cnn: &Pool<Postgres>, socket: &mut WebSocket, key: &str, circuit_id: String, period: InfluxTimePeriod) -> anyhow::Result<()> {
|
|
||||||
let nodes = get_throughput_for_all_nodes_by_circuit(cnn, key, period, &circuit_id).await?;
|
|
||||||
send_response(socket, wasm_pipe_types::WasmResponse::BitsChart { nodes }).await;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn send_throughput_for_node(cnn: &Pool<Postgres>, socket: &mut WebSocket, key: &str, period: InfluxTimePeriod, node_id: String, node_name: String) -> anyhow::Result<()> {
|
|
||||||
let node = get_throughput_for_node(cnn, key, node_id, node_name, period).await?;
|
|
||||||
send_response(socket, wasm_pipe_types::WasmResponse::BitsChart { nodes: vec![node] }).await;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn get_throughput_for_all_nodes(cnn: &Pool<Postgres>, key: &str, period: InfluxTimePeriod) -> anyhow::Result<Vec<ThroughputHost>> {
|
|
||||||
let node_status = pgdb::node_status(cnn, key).await?;
|
|
||||||
let mut futures = Vec::new();
|
|
||||||
for node in node_status {
|
|
||||||
futures.push(get_throughput_for_node(
|
|
||||||
cnn,
|
|
||||||
key,
|
|
||||||
node.node_id.to_string(),
|
|
||||||
node.node_name.to_string(),
|
|
||||||
period.clone(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
let all_nodes: anyhow::Result<Vec<ThroughputHost>> = join_all(futures).await
|
|
||||||
.into_iter().collect();
|
|
||||||
all_nodes
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn get_throughput_for_all_nodes_by_site(cnn: &Pool<Postgres>, key: &str, period: InfluxTimePeriod, site_name: &str) -> anyhow::Result<Vec<ThroughputHost>> {
|
|
||||||
let node_status = pgdb::node_status(cnn, key).await?;
|
|
||||||
let mut futures = Vec::new();
|
|
||||||
for node in node_status {
|
|
||||||
futures.push(get_throughput_for_node_by_site(
|
|
||||||
cnn,
|
|
||||||
key,
|
|
||||||
node.node_id.to_string(),
|
|
||||||
node.node_name.to_string(),
|
|
||||||
site_name.to_string(),
|
|
||||||
period.clone(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
let all_nodes: anyhow::Result<Vec<ThroughputHost>> = join_all(futures).await
|
|
||||||
.into_iter().collect();
|
|
||||||
all_nodes
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn get_throughput_for_all_nodes_by_circuit(cnn: &Pool<Postgres>, key: &str, period: InfluxTimePeriod, circuit_id: &str) -> anyhow::Result<Vec<ThroughputHost>> {
|
|
||||||
let node_status = pgdb::node_status(cnn, key).await?;
|
|
||||||
let mut futures = Vec::new();
|
|
||||||
for node in node_status {
|
|
||||||
futures.push(get_throughput_for_node_by_circuit(
|
|
||||||
cnn,
|
|
||||||
key,
|
|
||||||
node.node_id.to_string(),
|
|
||||||
node.node_name.to_string(),
|
|
||||||
circuit_id.to_string(),
|
|
||||||
period.clone(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
let mut all_nodes = Vec::new();
|
|
||||||
for node in (join_all(futures).await).into_iter().flatten() {
|
|
||||||
all_nodes.extend(node);
|
|
||||||
}
|
|
||||||
Ok(all_nodes)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn get_throughput_for_node(
|
|
||||||
cnn: &Pool<Postgres>,
|
|
||||||
key: &str,
|
|
||||||
node_id: String,
|
|
||||||
node_name: String,
|
|
||||||
period: InfluxTimePeriod,
|
|
||||||
) -> anyhow::Result<ThroughputHost> {
|
|
||||||
if let Some(org) = get_org_details(cnn, key).await {
|
|
||||||
let influx_url = format!("http://{}:8086", org.influx_host);
|
|
||||||
let client = Client::new(influx_url, &org.influx_org, &org.influx_token);
|
|
||||||
|
|
||||||
let qs = format!(
|
|
||||||
"from(bucket: \"{}\")
|
|
||||||
|> {}
|
|
||||||
|> filter(fn: (r) => r[\"_measurement\"] == \"bits\")
|
|
||||||
|> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
|
|
||||||
|> filter(fn: (r) => r[\"host_id\"] == \"{}\")
|
|
||||||
|> {}
|
|
||||||
|> yield(name: \"last\")",
|
|
||||||
org.influx_bucket, period.range(), org.key, node_id, period.aggregate_window()
|
|
||||||
);
|
|
||||||
|
|
||||||
let query = Query::new(qs);
|
|
||||||
let rows = client.query::<ThroughputRow>(Some(query)).await;
|
|
||||||
match rows {
|
|
||||||
Err(e) => {
|
|
||||||
tracing::error!("Error querying InfluxDB (throughput node): {}", e);
|
|
||||||
return Err(anyhow::Error::msg("Unable to query influx"));
|
|
||||||
}
|
|
||||||
Ok(rows) => {
|
|
||||||
// Parse and send the data
|
|
||||||
//println!("{rows:?}");
|
|
||||||
|
|
||||||
let mut down = Vec::new();
|
|
||||||
let mut up = Vec::new();
|
|
||||||
|
|
||||||
// Fill download
|
|
||||||
for row in rows.iter().filter(|r| r.direction == "down") {
|
|
||||||
down.push(Throughput {
|
|
||||||
value: row.avg,
|
|
||||||
date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
|
|
||||||
l: row.min,
|
|
||||||
u: row.max - row.min,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fill upload
|
|
||||||
for row in rows.iter().filter(|r| r.direction == "up") {
|
|
||||||
up.push(Throughput {
|
|
||||||
value: row.avg,
|
|
||||||
date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
|
|
||||||
l: row.min,
|
|
||||||
u: row.max - row.min,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
return Ok(ThroughputHost{
|
|
||||||
node_id,
|
|
||||||
node_name,
|
|
||||||
down,
|
|
||||||
up,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(anyhow::Error::msg("Unable to query influx"))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn get_throughput_for_node_by_site(
|
|
||||||
cnn: &Pool<Postgres>,
|
|
||||||
key: &str,
|
|
||||||
node_id: String,
|
|
||||||
node_name: String,
|
|
||||||
site_name: String,
|
|
||||||
period: InfluxTimePeriod,
|
|
||||||
) -> anyhow::Result<ThroughputHost> {
|
|
||||||
if let Some(org) = get_org_details(cnn, key).await {
|
|
||||||
let influx_url = format!("http://{}:8086", org.influx_host);
|
|
||||||
let client = Client::new(influx_url, &org.influx_org, &org.influx_token);
|
|
||||||
|
|
||||||
let qs = format!(
|
|
||||||
"from(bucket: \"{}\")
|
|
||||||
|> {}
|
|
||||||
|> filter(fn: (r) => r[\"_measurement\"] == \"tree\")
|
|
||||||
|> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
|
|
||||||
|> filter(fn: (r) => r[\"host_id\"] == \"{}\")
|
|
||||||
|> filter(fn: (r) => r[\"node_name\"] == \"{}\")
|
|
||||||
|> filter(fn: (r) => r[\"_field\"] == \"bits_avg\" or r[\"_field\"] == \"bits_max\" or r[\"_field\"] == \"bits_min\")
|
|
||||||
|> {}
|
|
||||||
|> yield(name: \"last\")",
|
|
||||||
org.influx_bucket, period.range(), org.key, node_id, site_name, period.aggregate_window()
|
|
||||||
);
|
|
||||||
|
|
||||||
let query = Query::new(qs);
|
|
||||||
//println!("{:?}", query);
|
|
||||||
let rows = client.query::<ThroughputRowBySite>(Some(query)).await;
|
|
||||||
match rows {
|
|
||||||
Err(e) => {
|
|
||||||
tracing::error!("Error querying InfluxDB (throughput site): {}", e);
|
|
||||||
return Err(anyhow::Error::msg("Unable to query influx"));
|
|
||||||
}
|
|
||||||
Ok(rows) => {
|
|
||||||
// Parse and send the data
|
|
||||||
//println!("{rows:?}");
|
|
||||||
|
|
||||||
let mut down = Vec::new();
|
|
||||||
let mut up = Vec::new();
|
|
||||||
|
|
||||||
// Fill download
|
|
||||||
for row in rows.iter().filter(|r| r.direction == "down") {
|
|
||||||
down.push(Throughput {
|
|
||||||
value: row.bits_avg,
|
|
||||||
date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
|
|
||||||
l: row.bits_min,
|
|
||||||
u: row.bits_max - row.bits_min,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fill upload
|
|
||||||
for row in rows.iter().filter(|r| r.direction == "up") {
|
|
||||||
up.push(Throughput {
|
|
||||||
value: row.bits_avg,
|
|
||||||
date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
|
|
||||||
l: row.bits_min,
|
|
||||||
u: row.bits_max - row.bits_min,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
return Ok(ThroughputHost{
|
|
||||||
node_id,
|
|
||||||
node_name,
|
|
||||||
down,
|
|
||||||
up,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(anyhow::Error::msg("Unable to query influx"))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn get_throughput_for_node_by_circuit(
|
|
||||||
cnn: &Pool<Postgres>,
|
|
||||||
key: &str,
|
|
||||||
node_id: String,
|
|
||||||
node_name: String,
|
|
||||||
circuit_id: String,
|
|
||||||
period: InfluxTimePeriod,
|
|
||||||
) -> anyhow::Result<Vec<ThroughputHost>> {
|
|
||||||
if let Some(org) = get_org_details(cnn, key).await {
|
|
||||||
let influx_url = format!("http://{}:8086", org.influx_host);
|
|
||||||
let client = Client::new(influx_url, &org.influx_org, &org.influx_token);
|
|
||||||
|
|
||||||
let qs = format!(
|
|
||||||
"from(bucket: \"{}\")
|
|
||||||
|> {}
|
|
||||||
|> filter(fn: (r) => r[\"_measurement\"] == \"host_bits\")
|
|
||||||
|> filter(fn: (r) => r[\"organization_id\"] == \"{}\")
|
|
||||||
|> filter(fn: (r) => r[\"host_id\"] == \"{}\")
|
|
||||||
|> filter(fn: (r) => r[\"circuit_id\"] == \"{}\")
|
|
||||||
|> filter(fn: (r) => r[\"_field\"] == \"avg\" or r[\"_field\"] == \"max\" or r[\"_field\"] == \"min\")
|
|
||||||
|> {}
|
|
||||||
|> yield(name: \"last\")",
|
|
||||||
org.influx_bucket, period.range(), org.key, node_id, circuit_id, period.aggregate_window()
|
|
||||||
);
|
|
||||||
|
|
||||||
let query = Query::new(qs);
|
|
||||||
//println!("{:?}", query);
|
|
||||||
let rows = client.query::<ThroughputRowByCircuit>(Some(query)).await;
|
|
||||||
match rows {
|
|
||||||
Err(e) => {
|
|
||||||
tracing::error!(" (throughput circuit): {}", e);
|
|
||||||
return Err(anyhow::Error::msg("Unable to query influx"));
|
|
||||||
}
|
|
||||||
Ok(rows) => {
|
|
||||||
// Parse and send the data
|
|
||||||
//println!("{rows:?}");
|
|
||||||
|
|
||||||
let mut sorter: HashMap<String, (Vec<Throughput>, Vec<Throughput>)> = HashMap::new();
|
|
||||||
|
|
||||||
// Fill download
|
|
||||||
for row in rows.iter().filter(|r| r.direction == "down") {
|
|
||||||
let tp = Throughput {
|
|
||||||
value: row.avg,
|
|
||||||
date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
|
|
||||||
l: row.min,
|
|
||||||
u: row.max - row.min,
|
|
||||||
};
|
|
||||||
if let Some(hat) = sorter.get_mut(&row.ip) {
|
|
||||||
hat.0.push(tp);
|
|
||||||
} else {
|
|
||||||
sorter.insert(row.ip.clone(), (vec![tp], Vec::new()));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fill upload
|
|
||||||
for row in rows.iter().filter(|r| r.direction == "up") {
|
|
||||||
let tp = Throughput {
|
|
||||||
value: row.avg,
|
|
||||||
date: row.time.format("%Y-%m-%d %H:%M:%S").to_string(),
|
|
||||||
l: row.min,
|
|
||||||
u: row.max - row.min,
|
|
||||||
};
|
|
||||||
if let Some(hat) = sorter.get_mut(&row.ip) {
|
|
||||||
hat.1.push(tp);
|
|
||||||
} else {
|
|
||||||
sorter.insert(row.ip.clone(), (Vec::new(), vec![tp]));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut result = Vec::new();
|
|
||||||
|
|
||||||
for (ip, (down, up)) in sorter.iter() {
|
|
||||||
result.push(ThroughputHost{
|
|
||||||
node_id: node_id.clone(),
|
|
||||||
node_name: format!("{ip} {node_name}"),
|
|
||||||
down: down.clone(),
|
|
||||||
up: up.clone(),
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
return Ok(result);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(anyhow::Error::msg("Unable to query influx"))
|
|
||||||
}
|
|
@ -1,111 +0,0 @@
|
|||||||
use crate::web::wss::{queries::time_period::InfluxTimePeriod, send_response};
|
|
||||||
use axum::extract::ws::WebSocket;
|
|
||||||
use pgdb::sqlx::{Pool, Postgres, Row};
|
|
||||||
use wasm_pipe_types::Throughput;
|
|
||||||
|
|
||||||
use super::{get_throughput_for_all_nodes_by_circuit, get_throughput_for_all_nodes_by_site};
|
|
||||||
|
|
||||||
pub async fn send_site_stack_map(
|
|
||||||
cnn: &Pool<Postgres>,
|
|
||||||
socket: &mut WebSocket,
|
|
||||||
key: &str,
|
|
||||||
period: InfluxTimePeriod,
|
|
||||||
site_id: String,
|
|
||||||
) -> anyhow::Result<()> {
|
|
||||||
let site_index = pgdb::get_site_id_from_name(cnn, key, &site_id).await?;
|
|
||||||
//println!("Site index: {site_index}");
|
|
||||||
|
|
||||||
let sites: Vec<String> =
|
|
||||||
pgdb::sqlx::query("SELECT DISTINCT site_name FROM site_tree WHERE key=$1 AND parent=$2")
|
|
||||||
.bind(key)
|
|
||||||
.bind(site_index)
|
|
||||||
.fetch_all(cnn)
|
|
||||||
.await?
|
|
||||||
.iter()
|
|
||||||
.map(|row| row.try_get("site_name").unwrap())
|
|
||||||
.collect();
|
|
||||||
//println!("{sites:?}");
|
|
||||||
|
|
||||||
let circuits: Vec<(String, String)> =
|
|
||||||
pgdb::sqlx::query("SELECT DISTINCT circuit_id, circuit_name FROM shaped_devices WHERE key=$1 AND parent_node=$2")
|
|
||||||
.bind(key)
|
|
||||||
.bind(site_id)
|
|
||||||
.fetch_all(cnn)
|
|
||||||
.await?
|
|
||||||
.iter()
|
|
||||||
.map(|row| (row.try_get("circuit_id").unwrap(), row.try_get("circuit_name").unwrap()))
|
|
||||||
.collect();
|
|
||||||
//println!("{circuits:?}");
|
|
||||||
|
|
||||||
let mut result = Vec::new();
|
|
||||||
for site in sites.into_iter() {
|
|
||||||
let mut throughput =
|
|
||||||
get_throughput_for_all_nodes_by_site(cnn, key, period.clone(), &site).await?;
|
|
||||||
throughput
|
|
||||||
.iter_mut()
|
|
||||||
.for_each(|row| row.node_name = site.clone());
|
|
||||||
result.extend(throughput);
|
|
||||||
}
|
|
||||||
for circuit in circuits.into_iter() {
|
|
||||||
let mut throughput =
|
|
||||||
get_throughput_for_all_nodes_by_circuit(cnn, key, period.clone(), &circuit.0).await?;
|
|
||||||
throughput
|
|
||||||
.iter_mut()
|
|
||||||
.for_each(|row| row.node_name = circuit.1.clone());
|
|
||||||
result.extend(throughput);
|
|
||||||
}
|
|
||||||
//println!("{result:?}");
|
|
||||||
|
|
||||||
// Sort by total
|
|
||||||
result.sort_by(|a, b| {
|
|
||||||
b.total()
|
|
||||||
.partial_cmp(&a.total())
|
|
||||||
.unwrap_or(std::cmp::Ordering::Equal)
|
|
||||||
});
|
|
||||||
|
|
||||||
// If there are more than 9 entries, create an "others" to handle the remainder
|
|
||||||
if result.len() > 9 {
|
|
||||||
let mut others = wasm_pipe_types::ThroughputHost {
|
|
||||||
node_id: "others".to_string(),
|
|
||||||
node_name: "others".to_string(),
|
|
||||||
down: Vec::new(),
|
|
||||||
up: Vec::new(),
|
|
||||||
};
|
|
||||||
result[0].down.iter().for_each(|x| {
|
|
||||||
others.down.push(Throughput {
|
|
||||||
value: 0.0,
|
|
||||||
date: x.date.clone(),
|
|
||||||
l: 0.0,
|
|
||||||
u: 0.0,
|
|
||||||
});
|
|
||||||
});
|
|
||||||
result[0].up.iter().for_each(|x| {
|
|
||||||
others.up.push(Throughput {
|
|
||||||
value: 0.0,
|
|
||||||
date: x.date.clone(),
|
|
||||||
l: 0.0,
|
|
||||||
u: 0.0,
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
result.iter().skip(9).for_each(|row| {
|
|
||||||
row.down.iter().enumerate().for_each(|(i, x)| {
|
|
||||||
others.down[i].value += x.value;
|
|
||||||
});
|
|
||||||
row.up.iter().enumerate().for_each(|(i, x)| {
|
|
||||||
others.up[i].value += x.value;
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
result.truncate(9);
|
|
||||||
result.push(others);
|
|
||||||
}
|
|
||||||
|
|
||||||
send_response(
|
|
||||||
socket,
|
|
||||||
wasm_pipe_types::WasmResponse::SiteStack { nodes: result },
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
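For context, `send_site_stack_map` above sorts hosts by total throughput, keeps the nine largest, and folds everything else into a single "others" series. A reduced sketch of the same top-N-plus-remainder rollup over plain `(name, total)` pairs; the helper name is illustrative only, not part of the crate.

```rust
// Standalone sketch of the "top 9 + others" rollup used by send_site_stack_map,
// reduced to plain (name, total) pairs for clarity.
fn top_n_with_others(mut rows: Vec<(String, f64)>, n: usize) -> Vec<(String, f64)> {
    // Largest totals first.
    rows.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
    if rows.len() > n {
        // Everything past the cut-off is summed into a single bucket.
        let others: f64 = rows.iter().skip(n).map(|(_, v)| v).sum();
        rows.truncate(n);
        rows.push(("others".to_string(), others));
    }
    rows
}
```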
@ -1,71 +0,0 @@
|
|||||||
use chrono::{DateTime, FixedOffset, Utc};
|
|
||||||
use influxdb2::FromDataPoint;
|
|
||||||
|
|
||||||
#[derive(Debug, FromDataPoint)]
|
|
||||||
pub struct ThroughputRow {
|
|
||||||
pub direction: String,
|
|
||||||
pub host_id: String,
|
|
||||||
pub min: f64,
|
|
||||||
pub max: f64,
|
|
||||||
pub avg: f64,
|
|
||||||
pub time: DateTime<FixedOffset>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for ThroughputRow {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self {
|
|
||||||
direction: "".to_string(),
|
|
||||||
host_id: "".to_string(),
|
|
||||||
min: 0.0,
|
|
||||||
max: 0.0,
|
|
||||||
avg: 0.0,
|
|
||||||
time: DateTime::<Utc>::MIN_UTC.into(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, FromDataPoint)]
|
|
||||||
pub struct ThroughputRowBySite {
|
|
||||||
pub direction: String,
|
|
||||||
pub host_id: String,
|
|
||||||
pub bits_min: f64,
|
|
||||||
pub bits_max: f64,
|
|
||||||
pub bits_avg: f64,
|
|
||||||
pub time: DateTime<FixedOffset>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for ThroughputRowBySite {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self {
|
|
||||||
direction: "".to_string(),
|
|
||||||
host_id: "".to_string(),
|
|
||||||
bits_min: 0.0,
|
|
||||||
bits_max: 0.0,
|
|
||||||
bits_avg: 0.0,
|
|
||||||
time: DateTime::<Utc>::MIN_UTC.into(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, FromDataPoint)]
|
|
||||||
pub struct ThroughputRowByCircuit {
|
|
||||||
pub direction: String,
|
|
||||||
pub ip: String,
|
|
||||||
pub min: f64,
|
|
||||||
pub max: f64,
|
|
||||||
pub avg: f64,
|
|
||||||
pub time: DateTime<FixedOffset>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for ThroughputRowByCircuit {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self {
|
|
||||||
direction: "".to_string(),
|
|
||||||
ip: "".to_string(),
|
|
||||||
min: 0.0,
|
|
||||||
max: 0.0,
|
|
||||||
avg: 0.0,
|
|
||||||
time: DateTime::<Utc>::MIN_UTC.into(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,55 +0,0 @@
#[derive(Clone)]
pub struct InfluxTimePeriod {
    start: String,
    aggregate: String,
}

impl InfluxTimePeriod {
    pub fn new(period: &str) -> Self {
        let start = match period {
            "5m" => "-5m",
            "15m" => "-15m",
            "1h" => "-60m",
            "6h" => "-360m",
            "12h" => "-720m",
            "24h" => "-1440m",
            "7d" => "-10080m",
            "28d" => "-40320m",
            _ => "-5m",
        };

        let aggregate = match period {
            "5m" => "10s",
            "15m" => "30s",
            "1h" => "1m",
            "6h" => "6m",
            "12h" => "12m",
            "24h" => "24m",
            "7d" => "210m",
            "28d" => "4h",
            _ => "10s",
        };

        Self {
            start: start.to_string(),
            aggregate: aggregate.to_string(),
        }
    }

    pub fn range(&self) -> String {
        format!("range(start: {})", self.start)
    }

    pub fn aggregate_window(&self) -> String {
        format!(
            "aggregateWindow(every: {}, fn: mean, createEmpty: false)",
            self.aggregate
        )
    }
}

impl From<&String> for InfluxTimePeriod {
    fn from(period: &String) -> Self {
        Self::new(period)
    }
}
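`InfluxTimePeriod` is interpolated directly into the Flux query strings used throughout this commit. A minimal sketch of that composition; the bucket name here is a placeholder, not a real bucket in this project.

```rust
// Illustrative only: compose a Flux query fragment from an InfluxTimePeriod.
// "example-bucket" is a placeholder bucket name.
fn example_query(period: &InfluxTimePeriod) -> String {
    format!(
        "from(bucket: \"example-bucket\")\n  |> {}\n  |> {}",
        period.range(),            // e.g. range(start: -15m)
        period.aggregate_window()  // e.g. aggregateWindow(every: 30s, fn: mean, createEmpty: false)
    )
}
```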
@ -1,20 +0,0 @@
# Site Build

This folder compiles and packages the website used by `lts_node`. It
needs to be compiled and made available to the `lts_node` process.

Steps: TBA

## Requirements

To run the build (as opposed to shipping pre-built files), you need to
install `esbuild` and `npm` (ugh). You can do this with:

```bash
# change directory to the site_build folder
sudo apt-get install npm
npm install
```

You can run the build manually by running `./esbuild.sh` in this
directory.
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@ -1,18 +0,0 @@
|
|||||||
<!DOCTYPE html>
|
|
||||||
<html lang="en">
|
|
||||||
<head>
|
|
||||||
<meta charset="utf-8">
|
|
||||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
|
||||||
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
|
|
||||||
<title>LibreQoS Long-Term Statistics</title>
|
|
||||||
<link rel="shortcut icon" href="#" />
|
|
||||||
|
|
||||||
<script type="module" src="/app.js"></script>
|
|
||||||
<link rel="stylesheet" href="style.css" />
|
|
||||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css" />
|
|
||||||
</head>
|
|
||||||
<body>
|
|
||||||
<div id="main"></div>
|
|
||||||
<footer>Copyright © 2023 LibreQoS</footer>
|
|
||||||
</body>
|
|
||||||
</html>
|
|
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
Binary file not shown.
@ -1,18 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "pgdb"
|
|
||||||
version = "0.1.0"
|
|
||||||
edition = "2021"
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
once_cell = "1"
|
|
||||||
thiserror = "1"
|
|
||||||
env_logger = "0"
|
|
||||||
log = "0"
|
|
||||||
lqos_bus = { path = "../../lqos_bus" }
|
|
||||||
sqlx = { version = "0.6.3", features = [ "runtime-tokio-rustls", "postgres" ] }
|
|
||||||
futures = "0"
|
|
||||||
uuid = { version = "1", features = ["v4", "fast-rng" ] }
|
|
||||||
influxdb2 = "0"
|
|
||||||
sha2 = "0"
|
|
||||||
dashmap = "5"
|
|
||||||
lqos_utils = { path = "../../lqos_utils" }
|
|
@ -1,184 +0,0 @@
|
|||||||
-- Creates the initial tables for the license server
|
|
||||||
|
|
||||||
-- We're using Trigrams for faster text search
|
|
||||||
CREATE EXTENSION pg_trgm;
|
|
||||||
|
|
||||||
CREATE TABLE public.licenses (
|
|
||||||
key character varying(254) NOT NULL,
|
|
||||||
stats_host integer NOT NULL
|
|
||||||
);
|
|
||||||
|
|
||||||
CREATE TABLE public.organizations (
|
|
||||||
key character varying(254) NOT NULL,
|
|
||||||
name character varying(254) NOT NULL,
|
|
||||||
influx_host character varying(254) NOT NULL,
|
|
||||||
influx_org character varying(254) NOT NULL,
|
|
||||||
influx_token character varying(254) NOT NULL,
|
|
||||||
influx_bucket character varying(254) NOT NULL
|
|
||||||
);
|
|
||||||
|
|
||||||
CREATE TABLE public.shaper_nodes (
|
|
||||||
license_key character varying(254) NOT NULL,
|
|
||||||
node_id character varying(254) NOT NULL,
|
|
||||||
node_name character varying(254) NOT NULL,
|
|
||||||
last_seen timestamp without time zone DEFAULT now() NOT NULL,
|
|
||||||
public_key bytea
|
|
||||||
);
|
|
||||||
|
|
||||||
CREATE TABLE public.site_tree
|
|
||||||
(
|
|
||||||
key character varying(254) NOT NULL,
|
|
||||||
site_name character varying(254) NOT NULL,
|
|
||||||
host_id character varying(254) NOT NULL,
|
|
||||||
index integer NOT NULL,
|
|
||||||
parent integer NOT NULL,
|
|
||||||
site_type character varying(32),
|
|
||||||
max_up integer NOT NULL DEFAULT 0,
|
|
||||||
max_down integer NOT NULL DEFAULT 0,
|
|
||||||
current_up integer NOT NULL DEFAULT 0,
|
|
||||||
current_down integer NOT NULL DEFAULT 0,
|
|
||||||
current_rtt integer NOT NULL DEFAULT 0,
|
|
||||||
PRIMARY KEY (key, site_name, host_id)
|
|
||||||
);
|
|
||||||
|
|
||||||
CREATE TABLE public.shaped_devices
|
|
||||||
(
|
|
||||||
key character varying(254) NOT NULL,
|
|
||||||
node_id character varying(254) NOT NULL,
|
|
||||||
circuit_id character varying(254) NOT NULL,
|
|
||||||
device_id character varying(254) NOT NULL,
|
|
||||||
circuit_name character varying(254) NOT NULL DEFAULT '',
|
|
||||||
device_name character varying(254) NOT NULL DEFAULT '',
|
|
||||||
parent_node character varying(254) NOT NULL DEFAULT '',
|
|
||||||
mac character varying(254) NOT NULL DEFAULT '',
|
|
||||||
download_min_mbps integer NOT NULL DEFAULT 0,
|
|
||||||
upload_min_mbps integer NOT NULL DEFAULT 0,
|
|
||||||
download_max_mbps integer NOT NULL DEFAULT 0,
|
|
||||||
upload_max_mbps integer NOT NULL DEFAULT 0,
|
|
||||||
comment text,
|
|
||||||
PRIMARY KEY (key, node_id, circuit_id, device_id)
|
|
||||||
);
|
|
||||||
|
|
||||||
CREATE TABLE public.shaped_device_ip
|
|
||||||
(
|
|
||||||
key character varying(254) COLLATE pg_catalog."default" NOT NULL,
|
|
||||||
node_id character varying(254) COLLATE pg_catalog."default" NOT NULL,
|
|
||||||
circuit_id character varying(254) COLLATE pg_catalog."default" NOT NULL,
|
|
||||||
ip_range character varying(254) COLLATE pg_catalog."default" NOT NULL,
|
|
||||||
subnet integer NOT NULL,
|
|
||||||
CONSTRAINT shaped_device_ip_pkey PRIMARY KEY (key, node_id, circuit_id, ip_range, subnet)
|
|
||||||
);
|
|
||||||
|
|
||||||
CREATE TABLE public.stats_hosts (
|
|
||||||
id integer NOT NULL,
|
|
||||||
ip_address character varying(128) NOT NULL,
|
|
||||||
can_accept_new_clients boolean NOT NULL DEFAULT true,
|
|
||||||
influx_host character varying(128) NOT NULL,
|
|
||||||
api_key character varying(255) NOT NULL
|
|
||||||
);
|
|
||||||
|
|
||||||
CREATE SEQUENCE public.stats_hosts_id_seq
|
|
||||||
AS integer
|
|
||||||
START WITH 1
|
|
||||||
INCREMENT BY 1
|
|
||||||
NO MINVALUE
|
|
||||||
NO MAXVALUE
|
|
||||||
CACHE 1;
|
|
||||||
|
|
||||||
ALTER TABLE ONLY public.stats_hosts
|
|
||||||
ALTER COLUMN id SET DEFAULT nextval('public.stats_hosts_id_seq'::regclass);
|
|
||||||
|
|
||||||
ALTER TABLE ONLY public.licenses
|
|
||||||
ADD CONSTRAINT licenses_pkey PRIMARY KEY (key);
|
|
||||||
|
|
||||||
ALTER TABLE ONLY public.organizations
|
|
||||||
ADD CONSTRAINT pk_organizations PRIMARY KEY (key);
|
|
||||||
|
|
||||||
ALTER TABLE ONLY public.shaper_nodes
|
|
||||||
ADD CONSTRAINT shaper_nodes_pk PRIMARY KEY (license_key, node_id);
|
|
||||||
|
|
||||||
ALTER TABLE ONLY public.stats_hosts
|
|
||||||
ADD CONSTRAINT stats_hosts_pkey PRIMARY KEY (id);
|
|
||||||
|
|
||||||
ALTER TABLE ONLY public.organizations
|
|
||||||
ADD CONSTRAINT organizations_license_fk FOREIGN KEY (key) REFERENCES public.licenses(key);
|
|
||||||
|
|
||||||
ALTER TABLE ONLY public.licenses
|
|
||||||
ADD CONSTRAINT stats_host_fk FOREIGN KEY (stats_host) REFERENCES public.stats_hosts(id) NOT VALID;
|
|
||||||
|
|
||||||
CREATE TABLE public.logins
|
|
||||||
(
|
|
||||||
key character varying(254) NOT NULL,
|
|
||||||
username character varying(64) NOT NULL,
|
|
||||||
password_hash character varying(64) NOT NULL,
|
|
||||||
nicename character varying(64) NOT NULL,
|
|
||||||
CONSTRAINT pk_logins_licenses PRIMARY KEY (key, username),
|
|
||||||
CONSTRAINT fk_login_licenses FOREIGN KEY (key)
|
|
||||||
REFERENCES public.licenses (key) MATCH SIMPLE
|
|
||||||
ON UPDATE NO ACTION
|
|
||||||
ON DELETE NO ACTION
|
|
||||||
NOT VALID
|
|
||||||
);
|
|
||||||
|
|
||||||
CREATE TABLE public.active_tokens
|
|
||||||
(
|
|
||||||
key character varying(254) NOT NULL,
|
|
||||||
token character varying(254) NOT NULL,
|
|
||||||
username character varying(64) NOT NULL,
|
|
||||||
expires timestamp without time zone NOT NULL DEFAULT NOW() + interval '2 hours',
|
|
||||||
PRIMARY KEY (token)
|
|
||||||
);
|
|
||||||
|
|
||||||
CREATE TABLE public.uisp_devices_ext
|
|
||||||
(
|
|
||||||
key character varying(254) NOT NULL,
|
|
||||||
device_id character varying(254) NOT NULL,
|
|
||||||
name character varying(254) NOT NULL DEFAULT '',
|
|
||||||
model character varying(254) NOT NULL DEFAULT '',
|
|
||||||
firmware character varying(64) NOT NULL DEFAULT '',
|
|
||||||
status character varying(64) NOT NULL DEFAULT '',
|
|
||||||
mode character varying(64) NOT NULL DEFAULT '',
|
|
||||||
channel_width integer NOT NULL DEFAULT 0,
|
|
||||||
tx_power integer NOT NULL DEFAULT 0,
|
|
||||||
PRIMARY KEY (key, device_id)
|
|
||||||
);
|
|
||||||
|
|
||||||
CREATE TABLE public.uisp_devices_interfaces
|
|
||||||
(
|
|
||||||
key character varying(254) NOT NULL,
|
|
||||||
device_id character varying(254) NOT NULL,
|
|
||||||
id serial NOT NULL,
|
|
||||||
name character varying(64) NOT NULL DEFAULT '',
|
|
||||||
mac character varying(64) NOT NULL DEFAULT '',
|
|
||||||
status character varying(64) NOT NULL DEFAULT '',
|
|
||||||
speed character varying(64) NOT NULL DEFAULT '',
|
|
||||||
ip_list character varying(254) NOT NULL DEFAULT '',
|
|
||||||
PRIMARY KEY (key, device_id, id)
|
|
||||||
);
|
|
||||||
|
|
||||||
---- Indices
|
|
||||||
|
|
||||||
CREATE INDEX site_tree_key
|
|
||||||
ON public.site_tree USING btree
|
|
||||||
(key ASC NULLS LAST)
|
|
||||||
;
|
|
||||||
|
|
||||||
CREATE INDEX site_tree_key_parent
|
|
||||||
ON public.site_tree USING btree
|
|
||||||
(key ASC NULLS LAST, parent ASC NULLS LAST)
|
|
||||||
;
|
|
||||||
|
|
||||||
CREATE INDEX shaped_devices_key_circuit_id
|
|
||||||
ON public.shaped_devices USING btree
|
|
||||||
(key ASC NULLS LAST, circuit_id ASC NULLS LAST)
|
|
||||||
;
|
|
||||||
|
|
||||||
CREATE INDEX stats_host_ip
|
|
||||||
ON public.stats_hosts USING btree
|
|
||||||
(ip_address ASC NULLS LAST)
|
|
||||||
;
|
|
||||||
|
|
||||||
CREATE INDEX shaper_nodes_license_key_idx
|
|
||||||
ON public.shaper_nodes USING btree
|
|
||||||
(license_key ASC NULLS LAST)
|
|
||||||
;
|
|
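The migration above enables `pg_trgm`, so text search against columns such as `circuit_name` can use trigram similarity. A hedged sketch of what a trigram-backed lookup could look like from `sqlx` follows; the `%` operator and `similarity()` strategy are assumptions for illustration, since the real `pgdb::search_*` helpers are not shown in this diff.

```rust
// Sketch only: one way a trigram-accelerated lookup could be issued with sqlx.
// The similarity()/% strategy is an assumption, not the project's actual query.
use sqlx::{Pool, Postgres, Row};

pub async fn find_circuits_like(
    cnn: &Pool<Postgres>,
    key: &str,
    term: &str,
) -> Result<Vec<(String, f32)>, sqlx::Error> {
    sqlx::query(
        "SELECT circuit_name, similarity(circuit_name, $2) AS score
         FROM shaped_devices
         WHERE key = $1 AND circuit_name % $2
         ORDER BY score DESC
         LIMIT 10",
    )
    .bind(key)
    .bind(term)
    .fetch_all(cnn)
    .await
    .map(|rows| {
        rows.iter()
            .map(|r| (r.get("circuit_name"), r.get("score")))
            .collect()
    })
}
```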
@ -1,83 +0,0 @@
|
|||||||
use sqlx::{Pool, Postgres, FromRow};
|
|
||||||
|
|
||||||
use crate::license::StatsHostError;
|
|
||||||
|
|
||||||
#[derive(Debug, FromRow)]
|
|
||||||
pub struct CircuitInfo {
|
|
||||||
pub circuit_name: String,
|
|
||||||
pub device_id: String,
|
|
||||||
pub device_name: String,
|
|
||||||
pub parent_node: String,
|
|
||||||
pub mac: String,
|
|
||||||
pub download_min_mbps: i32,
|
|
||||||
pub download_max_mbps: i32,
|
|
||||||
pub upload_min_mbps: i32,
|
|
||||||
pub upload_max_mbps: i32,
|
|
||||||
pub comment: String,
|
|
||||||
pub ip_range: String,
|
|
||||||
pub subnet: i32,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn get_circuit_info(
|
|
||||||
cnn: &Pool<Postgres>,
|
|
||||||
key: &str,
|
|
||||||
circuit_id: &str,
|
|
||||||
) -> Result<Vec<CircuitInfo>, StatsHostError> {
|
|
||||||
const SQL: &str = "SELECT circuit_name, device_id, device_name, parent_node, mac, download_min_mbps, download_max_mbps, upload_min_mbps, upload_max_mbps, comment, ip_range, subnet FROM shaped_devices INNER JOIN shaped_device_ip ON shaped_device_ip.key = shaped_devices.key AND shaped_device_ip.circuit_id = shaped_devices.circuit_id WHERE shaped_devices.key=$1 AND shaped_devices.circuit_id=$2";
|
|
||||||
|
|
||||||
sqlx::query_as::<_, CircuitInfo>(SQL)
|
|
||||||
.bind(key)
|
|
||||||
.bind(circuit_id)
|
|
||||||
.fetch_all(cnn)
|
|
||||||
.await
|
|
||||||
.map_err(|e| StatsHostError::DatabaseError(e.to_string()))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, FromRow)]
|
|
||||||
pub struct DeviceInfoExt {
|
|
||||||
pub device_id: String,
|
|
||||||
pub name: String,
|
|
||||||
pub model: String,
|
|
||||||
pub firmware: String,
|
|
||||||
pub status: String,
|
|
||||||
pub mode: String,
|
|
||||||
pub channel_width: i32,
|
|
||||||
pub tx_power: i32,
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
pub async fn get_device_info_ext(
|
|
||||||
cnn: &Pool<Postgres>,
|
|
||||||
key: &str,
|
|
||||||
device_id: &str,
|
|
||||||
) -> Result<DeviceInfoExt, StatsHostError> {
|
|
||||||
sqlx::query_as::<_, DeviceInfoExt>("SELECT device_id, name, model, firmware, status, mode, channel_width, tx_power FROM uisp_devices_ext WHERE key=$1 AND device_id=$2")
|
|
||||||
.bind(key)
|
|
||||||
.bind(device_id)
|
|
||||||
.fetch_one(cnn)
|
|
||||||
.await
|
|
||||||
.map_err(|e| StatsHostError::DatabaseError(e.to_string()))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, FromRow)]
|
|
||||||
pub struct DeviceInterfaceExt {
|
|
||||||
pub name: String,
|
|
||||||
pub mac: String,
|
|
||||||
pub status: String,
|
|
||||||
pub speed: String,
|
|
||||||
pub ip_list: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn get_device_interfaces_ext(
|
|
||||||
cnn: &Pool<Postgres>,
|
|
||||||
key: &str,
|
|
||||||
device_id: &str,
|
|
||||||
) -> Result<Vec<DeviceInterfaceExt>, StatsHostError>
|
|
||||||
{
|
|
||||||
sqlx::query_as::<_, DeviceInterfaceExt>("SELECT name, mac, status, speed, ip_list FROM uis_devices_interfaces WHERE key=$1 AND device_id=$2")
|
|
||||||
.bind(key)
|
|
||||||
.bind(device_id)
|
|
||||||
.fetch_all(cnn)
|
|
||||||
.await
|
|
||||||
.map_err(|e| StatsHostError::DatabaseError(e.to_string()))
|
|
||||||
}
|
|
@ -1,37 +0,0 @@
//! Manages access to the safely stored connection string, in `/etc/lqdb`.
//! Failure to obtain a database connection is a fatal error.
//! The connection string is read once, on the first call to `get_connection_string()`.
//! Please be careful to never include `/etc/lqdb` in any git commits.

use std::path::Path;
use std::fs::File;
use std::io::Read;
use once_cell::sync::Lazy;

pub static CONNECTION_STRING: Lazy<String> = Lazy::new(read_connection_string);

/// Read the connection string from /etc/lqdb
/// Called by the `Lazy` on CONNECTION_STRING
fn read_connection_string() -> String {
    let path = Path::new("/etc/lqdb");
    if !path.exists() {
        log::error!("{} does not exist", path.display());
        panic!("{} does not exist", path.display());
    }

    match File::open(path) {
        Ok(mut file) => {
            let mut buf = String::new();
            if let Ok(_size) = file.read_to_string(&mut buf) {
                buf
            } else {
                log::error!("Could not read {}", path.display());
                panic!("Could not read {}", path.display());
            }
        }
        Err(e) => {
            log::error!("Could not open {}: {e:?}", path.display());
            panic!("Could not open {}: {e:?}", path.display());
        }
    }
}
@ -1,4 +0,0 @@
mod connection_string;
mod pool;

pub use pool::get_connection_pool;
@ -1,13 +0,0 @@
use sqlx::{postgres::PgPoolOptions, Postgres, Pool};
use super::connection_string::CONNECTION_STRING;

/// Obtain a connection pool to the database.
///
/// # Arguments
/// * `max_connections` - The maximum number of connections to the database.
pub async fn get_connection_pool(max_connections: u32) -> Result<Pool<Postgres>, sqlx::Error> {
    PgPoolOptions::new()
        .max_connections(max_connections)
        .connect(&CONNECTION_STRING)
        .await
}
@ -1,59 +0,0 @@
use sqlx::{Pool, Postgres, Row};
use crate::license::StatsHostError;

pub async fn add_stats_host(cnn: Pool<Postgres>, hostname: String, influx_host: String, api_key: String) -> Result<i64, StatsHostError> {
    // Does the stats host already exist? We don't want duplicates
    let row = sqlx::query("SELECT COUNT(*) AS count FROM stats_hosts WHERE ip_address=$1")
        .bind(&hostname)
        .fetch_one(&cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;

    let count: i64 = row.try_get("count").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;

    if count != 0 {
        return Err(StatsHostError::HostAlreadyExists);
    }

    // Get the new primary key
    log::info!("Getting new primary key for stats host");
    let row = sqlx::query("SELECT NEXTVAL('stats_hosts_id_seq') AS id")
        .fetch_one(&cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;


    let new_id: i64 = row.try_get("id").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;

    // Insert the stats host
    log::info!("Inserting new stats host: {} ({})", hostname, new_id);
    sqlx::query("INSERT INTO stats_hosts (id, ip_address, can_accept_new_clients, influx_host, api_key) VALUES ($1, $2, $3, $4, $5)")
        .bind(new_id)
        .bind(&hostname)
        .bind(true)
        .bind(&influx_host)
        .bind(&api_key)
        .execute(&cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;

    Ok(new_id)
}

const FIND_STATS_HOST: &str = "SELECT a.id AS id, a.influx_host AS influx_host, a.api_key AS api_key
FROM stats_hosts a
WHERE can_accept_new_clients = true
ORDER BY (SELECT COUNT(organizations.\"key\") FROM organizations WHERE a.influx_host = influx_host)
LIMIT 1";

pub async fn find_emptiest_stats_host(cnn: Pool<Postgres>) -> Result<(i32, String, String), StatsHostError> {
    let row = sqlx::query(FIND_STATS_HOST)
        .fetch_one(&cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;

    let id: i32 = row.try_get("id").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
    let influx_host: String = row.try_get("influx_host").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
    let api_key: String = row.try_get("api_key").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
    Ok((id, influx_host, api_key))
}
@ -1,26 +0,0 @@
mod connection;
mod license;
mod organization;
mod hosts;
mod orchestrator;
mod logins;
mod nodes;
mod search;
mod tree;
mod circuit;
pub mod organization_cache;

pub mod sqlx {
    pub use sqlx::*;
}

pub use connection::get_connection_pool;
pub use license::{get_stats_host_for_key, insert_or_update_node_public_key, fetch_public_key};
pub use organization::{OrganizationDetails, get_organization};
pub use hosts::add_stats_host;
pub use orchestrator::create_free_trial;
pub use logins::{try_login, delete_user, add_user, refresh_token, token_to_credentials};
pub use nodes::{new_stats_arrived, node_status, NodeStatus};
pub use search::*;
pub use tree::*;
pub use circuit::*;
@ -1,87 +0,0 @@
//! Handles license checks from the `license_server`.

use sqlx::{Pool, Postgres, Row};
use thiserror::Error;

pub async fn get_stats_host_for_key(cnn: Pool<Postgres>, key: &str) -> Result<String, StatsHostError> {
    let row = sqlx::query("SELECT ip_address FROM licenses INNER JOIN stats_hosts ON stats_hosts.id = licenses.stats_host WHERE key=$1")
        .bind(key)
        .fetch_one(&cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;

    let ip_address: &str = row.try_get("ip_address").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
    log::info!("Found stats host for key: {}", ip_address);
    Ok(ip_address.to_string())
}

pub async fn insert_or_update_node_public_key(cnn: Pool<Postgres>, node_id: &str, node_name: &str, license_key: &str, public_key: &[u8]) -> Result<(), StatsHostError> {
    let row = sqlx::query("SELECT COUNT(*) AS count FROM shaper_nodes WHERE node_id=$1 AND license_key=$2")
        .bind(node_id)
        .bind(license_key)
        .fetch_one(&cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;

    let count: i64 = row.try_get("count").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
    match count {
        0 => {
            // Insert
            log::info!("Inserting new node: {} {}", node_id, license_key);
            sqlx::query("INSERT INTO shaper_nodes (license_key, node_id, public_key, node_name) VALUES ($1, $2, $3, $4)")
                .bind(license_key)
                .bind(node_id)
                .bind(public_key)
                .bind(node_name)
                .execute(&cnn)
                .await
                .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
        }
        1 => {
            // Update
            log::info!("Updating node: {} {}", node_id, license_key);
            sqlx::query("UPDATE shaper_nodes SET public_key=$1, last_seen=NOW(), node_name=$4 WHERE node_id=$2 AND license_key=$3")
                .bind(public_key)
                .bind(node_id)
                .bind(license_key)
                .bind(node_name)
                .execute(&cnn)
                .await
                .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
        }
        _ => {
            log::error!("Found multiple nodes with the same node_id and license_key");
            return Err(StatsHostError::DatabaseError("Found multiple nodes with the same node_id and license_key".to_string()));
        }
    }

    Ok(())
}

pub async fn fetch_public_key(cnn: Pool<Postgres>, license_key: &str, node_id: &str) -> Result<Vec<u8>, StatsHostError> {
    let row = sqlx::query("SELECT public_key FROM shaper_nodes WHERE license_key=$1 AND node_id=$2")
        .bind(license_key)
        .bind(node_id)
        .fetch_one(&cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;

    let public_key: Vec<u8> = row.try_get("public_key").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
    Ok(public_key)
}

#[derive(Debug, Error)]
pub enum StatsHostError {
    #[error("Database error occurred")]
    DatabaseError(String),
    #[error("Host already exists")]
    HostAlreadyExists,
    #[error("Organization already exists")]
    OrganizationAlreadyExists,
    #[error("No available stats hosts")]
    NoStatsHostsAvailable,
    #[error("InfluxDB Error")]
    InfluxError(String),
    #[error("No such login")]
    InvalidLogin,
}
@ -1,28 +0,0 @@
use sqlx::{Pool, Postgres};

use crate::license::StatsHostError;

use super::hasher::hash_password;

pub async fn delete_user(cnn: Pool<Postgres>, key: &str, username: &str) -> Result<(), StatsHostError> {
    sqlx::query("DELETE FROM logins WHERE key = $1 AND username = $2")
        .bind(key)
        .bind(username)
        .execute(&cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
    Ok(())
}

pub async fn add_user(cnn: Pool<Postgres>, key: &str, username: &str, password: &str, nicename: &str) -> Result<(), StatsHostError> {
    let password = hash_password(password);
    sqlx::query("INSERT INTO logins (key, username, password_hash, nicename) VALUES ($1, $2, $3, $4)")
        .bind(key)
        .bind(username)
        .bind(password)
        .bind(nicename)
        .execute(&cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
    Ok(())
}
@ -1,9 +0,0 @@
use sha2::Sha256;
use sha2::Digest;

pub(crate) fn hash_password(password: &str) -> String {
    let salted = format!("!x{password}_SaltIsGoodForYou");
    let mut sha256 = Sha256::new();
    sha256.update(salted);
    format!("{:X}", sha256.finalize())
}
@ -1,35 +0,0 @@
use sqlx::{Pool, Postgres, Row};
use uuid::Uuid;
use crate::license::StatsHostError;
use super::{hasher::hash_password, token_cache::create_token};

#[derive(Debug, Clone)]
pub struct LoginDetails {
    pub token: String,
    pub license: String,
    pub name: String,
}

pub async fn try_login(cnn: Pool<Postgres>, key: &str, username: &str, password: &str) -> Result<LoginDetails, StatsHostError> {
    let password = hash_password(password);

    let row = sqlx::query("SELECT nicename FROM logins WHERE key = $1 AND username = $2 AND password_hash = $3")
        .bind(key)
        .bind(username)
        .bind(password)
        .fetch_one(&cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;

    let nicename: String = row.try_get("nicename").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
    let uuid = Uuid::new_v4().to_string();
    let details = LoginDetails {
        token: uuid,
        name: nicename,
        license: key.to_string(),
    };

    create_token(&cnn, &details, key, username).await?;

    Ok(details)
}
@ -1,8 +0,0 @@
mod hasher;
mod login;
mod add_del;
mod token_cache;

pub use login::{LoginDetails, try_login};
pub use add_del::{add_user, delete_user};
pub use token_cache::{refresh_token, token_to_credentials};
@ -1,96 +0,0 @@
use super::LoginDetails;
use crate::license::StatsHostError;
use dashmap::DashMap;
use lqos_utils::unix_time::unix_now;
use once_cell::sync::Lazy;
use sqlx::{Pool, Postgres, Row};

static TOKEN_CACHE: Lazy<DashMap<String, TokenDetails>> = Lazy::new(DashMap::new);

struct TokenDetails {
    last_seen: u64,
    last_refreshed: u64,
}

pub async fn create_token(
    cnn: &Pool<Postgres>,
    details: &LoginDetails,
    key: &str,
    username: &str,
) -> Result<(), StatsHostError> {
    sqlx::query("INSERT INTO active_tokens (token, key, username) VALUES ($1, $2, $3)")
        .bind(&details.token)
        .bind(key)
        .bind(username)
        .execute(cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;

    let now = unix_now().unwrap_or(0);
    TOKEN_CACHE.insert(
        details.token.clone(),
        TokenDetails {
            last_seen: now,
            last_refreshed: now,
        },
    );

    Ok(())
}

pub async fn refresh_token(cnn: Pool<Postgres>, token_id: &str) -> Result<(), StatsHostError> {
    if let Some(mut token) = TOKEN_CACHE.get_mut(token_id) {
        let now = unix_now().unwrap_or(0);
        token.last_seen = now;
        let age = now - token.last_refreshed;

        if age > 300 {
            token.last_refreshed = now;
            sqlx::query("UPDATE active_tokens SET last_seen = NOW() WHERE token = $1")
                .bind(token_id)
                .execute(&cnn)
                .await
                .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
        }

        Ok(())
    } else {
        Err(StatsHostError::DatabaseError("Unauthorized".to_string()))
    }
}

pub async fn token_to_credentials(
    cnn: Pool<Postgres>,
    token_id: &str,
) -> Result<LoginDetails, StatsHostError> {
    let row = sqlx::query("SELECT key, username FROM active_tokens WHERE token = $1")
        .bind(token_id)
        .fetch_one(&cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;

    let key: String = row
        .try_get("key")
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
    let username: String = row
        .try_get("username")
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;

    let row = sqlx::query("SELECT nicename FROM logins WHERE key = $1 AND username = $2")
        .bind(&key)
        .bind(username)
        .fetch_one(&cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;

    let nicename: String = row
        .try_get("nicename")
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
    let details = LoginDetails {
        token: token_id.to_string(),
        name: nicename,
        license: key,
    };

    Ok(details)
}
@ -1,36 +0,0 @@
use sqlx::{Pool, Postgres};

use crate::license::StatsHostError;

pub async fn new_stats_arrived(cnn: Pool<Postgres>, license: &str, node: &str) -> Result<(), StatsHostError> {
    // Does the node exist?
    sqlx::query("UPDATE shaper_nodes SET last_seen=NOW() WHERE license_key=$1 AND node_id=$2")
        .bind(license)
        .bind(node)
        .execute(&cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
    Ok(())
}

#[derive(Clone, sqlx::FromRow, Debug)]
pub struct NodeStatus {
    pub node_id: String,
    pub node_name: String,
    pub last_seen: i32,
}

pub async fn node_status(cnn: &Pool<Postgres>, license: &str) -> Result<Vec<NodeStatus>, StatsHostError> {
    let res = sqlx::query_as::<_, NodeStatus>("SELECT node_id, node_name, extract('epoch' from NOW()-last_seen)::integer AS last_seen FROM shaper_nodes WHERE license_key=$1")
        .bind(license)
        .fetch_all(cnn)
        .await;

    match res {
        Err(e) => {
            log::error!("Unable to get node status: {}", e);
            Err(StatsHostError::DatabaseError(e.to_string()))
        }
        Ok(rows) => Ok(rows)
    }
}
@ -1,107 +0,0 @@
use crate::{
    hosts::find_emptiest_stats_host, license::StatsHostError,
    organization::does_organization_name_exist,
};
use influxdb2::{
    models::{PostBucketRequest, RetentionRule, Status},
    Client,
};
use sqlx::{Pool, Postgres};
use uuid::Uuid;

pub async fn create_free_trial(
    cnn: Pool<Postgres>,
    organization_name: &str,
) -> Result<String, StatsHostError> {
    // Check that no organization of this name exists already (error if they exist)
    if does_organization_name_exist(cnn.clone(), organization_name).await? {
        return Err(StatsHostError::OrganizationAlreadyExists);
    }

    // Find the most empty, available stats host (error if none)
    let (stats_host_id, influx_url, api_key) = find_emptiest_stats_host(cnn.clone()).await?;

    // Generate a new license key
    let uuid = Uuid::new_v4().to_string();

    // Connect to Influx, and create a new bucket and API token
    create_bucket(&influx_url, &api_key, organization_name).await?;

    // As a transaction:
    // - Insert into licenses
    // - Insert into organizations
    let mut tx = cnn.begin().await.map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
    sqlx::query("INSERT INTO licenses (key, stats_host) VALUES ($1, $2);")
        .bind(&uuid)
        .bind(stats_host_id)
        .execute(&mut tx)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;

    sqlx::query("INSERT INTO organizations (key, name, influx_host, influx_org, influx_token, influx_bucket) VALUES ($1, $2, $3, $4, $5, $6);")
        .bind(&uuid)
        .bind(organization_name)
        .bind(&influx_url)
        .bind("LibreQoS")
        .bind(api_key)
        .bind(organization_name)
        .execute(&mut tx)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;

    tx.commit().await.map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;

    Ok(uuid)
}

async fn create_bucket(
    influx_host: &str,
    api_key: &str,
    org_name: &str,
) -> Result<(), StatsHostError> {
    let influx_url = format!("http://{influx_host}:8086");
    let client = Client::new(influx_url, "LibreQoS", api_key);

    // Is Influx alive and well?
    match client.health().await {
        Err(e) => return Err(StatsHostError::InfluxError(e.to_string())),
        Ok(health) => {
            if health.status == Status::Fail {
                return Err(StatsHostError::InfluxError(
                    "Influx health check failed".to_string(),
                ));
            }
        }
    }

    // Translate the organization name into an id
    let org = client.list_organizations(influxdb2::api::organization::ListOrganizationRequest {
        descending: None,
        limit: None,
        offset: None,
        org: None,
        org_id: None,
        user_id: None
    }).await.map_err(|e| StatsHostError::InfluxError(e.to_string()))?;
    let org_id = org.orgs[0].id.as_ref().unwrap();

    // Let's make the bucket
    if let Err(e) = client
        .create_bucket(Some(PostBucketRequest {
            org_id: org_id.to_string(),
            name: org_name.to_string(),
            description: None,
            rp: None,
            retention_rules: vec![RetentionRule::new(
                influxdb2::models::retention_rule::Type::Expire,
                604800,
            )], // 1 Week
        }))
        .await
    {
        log::error!("Error creating bucket: {}", e);
        return Err(StatsHostError::InfluxError(e.to_string()));
    }

    Ok(())
}
@ -1,38 +0,0 @@
use sqlx::{Pool, Postgres, Row};
use crate::license::StatsHostError;

#[derive(Clone, sqlx::FromRow, Debug)]
pub struct OrganizationDetails {
    pub key: String,
    pub name: String,
    pub influx_host: String,
    pub influx_org: String,
    pub influx_token: String,
    pub influx_bucket: String,
}

pub async fn get_organization(cnn: &Pool<Postgres>, key: &str) -> Result<OrganizationDetails, StatsHostError> {
    let mut row = sqlx::query_as::<_, OrganizationDetails>("SELECT * FROM organizations WHERE key=$1")
        .bind(key)
        .fetch_one(cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;

    // For local development - comment out
    if row.influx_host == "127.0.0.1" {
        row.influx_host = "146.190.156.69".to_string();
    }

    Ok(row)
}

pub async fn does_organization_name_exist(cnn: Pool<Postgres>, name: &str) -> Result<bool, StatsHostError> {
    let row = sqlx::query("SELECT COUNT(*) AS count FROM organizations WHERE name=$1")
        .bind(name)
        .fetch_one(&cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;

    let count: i64 = row.try_get("count").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
    Ok(count > 0)
}
@ -1,25 +0,0 @@
use std::{collections::HashMap, sync::RwLock};
use once_cell::sync::Lazy;
use sqlx::{Pool, Postgres};
use crate::{OrganizationDetails, get_organization};

static ORG_CACHE: Lazy<RwLock<HashMap<String, OrganizationDetails>>> = Lazy::new(|| {
    RwLock::new(HashMap::new())
});

pub async fn get_org_details(cnn: &Pool<Postgres>, key: &str) -> Option<OrganizationDetails> {
    { // Safety scope - lock is dropped on exit
        let cache = ORG_CACHE.read().unwrap();
        if let Some(org) = cache.get(key) {
            return Some(org.clone());
        }
    }
    // We can be certain that we don't have a dangling lock now.
    // Upgrade to a write lock and try to fetch the org details.
    if let Ok(org) = get_organization(cnn, key).await {
        let mut cache = ORG_CACHE.write().unwrap();
        cache.insert(key.to_string(), org.clone());
        return Some(org);
    }
    None
}
@ -1,101 +0,0 @@
use sqlx::{Pool, Postgres, FromRow};

use crate::license::StatsHostError;

#[derive(Debug, FromRow)]
pub struct DeviceHit {
    pub circuit_id: String,
    pub circuit_name: String,
    pub score: f64,
}

#[derive(Debug, FromRow)]
pub struct SiteHit {
    pub site_name: String,
    pub site_type: String,
    pub score: f64,
}

pub async fn search_devices(
    cnn: &Pool<Postgres>,
    key: &str,
    term: &str,
) -> Result<Vec<DeviceHit>, StatsHostError> {

    const SQL: &str = "with input as (select $1 as q)
select circuit_id, circuit_name, 1 - (input.q <<-> (circuit_name || ' ' || device_name || ' ' || mac)) as score
from shaped_devices, input
where
key = $2 AND
(input.q <<-> (circuit_name || ' ' || device_name || ' ' || mac)) < 0.15
order by input.q <<-> (circuit_name || ' ' || device_name || ' ' || mac)";

    let rows = sqlx::query_as::<_, DeviceHit>(SQL)
        .bind(term)
        .bind(key)
        .fetch_all(cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()));

    if let Err(e) = &rows {
        log::error!("{e:?}");
    }

    rows
}

pub async fn search_ip(
    cnn: &Pool<Postgres>,
    key: &str,
    term: &str,
) -> Result<Vec<DeviceHit>, StatsHostError> {
    const SQL: &str = "with input as (select $1 as q)
select shaped_device_ip.circuit_id AS circuit_id,
circuit_name || ' (' || shaped_device_ip.ip_range || '/' || shaped_device_ip.subnet || ')' AS circuit_name,
1 - (input.q <<-> shaped_device_ip.ip_range) AS score
FROM shaped_device_ip INNER JOIN shaped_devices
ON (shaped_devices.circuit_id = shaped_device_ip.circuit_id AND shaped_devices.key = shaped_device_ip.key), input
WHERE shaped_device_ip.key = $2
AND (input.q <<-> shaped_device_ip.ip_range) < 0.15
ORDER BY (input.q <<-> shaped_device_ip.ip_range)";

    let rows = sqlx::query_as::<_, DeviceHit>(SQL)
        .bind(term)
        .bind(key)
        .fetch_all(cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()));

    if let Err(e) = &rows {
        log::error!("{e:?}");
    }

    rows
}

pub async fn search_sites(
    cnn: &Pool<Postgres>,
    key: &str,
    term: &str,
) -> Result<Vec<SiteHit>, StatsHostError> {
    const SQL: &str = "with input as (select $1 as q)
select site_name, site_type, 1 - (input.q <<-> site_name) as score
from site_tree, input
where
key = $2 AND
(input.q <<-> site_name) < 0.15
order by input.q <<-> site_name";

    let rows = sqlx::query_as::<_, SiteHit>(SQL)
        .bind(term)
        .bind(key)
        .fetch_all(cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()));

    if let Err(e) = &rows {
        log::error!("{e:?}");
    }

    rows
}
@ -1,183 +0,0 @@
use sqlx::{FromRow, Pool, Postgres, Row};
use crate::license::StatsHostError;

#[derive(Debug, FromRow)]
pub struct TreeNode {
    pub site_name: String,
    pub index: i32,
    pub parent: i32,
    pub site_type: String,
    pub max_down: i32,
    pub max_up: i32,
    pub current_down: i32,
    pub current_up: i32,
    pub current_rtt: i32,
}

pub async fn get_site_tree(
    cnn: &Pool<Postgres>,
    key: &str,
    host_id: &str,
) -> Result<Vec<TreeNode>, StatsHostError> {
    sqlx::query_as::<_, TreeNode>("SELECT site_name, index, parent, site_type, max_down, max_up, current_down, current_up, current_rtt FROM site_tree WHERE key = $1 AND host_id=$2")
        .bind(key)
        .bind(host_id)
        .fetch_all(cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))
}

pub async fn get_site_info(
    cnn: &Pool<Postgres>,
    key: &str,
    site_name: &str,
) -> Result<TreeNode, StatsHostError> {
    sqlx::query_as::<_, TreeNode>("SELECT site_name, index, parent, site_type, max_down, max_up, current_down, current_up, current_rtt FROM site_tree WHERE key = $1 AND site_name=$2")
        .bind(key)
        .bind(site_name)
        .fetch_one(cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))
}

pub async fn get_site_id_from_name(
    cnn: &Pool<Postgres>,
    key: &str,
    site_name: &str,
) -> Result<i32, StatsHostError> {
    if site_name == "root" {
        return Ok(0);
    }
    let site_id_db = sqlx::query("SELECT index FROM site_tree WHERE key = $1 AND site_name=$2")
        .bind(key)
        .bind(site_name)
        .fetch_one(cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
    let site_id: i32 = site_id_db.try_get("index").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
    Ok(site_id)
}

pub async fn get_parent_list(
    cnn: &Pool<Postgres>,
    key: &str,
    site_name: &str,
) -> Result<Vec<(String, String)>, StatsHostError> {
    let mut result = Vec::new();

    // Get the site index
    let site_id_db = sqlx::query("SELECT index FROM site_tree WHERE key = $1 AND site_name=$2")
        .bind(key)
        .bind(site_name)
        .fetch_one(cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
    let mut site_id: i32 = site_id_db.try_get("index").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;

    // Get the parent list
    while site_id != 0 {
        let parent_db = sqlx::query("SELECT site_name, parent, site_type FROM site_tree WHERE key = $1 AND index=$2")
            .bind(key)
            .bind(site_id)
            .fetch_one(cnn)
            .await
            .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
        let parent: String = parent_db.try_get("site_name").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
        let site_type: String = parent_db.try_get("site_type").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
        site_id = parent_db.try_get("parent").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
        result.push((site_type, parent));
    }

    Ok(result)
}

pub async fn get_child_list(
    cnn: &Pool<Postgres>,
    key: &str,
    site_name: &str,
) -> Result<Vec<(String, String, String)>, StatsHostError> {
    let mut result = Vec::new();

    // Get the site index
    let site_id_db = sqlx::query("SELECT index FROM site_tree WHERE key = $1 AND site_name=$2")
        .bind(key)
        .bind(site_name)
        .fetch_one(cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
    let site_id: i32 = site_id_db.try_get("index").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;

    // Add child sites
    let child_sites = sqlx::query("SELECT site_name, parent, site_type FROM site_tree WHERE key=$1 AND parent=$2")
        .bind(key)
        .bind(site_id)
        .fetch_all(cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;

    for child in child_sites {
        let child_name: String = child.try_get("site_name").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
        let child_type: String = child.try_get("site_type").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
        result.push((child_type, child_name.clone(), child_name));
    }

    // Add child shaper nodes
    let child_circuits = sqlx::query("SELECT circuit_id, circuit_name FROM shaped_devices WHERE key=$1 AND parent_node=$2")
        .bind(key)
        .bind(site_name)
        .fetch_all(cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;

    for child in child_circuits {
        let child_name: String = child.try_get("circuit_name").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
        let child_id: String = child.try_get("circuit_id").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
        result.push(("circuit".to_string(), child_id, child_name));
    }

    result.sort_by(|a, b| a.2.cmp(&b.2));

    Ok(result)
}

pub async fn get_circuit_parent_list(
    cnn: &Pool<Postgres>,
    key: &str,
    circuit_id: &str,
) -> Result<Vec<(String, String)>, StatsHostError> {
    let mut result = Vec::new();

    // Get the site name to start at
    let site_name : String = sqlx::query("SELECT parent_node FROM shaped_devices WHERE key = $1 AND circuit_id= $2")
        .bind(key)
        .bind(circuit_id)
        .fetch_one(cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?
        .get(0);

    // Get the site index
    let site_id_db = sqlx::query("SELECT index FROM site_tree WHERE key = $1 AND site_name=$2")
        .bind(key)
        .bind(site_name)
        .fetch_one(cnn)
        .await
        .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
    let mut site_id: i32 = site_id_db.try_get("index").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;

    // Get the parent list
    while site_id != 0 {
        let parent_db = sqlx::query("SELECT site_name, parent, site_type FROM site_tree WHERE key = $1 AND index=$2")
            .bind(key)
            .bind(site_id)
            .fetch_one(cnn)
            .await
            .map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
        let parent: String = parent_db.try_get("site_name").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
        let site_type: String = parent_db.try_get("site_type").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
        site_id = parent_db.try_get("parent").map_err(|e| StatsHostError::DatabaseError(e.to_string()))?;
        result.push((site_type, parent));
    }

    Ok(result)
}
@ -1,20 +0,0 @@
# Site Build

This folder compiles and packages the website used by `lts_node`. It
needs to be compiled and made available to the `lts_node` process.

Steps: TBA

## Requirements

To run the build (as opposed to shipping pre-built files), you need to
install `esbuild` and `npm` (ugh). You can do this with:

```bash
(change directory to site_build folder)
sudo apt-get install npm
npm install
````

You can run the build manually by running `./esbuild.sh` in this
directory.
@ -1,13 +0,0 @@
#!/usr/bin/env node
import * as esbuild from 'esbuild'

await esbuild.build({
    entryPoints: ['src/app.ts', 'src/style.css'],
    bundle: true,
    minify: true,
    sourcemap: true,
    // target: ['chrome58', 'firefox57', 'safari11', 'edge16'],
    outdir: 'output/',
    loader: { '.html': 'text'},
    format: 'esm',
})
@ -1,10 +0,0 @@
{
    "dependencies": {
        "@types/bootstrap": "^5.2.6",
        "@types/echarts": "^4.9.17",
        "bootstrap": "^5.2.3",
        "echarts": "^5.4.2",
        "esbuild": "^0.17.17",
        "mermaid": "^10.1.0"
    }
}
@ -1,58 +0,0 @@
import html from './template.html';
import { Page } from '../page'
import { MenuPage } from '../menu/menu';
import { Component } from '../components/component';
import { ThroughputSiteChart } from '../components/throughput_site';
import { SiteInfo } from '../components/site_info';
import { RttChartSite } from '../components/rtt_site';
import { RttHistoSite } from '../components/rtt_histo_site';
import { SiteBreadcrumbs } from '../components/site_breadcrumbs';
import { SiteHeat } from '../components/site_heat';
import { SiteStackChart } from '../components/site_stack';

export class AccessPointPage implements Page {
    menu: MenuPage;
    components: Component[];
    siteId: string;

    constructor(siteId: string) {
        this.siteId = siteId;
        this.menu = new MenuPage("sitetreeDash");
        let container = document.getElementById('mainContent');
        if (container) {
            container.innerHTML = html;
        }
        this.components = [
            new SiteInfo(siteId),
            new ThroughputSiteChart(siteId),
            new RttChartSite(siteId),
            new RttHistoSite(),
            new SiteBreadcrumbs(siteId),
            new SiteHeat(siteId),
            new SiteStackChart(siteId),
        ];
    }

    wireup() {
        this.components.forEach(component => {
            component.wireup();
        });
    }

    ontick(): void {
        this.menu.ontick();
        this.components.forEach(component => {
            component.ontick();
        });
    }

    onmessage(event: any) {
        if (event.msg) {
            this.menu.onmessage(event);

            this.components.forEach(component => {
                component.onmessage(event);
            });
        }
    }
}
@ -1,60 +0,0 @@
<div class="container">
    <div class="row">
        <div class="col-12" id="siteName">
        </div>
    </div>
    <div class="row">
        <div class="col-6">
            <div class="card">
                <div class="card-body" id="siteInfo">
                    Details
                </div>
            </div>
        </div>
        <div class="col-6">
            <div class="card">
                <div class="card-body">
                    <div id="throughputChart" style="height: 250px"></div>
                </div>
            </div>
        </div>
    </div>

    <div class="row">
        <div class="col-6">
            <div class="card">
                <div class="card-body">
                    <div id="rttChart" style="height: 250px"></div>
                </div>
            </div>
        </div>

        <div class="col-6">
            <div class="card">
                <div class="card-body">
                    <div id="rttHisto" style="height: 250px"></div>
                </div>
            </div>
        </div>
    </div>

    <div class="row">
        <div class="col-12">
            <div class="card">
                <div class="card-body">
                    <div id="siteStack" style="height: 250px"></div>
                </div>
            </div>
        </div>
    </div>

    <div class="row">
        <div class="col-12">
            <div class="card">
                <div class="card-body">
                    <div id="rootHeat" style="height: 900px"></div>
                </div>
            </div>
        </div>
    </div>
</div>
@ -1,57 +0,0 @@
import 'bootstrap/dist/css/bootstrap.css';
import 'bootstrap/dist/js/bootstrap.js';
import { SiteRouter } from './router';
import { Bus, onAuthFail, onAuthOk, onMessage } from './bus';
import { Auth } from './auth';
import init from '../wasm/wasm_pipe.js';

await init();
console.log("WASM loaded");

declare global {
    interface Window {
        router: SiteRouter;
        bus: Bus;
        auth: Auth;
        login: any;
        graphPeriod: string;
        changeGraphPeriod: any;
    }
}
(window as any).onAuthFail = onAuthFail;
(window as any).onAuthOk = onAuthOk;
(window as any).onMessage = onMessage;

window.auth = new Auth;
window.bus = new Bus();
window.router = new SiteRouter();
window.bus.connect();
window.router.initialRoute();
let graphPeriod = localStorage.getItem('graphPeriod');
if (!graphPeriod) {
    graphPeriod = "5m";
    localStorage.setItem('graphPeriod', graphPeriod);
}
window.graphPeriod = graphPeriod;
window.changeGraphPeriod = (period: string) => changeGraphPeriod(period);

// 10 Second interval for refreshing the page
window.setInterval(() => {
    window.bus.updateConnected();
    window.router.ontick();
    let btn = document.getElementById("graphPeriodBtn") as HTMLButtonElement;
    btn.innerText = window.graphPeriod;
}, 10000);

// Faster interval for tracking the WSS connection
window.setInterval(() => {
    window.bus.updateConnected();
    window.bus.sendQueue();
}, 500);

function changeGraphPeriod(period: string) {
    window.graphPeriod = period;
    localStorage.setItem('graphPeriod', period);
    let btn = document.getElementById("graphPeriodBtn") as HTMLButtonElement;
    btn.innerText = period;
}
@ -1,15 +0,0 @@
export class Auth {
    hasCredentials: boolean;
    token: string | undefined;

    constructor() {
        let token = localStorage.getItem("token");
        if (token) {
            this.hasCredentials = true;
            this.token = token;
        } else {
            this.hasCredentials = false;
            this.token = undefined;
        }
    }
}
@ -1,160 +0,0 @@
import { connect_wasm_pipe, is_wasm_connected, send_wss_queue } from "../wasm/wasm_pipe";
import { Auth } from "./auth";
import { SiteRouter } from "./router";

export class Bus {
    ws: WebSocket;
    connected: boolean;

    constructor() {
        const currentUrlWithoutAnchors = window.location.href.split('#')[0].replace("https://", "").replace("http://", "");
        const url = "ws://" + currentUrlWithoutAnchors + "ws";
        this.connected = false;
    }

    updateConnected() {
        //console.log("Connection via WASM: " + is_wasm_connected());
        let indicator = document.getElementById("connStatus");
        if (indicator && is_wasm_connected()) {
            indicator.style.color = "green";
        } else if (indicator) {
            indicator.style.color = "red";
            retryConnect();
        }
    }

    sendQueue() {
        send_wss_queue();
    }

    connect() {
        const currentUrlWithoutAnchors = window.location.href.split('#')[0].replace("https://", "").replace("http://", "");
        const url = "ws://" + currentUrlWithoutAnchors + "ws";
        connect_wasm_pipe(url);
    }

    getToken(): string {
        if (window.auth.hasCredentials && window.auth.token) {
            return window.auth.token;
        } else {
            return "";
        }
    }

    requestThroughputChartCircuit(circuit_id: string) {
        let request = {
            msg: "throughputChartCircuit",
            period: window.graphPeriod,
            circuit_id: decodeURI(circuit_id),
        };
        let json = JSON.stringify(request);
        this.ws.send(json);
    }

    requestThroughputChartSite(site_id: string) {
        let request = {
            msg: "throughputChartSite",
            period: window.graphPeriod,
            site_id: decodeURI(site_id),
        };
        let json = JSON.stringify(request);
        this.ws.send(json);
    }

    requestRttChartSite(site_id: string) {
        let request = {
            msg: "rttChartSite",
            period: window.graphPeriod,
            site_id: decodeURI(site_id),
        };
        let json = JSON.stringify(request);
        this.ws.send(json);
    }

    requestRttChartCircuit(circuit_id: string) {
        let request = {
            msg: "rttChartCircuit",
            period: window.graphPeriod,
            circuit_id: decodeURI(circuit_id),
        };
        let json = JSON.stringify(request);
        this.ws.send(json);
    }

    requestSiteHeat(site_id: string) {
        let request = {
            msg: "siteHeat",
            period: window.graphPeriod,
            site_id: decodeURI(site_id),
        };
        let json = JSON.stringify(request);
        this.ws.send(json);
    }

    sendSearch(term: string) {
        let request = {
            msg: "search",
            term: term,
        };
        let json = JSON.stringify(request);
        this.ws.send(json);
    }

    requestSiteInfo(site_id: string) {
        let request = {
            msg: "siteInfo",
            site_id: decodeURI(site_id),
        };
        let json = JSON.stringify(request);
        this.ws.send(json);
    }

    requestCircuitInfo(circuit_id: string) {
        let request = {
            msg: "circuitInfo",
            circuit_id: decodeURI(circuit_id),
        };
        let json = JSON.stringify(request);
        this.ws.send(json);
    }

    requestSiteParents(site_id: string) {
        let request = {
            msg: "siteParents",
            site_id: decodeURI(site_id),
        };
        let json = JSON.stringify(request);
        this.ws.send(json);
    }
}

function retryConnect() {
    if (!window.bus.connected) {
        //window.bus.connect();
    }
}

// WASM callback
export function onAuthFail() {
    window.auth.hasCredentials = false;
    window.login = null;
    window.auth.token = null;
    localStorage.removeItem("token");
    window.router.goto("login");
}

// WASM callback
export function onAuthOk(token: string, name: string, license_key: string) {
    window.auth.hasCredentials = true;
    window.login = { msg: "authOk", token: token, name: name, license_key: license_key };
    window.auth.token = token;
}

// WASM Callback
export function onMessage(rawJson: string) {
    let json = JSON.parse(rawJson);
    //console.log(json);
    //console.log(Object.keys(json));
    json.msg = Object.keys(json)[0];
    window.router.onMessage(json);
}
@ -1,213 +0,0 @@
import html from './template.html';
import { Page } from '../page'
import { MenuPage } from '../menu/menu';
import { Component } from '../components/component';
import { CircuitInfo } from '../components/circuit_info';
import { ThroughputCircuitChart } from '../components/throughput_circuit';
import { RttChartCircuit } from '../components/rtt_circuit';
import { request_ext_device_info, request_ext_snr_graph, request_ext_capacity_graph } from "../../wasm/wasm_pipe";
import * as echarts from 'echarts';
import { scaleNumber } from '../helpers';
import { CircuitBreadcrumbs } from '../components/circuit_breadcrumbs';

export class CircuitPage implements Page {
    menu: MenuPage;
    components: Component[];
    circuitId: string;

    constructor(circuitId: string) {
        this.circuitId = circuitId;
        this.menu = new MenuPage("sitetreeDash");
        let container = document.getElementById('mainContent');
        if (container) {
            container.innerHTML = html;
        }
        this.components = [
            new CircuitInfo(this.circuitId),
            new ThroughputCircuitChart(this.circuitId),
            new RttChartCircuit(this.circuitId),
            new CircuitBreadcrumbs(this.circuitId),
        ];
    }

    wireup() {
        this.components.forEach(component => {
            component.wireup();
        });
        request_ext_device_info(this.circuitId);
    }

    ontick(): void {
        this.menu.ontick();
        this.components.forEach(component => {
            component.ontick();
        });
    }

    onmessage(event: any) {
        if (event.msg) {
            this.menu.onmessage(event);

            this.components.forEach(component => {
                component.onmessage(event);
            });

            if (event.msg == "DeviceExt") {
                //console.log(event.DeviceExt.data);
                let div = document.getElementById("ext") as HTMLDivElement;
                let html = "";

                for (let i=0; i<event.DeviceExt.data.length; i++) {
                    let d = event.DeviceExt.data[i];
                    html += "<div class='row'>";

                    html += "<div class='col-4'>";
                    html += "<div class='card'>";
                    html += "<div class='card-body' style='height: 250px'>";
                    html += "<h4>" + d.name + "</h4>";
                    html += "<strong>Status</strong>: " + d.status + "<br>";
                    html += "<strong>Model</strong>: " + d.model + "<br>";
                    html += "<strong>Mode</strong>: " + d.mode + "<br>";
                    html += "<strong>Firmware</strong>: " + d.firmware + "<br>";
                    html += "</div>";
                    html += "</div>";
                    html += "</div>";

                    html += "<div class='col-4'>";
                    html += "<div class='card'>";
                    html += "<div class='card-body' id='extdev_" + d.device_id + "' style='height: 250px'>";
                    html += "<p>Signal/noise graph</p>";
                    html += "</div>";
                    html += "</div>";
                    html += "</div>";
                    request_ext_snr_graph(window.graphPeriod, d.device_id);

                    html += "<div class='col-4'>";
                    html += "<div class='card'>";
                    html += "<div class='card-body' id='extdev_cap_" + d.device_id + "' style='height: 250px'>";
                    html += "<p>Capacity Graph</p>";
                    html += "</div>";
                    html += "</div>";
                    html += "</div>";
                    request_ext_capacity_graph(window.graphPeriod, d.device_id);

                    // End row
                    html += "</div>";
                }

                div.outerHTML = html;
            } else if (event.msg == "DeviceExtSnr") {
                console.log(event);
                let div = document.getElementById("extdev_" + event.DeviceExtSnr.device_id) as HTMLDivElement;

                let sig: number[] = [];
                let n: number[] = [];
                let x: any[] = [];

                for (let i=0; i<event.DeviceExtSnr.data.length; i++) {
                    let d = event.DeviceExtSnr.data[i];
                    sig.push(d.signal);
                    n.push(d.noise);
                    x.push(d.date);
                }

                let series: echarts.SeriesOption[] = [];
                let signal: echarts.SeriesOption = {
                    name: "Signal",
                    type: "line",
                    data: sig,
                    symbol: 'none',
                };
                let noise: echarts.SeriesOption = {
                    name: "Noise",
                    type: "line",
                    data: n,
                    symbol: 'none',
                };
                series.push(signal);
                series.push(noise);

                let myChart: echarts.ECharts = echarts.init(div);
                var option: echarts.EChartsOption;
                myChart.setOption<echarts.EChartsOption>(
                    (option = {
                        title: { text: "Signal/Noise" },
                        legend: {
                            orient: "horizontal",
                            right: 10,
                            top: "bottom",
                        },
                        xAxis: {
                            type: 'category',
                            data: x,
                        },
                        yAxis: {
                            type: 'value',
                            name: 'dB',
                        },
                        series: series
                    })
                );
                option && myChart.setOption(option);
            } else if (event.msg == "DeviceExtCapacity") {
                console.log(event);
                let div = document.getElementById("extdev_cap_" + event.DeviceExtCapacity.device_id) as HTMLDivElement;

                let down: number[] = [];
                let up: number[] = [];
                let x: any[] = [];

                for (let i=0; i<event.DeviceExtCapacity.data.length; i++) {
                    let d = event.DeviceExtCapacity.data[i];
                    down.push(d.dl);
                    up.push(d.ul);
                    x.push(d.date);
                }

                let series: echarts.SeriesOption[] = [];
                let signal: echarts.SeriesOption = {
                    name: "Download",
                    type: "line",
                    data: down,
                    symbol: 'none',
                };
                let noise: echarts.SeriesOption = {
                    name: "Upload",
                    type: "line",
                    data: up,
                    symbol: 'none',
                };
                series.push(signal);
                series.push(noise);

                let myChart: echarts.ECharts = echarts.init(div);
                var option: echarts.EChartsOption;
                myChart.setOption<echarts.EChartsOption>(
                    (option = {
                        title: { text: "Estimated Capacity" },
                        legend: {
                            orient: "horizontal",
                            right: 10,
                            top: "bottom",
                        },
                        xAxis: {
                            type: 'category',
                            data: x,
                        },
                        yAxis: {
                            type: 'value',
                            name: 'Mbps',
                            axisLabel: {
                                formatter: function (val: number) {
                                    return scaleNumber(Math.abs(val), 0);
                                }
                            }
                        },
                        series: series
                    })
                );
                option && myChart.setOption(option);
            }
        }
    }
}
@ -1,40 +0,0 @@
<div class="container">
    <div class="row">
        <div class="col-12" id="siteName">
        </div>
    </div>
    <div class="row">
        <div class="col-6">
            <div class="card">
                <div class="card-body" id="circuitInfo">
                </div>
            </div>
        </div>
        <div class="col-6">
            <div class="card">
                <div class="card-body" id="circuitDevices">
                </div>
            </div>
        </div>
    </div>

    <div class="row">
        <div class="col-6">
            <div class="card">
                <div class="card-body">
                    <div id="throughputChart" style="height: 250px"></div>
                </div>
            </div>
        </div>

        <div class="col-6">
            <div class="card">
                <div class="card-body">
                    <div id="rttChart" style="height: 250px"></div>
                </div>
            </div>
        </div>
    </div>

    <div id="ext"></div>
</div>
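The `id` attributes in this template (`siteName`, `circuitInfo`, `circuitDevices`, `throughputChart`, `rttChart`, `ext`) are the mount points that the page components render into. As a rough sketch of how a page like this might wire those components together, using the `Component` interface seen below (`wireup`/`ontick`/`onmessage`); the import paths, the `circuitId` value, and the poll interval are assumptions for illustration only:

```typescript
import { CircuitBreadcrumbs } from "./components/circuit_breadcrumbs"; // assumed path
import { CircuitInfo } from "./components/circuit_info";               // assumed path
import { Component } from "./components/component";                    // assumed path

// Hypothetical page setup: construct the components that fill the divs above,
// wire them up, and poll them once per second.
const circuitId = "example-circuit"; // placeholder value
const components: Component[] = [
    new CircuitBreadcrumbs(circuitId), // fills #siteName
    new CircuitInfo(circuitId),        // fills #circuitInfo and #circuitDevices
];
components.forEach((c) => c.wireup());
setInterval(() => components.forEach((c) => c.ontick()), 1000);

// Incoming messages would be fanned out to every component:
// components.forEach((c) => c.onmessage(event));
```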
@ -1,33 +0,0 @@
import { request_circuit_parents } from "../../wasm/wasm_pipe";
import { makeUrl } from "../helpers";
import { Component } from "./component";

export class CircuitBreadcrumbs implements Component {
    circuitId: string;

    constructor(siteId: string) {
        this.circuitId = siteId;
    }

    wireup(): void {
        request_circuit_parents(this.circuitId);
    }

    ontick(): void {
    }

    onmessage(event: any): void {
        if (event.msg == "SiteParents") {
            //console.log(event.data);
            let div = document.getElementById("siteName") as HTMLDivElement;
            let html = "";
            // Reverse the returned parent list before rendering the breadcrumb trail.
            let crumbs = event.SiteParents.data.reverse();
            for (let i = 0; i < crumbs.length; i++) {
                let url = makeUrl(crumbs[i][0], crumbs[i][1]);
                html += "<a href='#" + url + "' onclick='window.router.goto(\"" + url + "\")'>" + crumbs[i][1] + "</a> | ";
            }
            // Trim the trailing " | " separator.
            html = html.substring(0, html.length - 3);
            div.innerHTML = html;
        }
    }
}
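One thing worth noting about the loop above: breadcrumb names are concatenated straight into an `innerHTML` string, so a name containing markup characters would break the trail. A minimal escaping sketch, assuming a hypothetical `escapeHtml` helper that is not part of the codebase:

```typescript
// Hypothetical helper: escape text before interpolating it into innerHTML strings.
// Not part of the existing code; shown only as a sketch.
function escapeHtml(text: string): string {
    return text
        .replace(/&/g, "&amp;")
        .replace(/</g, "&lt;")
        .replace(/>/g, "&gt;")
        .replace(/"/g, "&quot;")
        .replace(/'/g, "&#39;");
}

// Usage inside the loop above (sketch):
// html += "<a href='#" + url + "'>" + escapeHtml(crumbs[i][1]) + "</a> | ";
```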
@ -1,50 +0,0 @@
import { scaleNumber } from "../helpers";
import { mbps_to_bps } from "../site_tree/site_tree";
import { Component } from "./component";
import { request_circuit_info } from "../../wasm/wasm_pipe";

export class CircuitInfo implements Component {
    circuitId: string;
    count: number = 0;

    constructor(siteId: string) {
        this.circuitId = decodeURI(siteId);
    }

    wireup(): void {
        request_circuit_info(this.circuitId);
    }

    ontick(): void {
        // Refresh the circuit info every 10 ticks.
        this.count++;
        if (this.count % 10 == 0) {
            request_circuit_info(this.circuitId);
        }
    }

    onmessage(event: any): void {
        if (event.msg == "CircuitInfo") {
            //console.log(event.CircuitInfo.data);
            // Circuit-level shaping limits (taken from the first row).
            let div = document.getElementById("circuitInfo") as HTMLDivElement;
            let html = "";
            html += "<table class='table table-striped'>";
            html += "<tr><td>Circuit Name:</td><td>" + event.CircuitInfo.data[0].circuit_name + "</td></tr>";
            html += "<tr><td>Min (CIR) Limits:</td><td>" + event.CircuitInfo.data[0].download_min_mbps + " / " + event.CircuitInfo.data[0].upload_min_mbps + " Mbps</td></tr>";
            html += "<tr><td>Max (Ceiling) Limits:</td><td>" + event.CircuitInfo.data[0].download_max_mbps + " / " + event.CircuitInfo.data[0].upload_max_mbps + " Mbps</td></tr>";
            html += "</table>";
            div.innerHTML = html;

            // One table row per device attached to the circuit.
            div = document.getElementById("circuitDevices") as HTMLDivElement;
            html = "";
            html += "<table class='table table-striped'>";
            for (let i = 0; i < event.CircuitInfo.data.length; i++) {
                html += "<tr>";
                html += "<td>Device:</td><td>" + event.CircuitInfo.data[i].device_name + "</td>";
                html += "<td>IP:</td><td>" + event.CircuitInfo.data[i].ip_range + "/" + event.CircuitInfo.data[i].subnet + "</td>";
                html += "</tr>";
            }
            html += "</table>";
            div.innerHTML = html;
        }
    }
}
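For reference, the shape of the `CircuitInfo` payload can be inferred from the fields the handler reads; the interfaces below are an assumption derived from that usage, not an authoritative wire format:

```typescript
// Inferred from the field accesses above; the real wire format may differ.
interface CircuitInfoRow {
    circuit_name: string;
    download_min_mbps: number;   // Min (CIR) limit, download
    upload_min_mbps: number;     // Min (CIR) limit, upload
    download_max_mbps: number;   // Max (ceiling) limit, download
    upload_max_mbps: number;     // Max (ceiling) limit, upload
    device_name: string;
    ip_range: string;            // base address string
    subnet: number;              // prefix length, rendered as "/subnet"
}

interface CircuitInfoMessage {
    msg: "CircuitInfo";
    CircuitInfo: { data: CircuitInfoRow[] };
}
```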
Some files were not shown because too many files have changed in this diff