Merge pull request #479 from LibreQoE/uisp_integration_2

Uisp integration 2
Herbert "TheBracket 2024-06-15 09:44:53 -05:00 committed by GitHub
commit ced169f1cd
38 changed files with 2555 additions and 35 deletions

View File

@ -22,7 +22,7 @@ ETC_DIR=$DPKG_DIR/etc
MOTD_DIR=$DPKG_DIR/etc/update-motd.d
LQOS_FILES="graphInfluxDB.py influxDBdashboardTemplate.json integrationCommon.py integrationRestHttp.py integrationSplynx.py integrationUISP.py integrationSonar.py ispConfig.example.py LibreQoS.py lqos.example lqTools.py mikrotikFindIPv6.py network.example.json pythonCheck.py README.md scheduler.py ShapedDevices.example.csv"
LQOS_BIN_FILES="lqos_scheduler.service.example lqosd.service.example lqos_node_manager.service.example"
RUSTPROGS="lqosd lqtop xdp_iphash_to_cpu_cmdline xdp_pping lqos_node_manager lqusers lqos_setup lqos_map_perf lqos_support_tool"
RUSTPROGS="lqosd lqtop xdp_iphash_to_cpu_cmdline xdp_pping lqos_node_manager lqusers lqos_setup lqos_map_perf uisp_integration lqos_support_tool"
####################################################
# Clean any previous dist build

View File

@ -52,7 +52,7 @@ rustup update
# Start building
echo "Please wait while the system is compiled. Service will not be interrupted during this stage."
PROGS="lqosd lqtop xdp_iphash_to_cpu_cmdline xdp_pping lqos_node_manager lqusers lqos_map_perf lqos_support_tool"
PROGS="lqosd lqtop xdp_iphash_to_cpu_cmdline xdp_pping lqos_node_manager lqusers lqos_map_perf uisp_integration lqos_support_tool"
mkdir -p bin/static
pushd rust > /dev/null
#cargo clean

View File

@ -2,10 +2,15 @@
import routeros_api
import csv
def pullMikrotikIPv6():
def pullMikrotikIPv6(CsvPath):
import routeros_api
import csv
import json
print("Reading from " + CsvPath)
ipv4ToIPv6 = {}
routerList = []
with open('mikrotikDHCPRouterList.csv') as csv_file:
with open(CsvPath) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
next(csv_reader)
for row in csv_reader:
@ -65,7 +70,12 @@ def pullMikrotikIPv6():
ipv4ToIPv6[ipv4] = ipv6
except:
print('Failed to find associated IPv4 for ' + ipv6)
return ipv4ToIPv6
return json.dumps(ipv4ToIPv6)
def pullMikrotikIPv6_Mock(CsvPath):
return "{\n\"172.29.200.2\": \"2602:fdca:800:1500::/56\"\n}"
if __name__ == '__main__':
print(pullMikrotikIPv6())
print("Mikrotik IPv6 Finder")
#print(pullMikrotikIPv6())

src/rust/Cargo.lock (generated, 236 changes)
View File

@ -197,6 +197,12 @@ dependencies = [
"bytemuck",
]
[[package]]
name = "atomic-waker"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"
[[package]]
name = "autocfg"
version = "1.3.0"
@ -215,8 +221,8 @@ dependencies = [
"bytes",
"futures-util",
"http 0.2.12",
"http-body",
"hyper",
"http-body 0.4.6",
"hyper 0.14.28",
"itoa",
"matchit",
"memchr",
@ -245,7 +251,7 @@ dependencies = [
"bytes",
"futures-util",
"http 0.2.12",
"http-body",
"http-body 0.4.6",
"mime",
"rustversion",
"tower-layer",
@ -273,6 +279,12 @@ version = "0.21.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"
[[package]]
name = "base64"
version = "0.22.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
[[package]]
name = "binascii"
version = "0.1.4"
@ -1202,6 +1214,25 @@ dependencies = [
"tracing",
]
[[package]]
name = "h2"
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab"
dependencies = [
"atomic-waker",
"bytes",
"fnv",
"futures-core",
"futures-sink",
"http 1.1.0",
"indexmap",
"slab",
"tokio",
"tokio-util",
"tracing",
]
[[package]]
name = "half"
version = "1.8.3"
@ -1288,6 +1319,29 @@ dependencies = [
"pin-project-lite",
]
[[package]]
name = "http-body"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643"
dependencies = [
"bytes",
"http 1.1.0",
]
[[package]]
name = "http-body-util"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f"
dependencies = [
"bytes",
"futures-util",
"http 1.1.0",
"http-body 1.0.0",
"pin-project-lite",
]
[[package]]
name = "httparse"
version = "1.8.0"
@ -1316,9 +1370,9 @@ dependencies = [
"futures-channel",
"futures-core",
"futures-util",
"h2",
"h2 0.3.26",
"http 0.2.12",
"http-body",
"http-body 0.4.6",
"httparse",
"httpdate",
"itoa",
@ -1330,6 +1384,26 @@ dependencies = [
"want",
]
[[package]]
name = "hyper"
version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d"
dependencies = [
"bytes",
"futures-channel",
"futures-util",
"h2 0.4.5",
"http 1.1.0",
"http-body 1.0.0",
"httparse",
"itoa",
"pin-project-lite",
"smallvec",
"tokio",
"want",
]
[[package]]
name = "hyper-tls"
version = "0.5.0"
@ -1337,12 +1411,48 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905"
dependencies = [
"bytes",
"hyper",
"hyper 0.14.28",
"native-tls",
"tokio",
"tokio-native-tls",
]
[[package]]
name = "hyper-tls"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0"
dependencies = [
"bytes",
"http-body-util",
"hyper 1.3.1",
"hyper-util",
"native-tls",
"tokio",
"tokio-native-tls",
"tower-service",
]
[[package]]
name = "hyper-util"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56"
dependencies = [
"bytes",
"futures-channel",
"futures-util",
"http 1.1.0",
"http-body 1.0.0",
"hyper 1.3.1",
"pin-project-lite",
"socket2 0.5.7",
"tokio",
"tower",
"tower-service",
"tracing",
]
[[package]]
name = "iana-time-zone"
version = "0.1.60"
@ -1720,7 +1830,7 @@ dependencies = [
"lqos_utils",
"nix 0.28.0",
"once_cell",
"reqwest",
"reqwest 0.11.27",
"rocket",
"rocket_async_compression",
"sysinfo",
@ -1737,7 +1847,7 @@ dependencies = [
"lqos_utils",
"nix 0.28.0",
"pyo3",
"reqwest",
"reqwest 0.11.27",
"serde",
"sysinfo",
"tokio",
@ -1852,7 +1962,7 @@ dependencies = [
"nix 0.28.0",
"num-traits",
"once_cell",
"reqwest",
"reqwest 0.11.27",
"serde",
"serde_json",
"signal-hook",
@ -2666,16 +2776,16 @@ version = "0.11.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62"
dependencies = [
"base64",
"base64 0.21.7",
"bytes",
"encoding_rs",
"futures-core",
"futures-util",
"h2",
"h2 0.3.26",
"http 0.2.12",
"http-body",
"hyper",
"hyper-tls",
"http-body 0.4.6",
"hyper 0.14.28",
"hyper-tls 0.5.0",
"ipnet",
"js-sys",
"log",
@ -2684,7 +2794,7 @@ dependencies = [
"once_cell",
"percent-encoding",
"pin-project-lite",
"rustls-pemfile",
"rustls-pemfile 1.0.4",
"serde",
"serde_json",
"serde_urlencoded",
@ -2697,7 +2807,49 @@ dependencies = [
"wasm-bindgen",
"wasm-bindgen-futures",
"web-sys",
"winreg",
"winreg 0.50.0",
]
[[package]]
name = "reqwest"
version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "566cafdd92868e0939d3fb961bd0dc25fcfaaed179291093b3d43e6b3150ea10"
dependencies = [
"base64 0.22.1",
"bytes",
"encoding_rs",
"futures-core",
"futures-util",
"h2 0.4.5",
"http 1.1.0",
"http-body 1.0.0",
"http-body-util",
"hyper 1.3.1",
"hyper-tls 0.6.0",
"hyper-util",
"ipnet",
"js-sys",
"log",
"mime",
"native-tls",
"once_cell",
"percent-encoding",
"pin-project-lite",
"rustls-pemfile 2.1.2",
"serde",
"serde_json",
"serde_urlencoded",
"sync_wrapper",
"system-configuration",
"tokio",
"tokio-native-tls",
"tower-service",
"url",
"wasm-bindgen",
"wasm-bindgen-futures",
"web-sys",
"winreg 0.52.0",
]
[[package]]
@ -2802,7 +2954,7 @@ dependencies = [
"either",
"futures",
"http 0.2.12",
"hyper",
"hyper 0.14.28",
"indexmap",
"log",
"memchr",
@ -2860,9 +3012,25 @@ version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c"
dependencies = [
"base64",
"base64 0.21.7",
]
[[package]]
name = "rustls-pemfile"
version = "2.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d"
dependencies = [
"base64 0.22.1",
"rustls-pki-types",
]
[[package]]
name = "rustls-pki-types"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d"
[[package]]
name = "rustversion"
version = "1.0.16"
@ -3582,10 +3750,30 @@ version = "0.1.0"
dependencies = [
"anyhow",
"lqos_config",
"reqwest",
"reqwest 0.11.27",
"serde",
]
[[package]]
name = "uisp_integration"
version = "0.1.0"
dependencies = [
"anyhow",
"csv",
"ip_network",
"ip_network_table",
"lqos_config",
"pyo3",
"reqwest 0.12.4",
"serde",
"serde_json",
"thiserror",
"tokio",
"tracing",
"tracing-subscriber",
"uisp",
]
[[package]]
name = "uncased"
version = "0.9.10"
@ -4083,6 +4271,16 @@ dependencies = [
"windows-sys 0.48.0",
]
[[package]]
name = "winreg"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5"
dependencies = [
"cfg-if",
"windows-sys 0.48.0",
]
[[package]]
name = "xdp_iphash_to_cpu_cmdline"
version = "0.1.0"

View File

@ -31,5 +31,6 @@ members = [
"lts_client", # Shared data and client-side code for long-term stats
"lqos_map_perf", # A CLI tool for testing eBPF map performance
"uisp", # REST support for the UISP API
"uisp_integration", # UISP Integration in Rust
"lqos_support_tool", # A Helper tool to make it easier to request/receive support
]

View File

@ -19,17 +19,23 @@ static CONFIG: Mutex<Option<Config>> = Mutex::new(None);
/// Load the configuration from `/etc/lqos.conf`.
pub fn load_config() -> Result<Config, LibreQoSConfigError> {
let mut config_location = "/etc/lqos.conf".to_string();
if let Ok(lqos_config) = std::env::var("LQOS_CONFIG") {
config_location = lqos_config;
log::info!("Overriding lqos.conf location from environment variable.");
}
let mut lock = CONFIG.lock().unwrap();
if lock.is_none() {
log::info!("Loading configuration file /etc/lqos.conf");
log::info!("Loading configuration file {config_location}");
migrate_if_needed().map_err(|e| {
log::error!("Unable to migrate configuration: {:?}", e);
LibreQoSConfigError::FileNotFoud
})?;
let file_result = std::fs::read_to_string("/etc/lqos.conf");
let file_result = std::fs::read_to_string(&config_location);
if file_result.is_err() {
log::error!("Unable to open /etc/lqos.conf");
log::error!("Unable to open {config_location}");
return Err(LibreQoSConfigError::FileNotFoud);
}
let raw = file_result.unwrap();
@ -43,8 +49,15 @@ pub fn load_config() -> Result<Config, LibreQoSConfigError> {
config_result
)));
}
let mut final_config = config_result.unwrap(); // We know it's good at this point
// Check for environment variable overrides
if let Ok(lqos_dir) = std::env::var("LQOS_DIRECTORY") {
final_config.lqos_directory = lqos_dir;
}
log::info!("Set cached version of config file");
*lock = Some(config_result.unwrap());
*lock = Some(final_config);
}
Ok(lock.as_ref().unwrap().clone())
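A brief usage sketch of the new override hooks added above (the paths are hypothetical): LQOS_CONFIG points the loader at an alternate lqos.conf, while LQOS_DIRECTORY replaces the lqos_directory value after parsing. Both need to be set before the first load_config() call, because the parsed result is cached.

fn main() {
    // Hypothetical paths for a test run; set these before load_config() runs for the first time.
    std::env::set_var("LQOS_CONFIG", "/tmp/lqos-test/lqos.conf");
    std::env::set_var("LQOS_DIRECTORY", "/tmp/lqos-test");
    let config = lqos_config::load_config().expect("configuration should load");
    println!("Working directory: {}", config.lqos_directory);
}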

View File

@ -246,8 +246,8 @@ impl From<Device> for UispExtDevice {
channel_width,
tx_power,
rx_signal,
downlink_capacity_mbps,
uplink_capacity_mbps,
downlink_capacity_mbps: downlink_capacity_mbps as i32,
uplink_capacity_mbps: uplink_capacity_mbps as i32,
noise_floor,
mode,
interfaces: iflist,

View File

@ -6,6 +6,8 @@ pub struct DataLink {
pub id: String,
pub from: DataLinkFrom,
pub to: DataLinkTo,
#[serde(rename = "canDelete")]
pub can_delete: bool,
}
#[allow(non_snake_case)]

View File

@ -189,8 +189,8 @@ pub struct DeviceOverview {
pub frequency: Option<f64>,
pub outageScore: Option<f64>,
pub stationsCount: Option<i32>,
pub downlinkCapacity: Option<i32>,
pub uplinkCapacity: Option<i32>,
pub downlinkCapacity: Option<i64>,
pub uplinkCapacity: Option<i64>,
pub channelWidth: Option<i32>,
pub transmitPower: Option<i32>,
pub signal: Option<i32>,

View File

@ -11,9 +11,9 @@ use lqos_config::Config;
// UISP data link definitions
use self::rest::nms_request_get_vec;
use anyhow::Result;
pub use data_link::DataLink;
pub use data_link::*;
pub use device::Device;
pub use site::Site;
pub use site::{Site, SiteId, Description};
/// Loads a complete list of all sites from UISP
pub async fn load_all_sites(config: Config) -> Result<Vec<Site>> {

View File

@ -20,6 +20,14 @@ impl Site {
None
}
pub fn name_or_blank(&self) -> String {
if let Some(name) = self.name() {
name
} else {
"".to_string()
}
}
pub fn address(&self) -> Option<String> {
if let Some(desc) = &self.description {
if let Some(address) = &desc.address {
@ -83,6 +91,14 @@ impl Site {
}
(down, up)
}
pub fn is_suspended(&self) -> bool {
if let Some(site_id) = &self.identification {
site_id.suspended
} else {
false
}
}
}
#[allow(non_snake_case)]

View File

@ -0,0 +1,22 @@
[package]
name = "uisp_integration"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
anyhow = "1.0.82"
reqwest = { version = "0.12.3", features = ["json"] }
tokio = { version = "1.37.0", features = ["full"] }
tracing = "0.1.40"
tracing-subscriber = "0.3.18"
lqos_config = { path = "../lqos_config" }
uisp = { path = "../uisp" }
thiserror = "1.0.58"
serde = { version = "1.0.198", features = ["derive"] }
csv = "1.3.0"
serde_json = "1.0.116"
ip_network_table = "0"
ip_network = "0"
pyo3 = "0.20"

View File

@ -0,0 +1,24 @@
use thiserror::Error;
/// Error types for UISP Integration
#[derive(Error, Debug, PartialEq)]
pub enum UispIntegrationError {
#[error("Unable to load configuration")]
CannotLoadConfig,
#[error("UISP Integration is Disabled")]
IntegrationDisabled,
#[error("Unknown Integration Strategy")]
UnknownIntegrationStrategy,
#[error("Error contacting UISP")]
UispConnectError,
#[error("Root site not found")]
NoRootSite,
#[error("Unknown Site Type")]
UnknownSiteType,
#[error("CSV Error")]
CsvError,
#[error("Unable to write network.json")]
WriteNetJson,
#[error("Bad IP")]
BadIp,
}

View File

@ -0,0 +1,72 @@
use crate::errors::UispIntegrationError;
use ip_network::IpNetwork;
use ip_network_table::IpNetworkTable;
use lqos_config::Config;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use tracing::info;
/// Represents a set of IP ranges that are allowed or ignored.
pub struct IpRanges {
/// The allowed IP ranges
allowed: IpNetworkTable<bool>,
/// The ignored IP ranges
ignored: IpNetworkTable<bool>,
}
impl IpRanges {
/// Creates a new IpRanges from a configuration.
pub fn new(config: &Config) -> Result<Self, UispIntegrationError> {
info!("Building allowed/excluded IP range lookups from configuration file");
let mut allowed = IpNetworkTable::new();
let mut ignored = IpNetworkTable::new();
for allowed_ip in config.ip_ranges.allow_subnets.iter() {
let split: Vec<_> = allowed_ip.split('/').collect();
if split[0].contains(':') {
// It's IPv6
let ip_network: Ipv6Addr = split[0].parse().unwrap();
let ip = IpNetwork::new(ip_network, split[1].parse().unwrap()).unwrap();
allowed.insert(ip, true);
} else {
// It's IPv4
let ip_network: Ipv4Addr = split[0].parse().unwrap();
let ip = IpNetwork::new(ip_network, split[1].parse().unwrap()).unwrap();
allowed.insert(ip, true);
}
}
for excluded_ip in config.ip_ranges.ignore_subnets.iter() {
let split: Vec<_> = excluded_ip.split('/').collect();
if split[0].contains(':') {
// It's IPv6
let ip_network: Ipv6Addr = split[0].parse().unwrap();
let ip = IpNetwork::new(ip_network, split[1].parse().unwrap()).unwrap();
ignored.insert(ip, true);
} else {
// It's IPv4
let ip_network: Ipv4Addr = split[0].parse().unwrap();
let ip = IpNetwork::new(ip_network, split[1].parse().unwrap()).unwrap();
ignored.insert(ip, true);
}
}
info!(
"{} allowed IP ranges, {} ignored IP ranges",
allowed.len().0,
ignored.len().0
);
Ok(Self { allowed, ignored })
}
/// Checks if an IP address is permitted.
pub fn is_permitted(&self, ip: IpAddr) -> bool {
//println!("Checking: {:?}", ip);
if let Some(_allow) = self.allowed.longest_match(ip) {
if let Some(_deny) = self.ignored.longest_match(ip) {
return false;
}
return true;
}
false
}
}
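A minimal usage sketch of the allow/ignore lookup above; the address is hypothetical, and the outcome depends entirely on the ip_ranges.allow_subnets and ip_ranges.ignore_subnets lists in the loaded configuration.

fn example_ip_check() -> Result<(), UispIntegrationError> {
    let config = lqos_config::load_config().map_err(|_| UispIntegrationError::CannotLoadConfig)?;
    let ranges = IpRanges::new(&config)?;
    // Permitted only when the address matches an allowed range and no ignored range.
    let ip: std::net::IpAddr = "100.64.1.2".parse().unwrap();
    println!("100.64.1.2 permitted: {}", ranges.is_permitted(ip));
    Ok(())
}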

View File

@ -0,0 +1,94 @@
//! Rust version of the UISP Integration from LibreQoS. This will probably
//! be ported back to Python, with Rust support structures - but I'll iterate
//! faster in Rust.
#[warn(missing_docs)]
mod errors;
pub mod ip_ranges;
mod strategies;
pub mod uisp_types;
use crate::errors::UispIntegrationError;
use crate::ip_ranges::IpRanges;
use lqos_config::Config;
use tokio::time::Instant;
use tracing::{error, info};
/// Start the tracing/logging system
fn init_tracing() {
tracing_subscriber::fmt()
.with_file(true)
.with_line_number(true)
.compact()
.init();
}
fn check_enabled_status(config: &Config) -> Result<(), UispIntegrationError> {
if !config.uisp_integration.enable_uisp {
error!("UISP Integration is disabled in /etc/lqos.conf");
error!("Integration will not run.");
Err(UispIntegrationError::IntegrationDisabled)
} else {
Ok(())
}
}
#[tokio::main]
async fn main() -> Result<(), UispIntegrationError> {
let now = Instant::now();
init_tracing();
info!("UISP Integration 2.0-rust");
// Load the configuration
info!("Loading Configuration");
let config = lqos_config::load_config().map_err(|e| {
error!("Unable to load configuration");
error!("{e:?}");
UispIntegrationError::CannotLoadConfig
})?;
// Check that we're allowed to run
check_enabled_status(&config)?;
// Build our allowed/excluded IP ranges
let ip_ranges = IpRanges::new(&config)?;
// Select a strategy and go from there
strategies::build_with_strategy(config, ip_ranges).await?;
// Print timings
let elapsed = now.elapsed();
info!(
"UISP Integration Run Completed in {:.3} seconds",
elapsed.as_secs_f32()
);
Ok(())
}
#[cfg(test)]
mod test {
use super::*;
use lqos_config::Config;
#[test]
fn test_uisp_disabled() {
let mut cfg = Config::default();
cfg.uisp_integration.enable_uisp = false;
let result = check_enabled_status(&cfg);
assert!(result.is_err());
assert_eq!(
result.unwrap_err(),
UispIntegrationError::IntegrationDisabled
);
}
#[test]
fn test_uisp_enabled() {
let mut cfg = Config::default();
cfg.uisp_integration.enable_uisp = true;
let result = check_enabled_status(&cfg);
assert!(result.is_ok());
}
}

View File

@ -0,0 +1,121 @@
use crate::errors::UispIntegrationError;
use crate::ip_ranges::IpRanges;
use crate::uisp_types::UispDevice;
use lqos_config::Config;
use serde::Serialize;
use std::fs;
use std::path::Path;
use tracing::{error, info};
/// Represents a shaped device in the ShapedDevices.csv file.
#[derive(Serialize, Debug)]
struct ShapedDevice {
pub circuit_id: String,
pub circuit_name: String,
pub device_id: String,
pub device_name: String,
pub parent_node: String,
pub mac: String,
pub ipv4: String,
pub ipv6: String,
pub download_min: u64,
pub upload_min: u64,
pub download_max: u64,
pub upload_max: u64,
pub comment: String,
}
/// Builds a flat network for UISP
///
/// # Arguments
/// * `config` - The configuration
/// * `ip_ranges` - The IP ranges to use for the network
pub async fn build_flat_network(
config: Config,
ip_ranges: IpRanges,
) -> Result<(), UispIntegrationError> {
// Load the devices from UISP
let devices = uisp::load_all_devices_with_interfaces(config.clone())
.await
.map_err(|e| {
error!("Unable to load device list from UISP");
error!("{e:?}");
UispIntegrationError::UispConnectError
})?;
let sites = uisp::load_all_sites(config.clone()).await.map_err(|e| {
error!("Unable to load device list from UISP");
error!("{e:?}");
UispIntegrationError::UispConnectError
})?;
// Create a {} network.json
let net_json_path = Path::new(&config.lqos_directory).join("network.json");
fs::write(net_json_path, "{}\n").map_err(|e| {
error!("Unable to access network.json");
error!("{e:?}");
UispIntegrationError::WriteNetJson
})?;
// Simple Shaped Devices File
let mut shaped_devices = Vec::new();
let ipv4_to_v6 = Vec::new();
for site in sites.iter() {
if let Some(site_id) = &site.identification {
if let Some(site_type) = &site_id.site_type {
if site_type == "endpoint" {
let (download_max, upload_max) = site.qos(
config.queues.generated_pn_download_mbps,
config.queues.generated_pn_upload_mbps,
);
let download_min = (download_max as f32
* config.uisp_integration.commit_bandwidth_multiplier)
as u64;
let upload_min = (upload_max as f32
* config.uisp_integration.commit_bandwidth_multiplier)
as u64;
for device in devices.iter() {
let dev = UispDevice::from_uisp(device, &config, &ip_ranges, &ipv4_to_v6);
if dev.site_id == site.id {
// We're an endpoint in the right site. We're getting there
let sd = ShapedDevice {
circuit_id: site.id.clone(),
circuit_name: site.name_or_blank(),
device_id: device.get_id(),
device_name: device.get_name().unwrap_or("".to_string()),
parent_node: "".to_string(),
mac: device.identification.mac.clone().unwrap_or("".to_string()),
ipv4: dev.ipv4_list(),
ipv6: dev.ipv6_list(),
download_min: u64::max(2, download_min),
download_max: u64::max(3, download_max as u64),
upload_min: u64::max(2, upload_min),
upload_max: u64::max(3, upload_max as u64),
comment: "".to_string(),
};
shaped_devices.push(sd);
}
}
}
}
}
}
// Write it to disk
let file_path = Path::new(&config.lqos_directory).join("ShapedDevices.csv");
let mut writer = csv::WriterBuilder::new()
.has_headers(true)
.from_path(file_path)
.unwrap();
for d in shaped_devices.iter() {
writer.serialize(d).unwrap();
}
writer.flush().map_err(|e| {
error!("Unable to flush CSV file");
error!("{e:?}");
UispIntegrationError::CsvError
})?;
info!("Wrote {} lines to ShapedDevices.csv", shaped_devices.len());
Ok(())
}

View File

@ -0,0 +1,79 @@
use crate::uisp_types::{UispDevice, UispSite, UispSiteType};
use lqos_config::Config;
use std::collections::HashSet;
use tracing::info;
use uisp::{DataLink, Device, Site};
/// Finds access points that are connected to other sites and promotes them to their own site.
/// This is useful for sites that have multiple APs, but are currently represented as a single site.
///
/// # Arguments
/// * `sites` - The list of sites to modify
/// * `devices_raw` - The list of devices
/// * `data_links_raw` - The list of data links
/// * `sites_raw` - The list of sites
/// * `devices` - The list of devices with their speeds
/// * `config` - The configuration
pub fn promote_access_points(
sites: &mut Vec<UispSite>,
devices_raw: &[Device],
data_links_raw: &[DataLink],
sites_raw: &[Site],
devices: &[UispDevice],
config: &Config,
) {
let mut all_links = Vec::new();
sites.iter().for_each(|s| {
let links = s.find_aps(&devices_raw, &data_links_raw, &sites_raw);
if !links.is_empty() {
all_links.extend(links);
}
});
info!("Detected {} intra-site links", all_links.len());
// Insert AP entries
for link in all_links {
// Create the new AP site
let parent_site_id = sites.iter().position(|s| s.id == link.site_id).unwrap();
/*if sites[parent_site_id].site_type == UispSiteType::Client {
warn!(
"{} is a client, but has an AP pointing at other locations",
sites[parent_site_id].name
);
}*/
let mut max_up_mbps = config.queues.generated_pn_upload_mbps;
let mut max_down_mbps = config.queues.generated_pn_download_mbps;
if let Some(ap) = devices.iter().find(|d| d.id == link.device_id) {
max_up_mbps = ap.upload;
max_down_mbps = ap.download;
}
// If the parent is a client, use the client's speeds
if sites[parent_site_id].site_type == UispSiteType::Client {
//println!("Setting speed to client speed: {} = {}/{} -> {}/{}", link.device_name, max_up_mbps, max_down_mbps, sites[parent_site_id].max_up_mbps, sites[parent_site_id].max_down_mbps);
max_up_mbps = sites[parent_site_id].max_up_mbps;
max_down_mbps = sites[parent_site_id].max_down_mbps;
}
let mut new_site = UispSite {
id: link.device_id,
name: link.device_name,
site_type: UispSiteType::AccessPoint,
uisp_parent_id: None,
parent_indices: HashSet::new(),
max_up_mbps,
max_down_mbps,
..Default::default()
};
new_site.parent_indices.insert(parent_site_id);
// Add it
let new_id = sites.len();
sites.push(new_site);
sites.iter_mut().for_each(|s| {
if link.child_sites.contains(&s.id) {
s.parent_indices.insert(new_id);
}
});
}
}

View File

@ -0,0 +1,198 @@
use crate::errors::UispIntegrationError;
use crate::uisp_types::UispSite;
use csv::ReaderBuilder;
use lqos_config::Config;
use std::collections::HashMap;
use std::path::Path;
use tracing::{error, info};
pub type BandwidthOverrides = HashMap<String, (f32, f32)>;
/// Attempts to load integrationUISPbandwidths.csv to use for
/// bandwidth overrides. Returns an empty set if not found.
/// Returns an error if the file is found but cannot be read.
///
/// The file should be a CSV with the following columns:
///
/// | Parent Node | Down | Up |
/// |-------------|------|----|
/// | Site1 | 100 | 10 |
/// | Site2 | 200 | 20 |
///
/// The Parent Node should match the name of the site in UISP.
/// The Down and Up columns should be the desired bandwidth in Mbps.
///
/// If the file is found, the overrides will be applied to the sites
/// in the `UispSite` array by the `apply_bandwidth_overrides` function.
///
/// # Arguments
/// * `config` - The configuration
///
/// # Returns
/// * A `BandwidthOverrides` map of site names to bandwidth overrides
pub fn get_site_bandwidth_overrides(
config: &Config,
) -> Result<BandwidthOverrides, UispIntegrationError> {
info!("Looking for integrationUISPbandwidths.csv");
let file_path = Path::new(&config.lqos_directory).join("integrationUISPbandwidths.csv");
if file_path.exists() {
let reader = ReaderBuilder::new()
.comment(Some(b'#'))
.trim(csv::Trim::All)
.from_path(file_path);
if reader.is_err() {
error!("Unable to read integrationUISPbandwidths.csv");
error!("{:?}", reader);
return Err(UispIntegrationError::CsvError);
}
let mut reader = reader.unwrap();
let mut overrides = HashMap::new();
for (line, result) in reader.records().enumerate() {
if let Ok(result) = result {
if result.len() != 3 {
error!("Wrong number of records on line {line}");
continue;
}
let parent_node = result[0].to_string();
if let Some(d) = numeric_string_to_f32(&result[1]) {
if let Some(u) = numeric_string_to_f32(&result[2]) {
info!("Using bandiwdth override: {}, {}/{}", parent_node, d, u);
overrides.insert(parent_node, (d, u));
} else {
error!("Cannot parse {} as float on line {line}", &result[2]);
}
} else {
error!("Cannot parse {} as float on line {line}", &result[1]);
}
} else {
error!("Error reading integrationUISPbandwidths.csv line");
error!("{result:?}");
}
}
info!("Loaded {} bandwidth overrides", overrides.len());
return Ok(overrides);
}
info!("No bandwidth overrides loaded.");
Ok(HashMap::new())
}
fn numeric_string_to_f32(text: &str) -> Option<f32> {
if let Ok(n) = text.parse::<f32>() {
Some(n)
} else if let Ok(n) = text.parse::<i64>() {
Some(n as f32)
} else {
error!("Unable to parse {text} as a numeric");
None
}
}
/// Applies the bandwidth overrides to the sites in the array.
///
/// # Arguments
/// * `sites` - The list of sites to modify
/// * `bandwidth_overrides` - The bandwidth overrides to apply
pub fn apply_bandwidth_overrides(sites: &mut [UispSite], bandwidth_overrides: &BandwidthOverrides) {
for site in sites.iter_mut() {
if let Some((down, up)) = bandwidth_overrides.get(&site.name) {
// Apply the overrides
site.max_down_mbps = *down as u32;
site.max_up_mbps = *up as u32;
info!(
"Bandwidth override for {} applied ({} / {})",
&site.name, site.max_down_mbps, site.max_up_mbps
);
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_numeric_string_to_f32_valid_float() {
let result = numeric_string_to_f32("3.2");
assert_eq!(result, Some(3.2));
}
#[test]
fn test_numeric_string_to_f32_valid_integer() {
let result = numeric_string_to_f32("42");
assert_eq!(result, Some(42.0));
}
#[test]
fn test_numeric_string_to_f32_invalid_string() {
let result = numeric_string_to_f32("abc");
assert_eq!(result, None);
}
#[test]
fn test_apply_bandwidth_overrides_existing_site() {
let mut sites = vec![
UispSite {
name: "SiteA".to_string(),
max_down_mbps: 100,
max_up_mbps: 100,
..Default::default()
}
];
let mut overrides = BandwidthOverrides::new();
overrides.insert("SiteA".to_string(), (150., 200.));
apply_bandwidth_overrides(&mut sites, &overrides);
assert_eq!(sites[0].max_down_mbps, 150);
assert_eq!(sites[0].max_up_mbps, 200);
}
#[test]
fn test_apply_bandwidth_overrides_non_existing_site() {
let mut sites = vec![
UispSite {
name: "SiteB".to_string(),
max_down_mbps: 100,
max_up_mbps: 100,
..Default::default()
}
];
let overrides = BandwidthOverrides::new(); // No override added
apply_bandwidth_overrides(&mut sites, &overrides);
// Ensure no changes are made
assert_eq!(sites[0].max_down_mbps, 100);
assert_eq!(sites[0].max_up_mbps, 100);
}
#[test]
fn test_apply_bandwidth_overrides_multiple_sites() {
let mut sites = vec![
UispSite {
name: "SiteC".to_string(),
max_down_mbps: 300,
max_up_mbps: 300,
..Default::default()
},
UispSite {
name: "SiteD".to_string(),
max_down_mbps: 400,
max_up_mbps: 400,
..Default::default()
}
];
let mut overrides = BandwidthOverrides::new();
overrides.insert("SiteC".to_string(), (350., 450.));
apply_bandwidth_overrides(&mut sites, &overrides);
assert_eq!(sites[0].max_down_mbps, 350);
assert_eq!(sites[0].max_up_mbps, 450);
// SiteD should not change
assert_eq!(sites[1].max_down_mbps, 400);
assert_eq!(sites[1].max_up_mbps, 400);
}
}
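For reference, a hypothetical integrationUISPbandwidths.csv matching the format documented above, shown here as a Rust string constant. The site names and rates are illustrative; the reader above expects a header row (the csv crate's default) and skips lines beginning with '#'.

// Hypothetical sample of integrationUISPbandwidths.csv (Down and Up are Mbps).
const EXAMPLE_BANDWIDTH_OVERRIDES_CSV: &str = "\
ParentNode,Down,Up
# Lines starting with '#' are ignored by the reader above.
Tower-North,1000,1000
Relay-West,500,100
";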

View File

@ -0,0 +1,67 @@
use crate::errors::UispIntegrationError;
use crate::uisp_types::{UispSite, UispSiteType};
use std::collections::HashSet;
use tracing::info;
/// Promotes client sites with multiple child sites to a new site type.
/// This is useful for sites that have multiple child sites, but are currently represented as a single site.
///
/// # Arguments
/// * `sites` - The list of sites to modify
///
/// # Returns
/// * An `Ok` if the operation was successful
/// * An `Err` if the operation failed
pub fn promote_clients_with_children(
sites: &mut Vec<UispSite>,
) -> Result<(), UispIntegrationError> {
info!("Scanning for client sites with child sites");
let mut client_sites_with_children = Vec::new();
// Iterate sites and find Client types with >1 child
sites
.iter()
.enumerate()
.filter(|(_, s)| s.site_type == UispSiteType::Client)
.for_each(|(idx, _s)| {
let child_count = sites
.iter()
.filter(|c| c.parent_indices.contains(&idx))
.count();
if child_count > 1 {
client_sites_with_children.push(idx);
}
});
for child_site in client_sites_with_children {
//info!("Promoting {} to ClientWithChildren", sites[child_site].name);
sites[child_site].site_type = UispSiteType::ClientWithChildren;
let old_name = sites[child_site].name.clone();
sites[child_site].name = format!("(Generated Site) {}", sites[child_site].name);
let old_id = sites[child_site].id.clone();
sites[child_site].id = format!("GEN-{}", sites[child_site].id);
sites[child_site].suspended = false;
let mut parent_indices = HashSet::new();
parent_indices.insert(child_site);
let mut new_site = UispSite {
id: old_id,
name: old_name,
site_type: UispSiteType::Client,
uisp_parent_id: None,
parent_indices,
max_down_mbps: sites[child_site].max_down_mbps,
max_up_mbps: sites[child_site].max_up_mbps,
suspended: sites[child_site].suspended,
selected_parent: Some(child_site),
..Default::default()
};
new_site
.device_indices
.extend_from_slice(&sites[child_site].device_indices);
sites[child_site].device_indices.clear();
sites.push(new_site);
}
Ok(())
}

View File

@ -0,0 +1,92 @@
use std::{fs::read_to_string, path::Path};
use lqos_config::Config;
use pyo3::{prepare_freethreaded_python, PyResult, Python};
use crate::uisp_types::Ipv4ToIpv6;
// To ease debugging in the absence of this particular setup, there's a mock function
// available, too.
//
// Enable one of these!
//const PY_FUNC: &str = "pullMikrotikIPv6_Mock";
const PY_FUNC: &str = "pullMikrotikIPv6";
pub async fn mikrotik_data(config: &Config) -> anyhow::Result<Vec<Ipv4ToIpv6>> {
if config.uisp_integration.ipv6_with_mikrotik {
fetch_mikrotik_data(config).await
} else {
Ok(Vec::new())
}
}
async fn fetch_mikrotik_data(config: &Config) -> anyhow::Result<Vec<Ipv4ToIpv6>> {
// Find the script and error out if it doesn't exist
let base_path = Path::new(&config.lqos_directory);
let mikrotik_script_path = base_path.join("mikrotikFindIPv6.py");
if !mikrotik_script_path.exists() {
tracing::error!("Mikrotik script not found at {:?}", mikrotik_script_path);
return Err(anyhow::anyhow!("Mikrotik script not found at {:?}", mikrotik_script_path));
}
// Find the `mikrotikDHCPRouterList.csv` file.
let mikrotik_dhcp_router_list_path = base_path.join("mikrotikDHCPRouterList.csv");
if !mikrotik_dhcp_router_list_path.exists() {
tracing::error!("Mikrotik DHCP router list not found at {:?}", mikrotik_dhcp_router_list_path);
return Err(anyhow::anyhow!("Mikrotik DHCP router list not found at {:?}", mikrotik_dhcp_router_list_path));
}
// Load the script
let code = read_to_string(mikrotik_script_path)?;
// Get the Python environment going
let mut json_from_python = None;
prepare_freethreaded_python();
let result = Python::with_gil(|python| -> PyResult<()> {
// Run the Python script
let locals = pyo3::types::PyDict::new(python);
python.run(&code, None, Some(locals))?;
// Run the function to pull the Mikrotik data
let result = python
.eval(
&format!("{PY_FUNC}('{}')", mikrotik_dhcp_router_list_path.to_string_lossy()),
Some(locals),
None
)?
.extract::<String>()?;
// Parse the response.
// it is an object that looks like this:
// {
// "1.2.3.4" : "2001:db8::1",
// }
// We're forcibly returning JSON to make the bridge easier.
json_from_python = Some(result);
Ok(())
});
// If an error occurred, fail with as much information as possible
if let Err(e) = result {
tracing::error!("Python error: {:?}", e);
return Err(anyhow::anyhow!("Python error: {:?}", e));
}
// If we got this far, we have some JSON to work with
let json_from_python = json_from_python.unwrap();
let json = serde_json::from_str::<serde_json::Value>(&json_from_python)?;
if let Some(map) = json.as_object() {
let mut result = Vec::new();
for (ipv4, ipv6) in map {
result.push(Ipv4ToIpv6 {
ipv4: ipv4.to_string().replace("\"", ""),
ipv6: ipv6.to_string().replace("\"", ""),
});
}
return Ok(result);
} else {
tracing::error!("Mikrotik data is not an object");
return Err(anyhow::anyhow!("Mikrotik data is not an object"));
}
}

View File

@ -0,0 +1,138 @@
mod ap_promotion;
mod bandwidth_overrides;
mod client_site_promotion;
mod network_json;
mod parse;
mod root_site;
mod routes_override;
mod shaped_devices_writer;
mod squash_single_entry_aps;
mod tree_walk;
mod uisp_fetch;
mod utils;
mod zero_capacity_sites;
mod mikrotik;
use crate::errors::UispIntegrationError;
use crate::ip_ranges::IpRanges;
use crate::strategies::full::ap_promotion::promote_access_points;
use crate::strategies::full::bandwidth_overrides::{
apply_bandwidth_overrides, get_site_bandwidth_overrides,
};
use crate::strategies::full::client_site_promotion::promote_clients_with_children;
use crate::strategies::full::network_json::write_network_file;
use crate::strategies::full::parse::parse_uisp_datasets;
use crate::strategies::full::root_site::{find_root_site, set_root_site};
use crate::strategies::full::routes_override::get_route_overrides;
use crate::strategies::full::shaped_devices_writer::write_shaped_devices;
use crate::strategies::full::squash_single_entry_aps::squash_single_aps;
use crate::strategies::full::tree_walk::walk_tree_for_routing;
use crate::strategies::full::uisp_fetch::load_uisp_data;
use crate::strategies::full::utils::{print_sites, warn_of_no_parents_and_promote};
use crate::strategies::full::zero_capacity_sites::correct_zero_capacity_sites;
use crate::uisp_types::{UispSite, UispSiteType};
use lqos_config::Config;
/// Attempt to construct a full hierarchy topology for the UISP network.
/// This function will load the UISP data, parse it into a more usable format,
/// and then attempt to build a full network topology.
///
/// # Arguments
/// * `config` - The configuration
/// * `ip_ranges` - The IP ranges to use for the network
///
/// # Returns
/// * An `Ok` if the operation was successful
/// * An `Err` if the operation failed
pub async fn build_full_network(
config: Config,
ip_ranges: IpRanges,
) -> Result<(), UispIntegrationError> {
// Load any bandwidth overrides
let bandwidth_overrides = get_site_bandwidth_overrides(&config)?;
// Load any routing overrides
let routing_overrides = get_route_overrides(&config)?;
// Obtain the UISP data and transform it into easier to work with types
let (sites_raw, devices_raw, data_links_raw) = load_uisp_data(config.clone()).await?;
// If Mikrotik is enabled, we need to fetch the Mikrotik data
let ipv4_to_v6 = mikrotik::mikrotik_data(&config).await.unwrap_or_else(|_| Vec::new());
//println!("{:?}", ipv4_to_v6);
// Parse the UISP data into a more usable format
let (mut sites, data_links, devices) = parse_uisp_datasets(
&sites_raw,
&data_links_raw,
&devices_raw,
&config,
&ip_ranges,
ipv4_to_v6
);
// Check root sites
let root_site = find_root_site(&config, &mut sites, &data_links)?;
// Set the site root
set_root_site(&mut sites, &root_site)?;
// Create a new "_Infrastructure" node for the parent, since we can't link to the top
// level very easily
if let Some(root_idx) = sites.iter().position(|s| s.name == root_site) {
sites.push(UispSite {
id: format!("{}_Infrastructure", sites[root_idx].name.clone()),
name: format!("{}_Infrastructure", sites[root_idx].name.clone()),
site_type: UispSiteType::Site,
uisp_parent_id: None,
parent_indices: Default::default(),
max_down_mbps: sites[root_idx].max_down_mbps,
max_up_mbps: sites[root_idx].max_up_mbps,
suspended: false,
device_indices: vec![],
route_weights: vec![],
selected_parent: Some(root_idx),
});
}
// Search for devices that provide links elsewhere
promote_access_points(
&mut sites,
&devices_raw,
&data_links_raw,
&sites_raw,
&devices,
&config,
);
// Sites that are clients but have children should be promoted
promote_clients_with_children(&mut sites)?;
// Do Link Squashing
squash_single_aps(&mut sites)?;
// Build Path Weights
walk_tree_for_routing(&mut sites, &root_site, &routing_overrides)?;
// Apply bandwidth overrides
apply_bandwidth_overrides(&mut sites, &bandwidth_overrides);
// Correct any sites with zero capacity
correct_zero_capacity_sites(&mut sites, &config);
// Print Sites
if let Some(root_idx) = sites.iter().position(|s| s.name == root_site) {
// Issue No Parent Warnings
warn_of_no_parents_and_promote(&mut sites, &devices_raw, root_idx, &config);
print_sites(&sites, root_idx);
// Output a network.json
write_network_file(&config, &sites, root_idx)?;
// Write ShapedDevices.csv
write_shaped_devices(&config, &sites, root_idx, &devices)?;
}
Ok(())
}

View File

@ -0,0 +1,81 @@
use crate::errors::UispIntegrationError;
use crate::uisp_types::{UispSite, UispSiteType};
use lqos_config::Config;
use std::fs::write;
use std::path::Path;
use tracing::{error, info};
/// Writes the network.json file for UISP
///
/// # Arguments
/// * `config` - The configuration
/// * `sites` - The list of sites
/// * `root_idx` - The index of the root site
///
/// # Returns
/// * An `Ok` if the operation was successful
/// * An `Err` if the operation failed
pub fn write_network_file(
config: &Config,
sites: &[UispSite],
root_idx: usize,
) -> Result<(), UispIntegrationError> {
let network_path = Path::new(&config.lqos_directory).join("network.json");
if network_path.exists() && !config.integration_common.always_overwrite_network_json {
tracing::warn!("Network.json exists, and always overwrite network json is not true - not writing network.json");
return Ok(());
}
// Write the network JSON file
let root = traverse_sites(sites, root_idx, 0)?;
if let Some(children) = root.get("children") {
let json = serde_json::to_string_pretty(&children).unwrap();
write(network_path, json).map_err(|e| {
error!("Unable to write network.json");
error!("{e:?}");
UispIntegrationError::WriteNetJson
})?;
info!("Written network.json");
}
Ok(())
}
fn traverse_sites(
sites: &[UispSite],
idx: usize,
depth: u32,
) -> Result<serde_json::Map<String, serde_json::Value>, UispIntegrationError> {
let mut entry = serde_json::Map::new();
entry.insert(
"downloadBandwidthMbps".to_string(),
serde_json::Value::Number(sites[idx].max_down_mbps.into()),
);
entry.insert(
"uploadBandwidthMbps".to_string(),
serde_json::Value::Number(sites[idx].max_up_mbps.into()),
);
if depth < 10 {
let mut children = serde_json::Map::new();
for (child_id, child) in sites.iter().enumerate() {
if let Some(parent) = child.selected_parent {
if parent == idx && should_traverse(&sites[child_id].site_type) {
children.insert(
child.name.clone(),
serde_json::Value::Object(traverse_sites(sites, child_id, depth + 1)?),
);
}
}
}
if !children.is_empty() {
entry.insert("children".to_string(), serde_json::Value::Object(children));
}
}
Ok(entry)
}
fn should_traverse(t: &UispSiteType) -> bool {
!matches!(t, UispSiteType::Client)
}
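As an illustration of the structure traverse_sites() emits (the site names and rates below are hypothetical): each non-client site becomes an object keyed by its name, carrying its bandwidth plan and, where present, a nested children map, down to the ten-level depth limit.

use serde_json::json;

fn main() {
    // Hypothetical fragment of a generated network.json, mirroring the nesting above.
    let example = json!({
        "Tower-North": {
            "downloadBandwidthMbps": 1000,
            "uploadBandwidthMbps": 1000,
            "children": {
                "Sector-A": {
                    "downloadBandwidthMbps": 500,
                    "uploadBandwidthMbps": 500
                }
            }
        }
    });
    println!("{}", serde_json::to_string_pretty(&example).unwrap());
}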

View File

@ -0,0 +1,74 @@
use crate::ip_ranges::IpRanges;
use crate::uisp_types::{Ipv4ToIpv6, UispDataLink, UispDevice, UispSite};
use lqos_config::Config;
use tracing::info;
use uisp::{DataLink, Device, Site};
/// Parses the UISP datasets into a more usable format.
///
/// # Arguments
/// * `sites_raw` - The raw site data
/// * `data_links_raw` - The raw data link data
/// * `devices_raw` - The raw device data
/// * `config` - The configuration
/// * `ip_ranges` - The IP ranges to use for the network
///
/// # Returns
/// * A tuple containing the parsed sites, data links, and devices
pub fn parse_uisp_datasets(
sites_raw: &[Site],
data_links_raw: &[DataLink],
devices_raw: &[Device],
config: &Config,
ip_ranges: &IpRanges,
ipv4_to_v6: Vec<Ipv4ToIpv6>,
) -> (Vec<UispSite>, Vec<UispDataLink>, Vec<UispDevice>) {
let (mut sites, data_links, devices) = (
parse_sites(sites_raw, config),
parse_data_links(data_links_raw),
parse_devices(devices_raw, config, ip_ranges, ipv4_to_v6),
);
// Assign devices to sites
for site in sites.iter_mut() {
devices
.iter()
.enumerate()
.filter(|(_, device)| device.site_id == site.id)
.for_each(|(idx, _)| {
site.device_indices.push(idx);
});
}
(sites, data_links, devices)
}
fn parse_sites(sites_raw: &[Site], config: &Config) -> Vec<UispSite> {
let sites: Vec<UispSite> = sites_raw
.iter()
.map(|s| UispSite::from_uisp(s, config))
.collect();
info!("{} sites have been successfully parsed", sites.len());
sites
}
fn parse_data_links(data_links_raw: &[DataLink]) -> Vec<UispDataLink> {
let data_links: Vec<UispDataLink> = data_links_raw
.iter()
.filter_map(UispDataLink::from_uisp)
.collect();
info!(
"{} data-links have been successfully parsed",
data_links.len()
);
data_links
}
fn parse_devices(devices_raw: &[Device], config: &Config, ip_ranges: &IpRanges, ipv4_to_v6: Vec<Ipv4ToIpv6>) -> Vec<UispDevice> {
let devices: Vec<UispDevice> = devices_raw
.iter()
.map(|d| UispDevice::from_uisp(d, config, ip_ranges, &ipv4_to_v6))
.collect();
info!("{} devices have been sucessfully parsed", devices.len());
devices
}

View File

@ -0,0 +1,196 @@
use crate::errors::UispIntegrationError;
use crate::uisp_types::{UispDataLink, UispSite, UispSiteType};
use lqos_config::Config;
use tracing::{error, info, warn};
/// Looks to identify the root site for the site tree.
/// If the "site" is defined in the configuration, it will try to use it.
/// If the site is defined but does not exist, it will search for an Internet-connected site
/// and try to use that.
/// If it still hasn't found one, and there are multiple Internet connected sites - it will insert
/// a fake root and use that instead. I'm not sure that's a great idea.
pub fn find_root_site(
config: &Config,
sites: &mut Vec<UispSite>,
data_links: &[UispDataLink],
) -> Result<String, UispIntegrationError> {
let mut root_site_name = config.uisp_integration.site.clone();
if root_site_name.is_empty() {
warn!("Root site name isn't specified in /etc/lqos.conf - we'll try and figure it out");
root_site_name = handle_multiple_internet_connected_sites(sites, data_links)?;
} else {
info!("Using root UISP site from /etc/lqos.conf: {root_site_name}");
if !sites.iter().any(|s| s.name == root_site_name) {
error!("Site {root_site_name} (from /etc/lqos.conf) not found in the UISP sites list");
return Err(UispIntegrationError::NoRootSite);
} else {
info!("{root_site_name} found in the sites list.");
}
}
Ok(root_site_name)
}
fn handle_multiple_internet_connected_sites(
sites: &mut Vec<UispSite>,
data_links: &[UispDataLink],
) -> Result<String, UispIntegrationError> {
let mut root_site_name = String::new();
let mut candidates = Vec::new();
data_links.iter().filter(|l| !l.can_delete).for_each(|l| {
candidates.push(l.from_site_name.clone());
});
if candidates.is_empty() {
error!("Unable to find a root site in the sites/data-links.");
return Err(UispIntegrationError::NoRootSite);
} else if candidates.len() == 1 {
root_site_name = candidates[0].clone();
info!(
"Found only one site with an Internet connection: {root_site_name}, using it as root"
);
} else {
warn!("Multiple Internet links detected. Will create an 'Internet' root node");
root_site_name = "INSERTED_INTERNET".to_string();
sites.push(UispSite {
id: "ROOT-001".to_string(),
name: "INSERTED_INTERNET".to_string(),
site_type: UispSiteType::Root,
..Default::default()
})
}
Ok(root_site_name)
}
/// Sets the root site in the site list.
/// If there are multiple root sites, it will return an error.
///
/// # Arguments
/// * `sites` - The list of sites
/// * `root_site` - The name of the root site
pub fn set_root_site(sites: &mut [UispSite], root_site: &str) -> Result<(), UispIntegrationError> {
if let Some(root) = sites.iter_mut().find(|s| s.name == root_site) {
root.site_type = UispSiteType::Root;
}
let number_of_roots = sites
.iter()
.filter(|s| s.site_type == UispSiteType::Root)
.count();
if number_of_roots > 1 {
error!("More than one root present in the tree! That's not going to work. Bailing.");
return Err(UispIntegrationError::NoRootSite);
} else {
info!("Single root tagged in the tree");
}
Ok(())
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_known_root() {
let mut cfg = Config::default();
cfg.uisp_integration.enable_uisp = true;
cfg.uisp_integration.site = "TEST".to_string();
let mut sites = vec![UispSite {
id: "TEST".to_string(),
name: "TEST".to_string(),
site_type: UispSiteType::Site,
..Default::default()
}];
let data_links = vec![];
let result = find_root_site(&cfg, &mut sites, &data_links);
assert!(result.is_ok());
assert_eq!(result.unwrap(), "TEST");
}
#[test]
fn fail_find_a_known_root() {
let mut cfg = Config::default();
cfg.uisp_integration.enable_uisp = true;
cfg.uisp_integration.site = "DOES NOT EXIST".to_string();
let mut sites = vec![UispSite {
id: "TEST".to_string(),
name: "TEST".to_string(),
site_type: UispSiteType::Site,
..Default::default()
}];
let data_links = vec![];
let result = find_root_site(&cfg, &mut sites, &data_links);
assert!(result.is_err());
assert_eq!(result.unwrap_err(), UispIntegrationError::NoRootSite);
}
#[test]
fn find_single_root_from_data_links() {
let mut cfg = Config::default();
cfg.uisp_integration.enable_uisp = true;
cfg.uisp_integration.site = String::new();
let mut sites = vec![UispSite {
id: "TEST".to_string(),
name: "TEST".to_string(),
site_type: UispSiteType::Site,
..Default::default()
}];
let data_links = vec![UispDataLink {
id: "".to_string(),
from_site_id: "TEST".to_string(),
from_site_name: "TEST".to_string(),
to_site_id: "".to_string(),
to_site_name: "".to_string(),
can_delete: false,
}];
let result = find_root_site(&cfg, &mut sites, &data_links);
assert!(result.is_ok());
assert_eq!(result.unwrap(), "TEST");
}
#[test]
fn test_inserted_internet() {
let mut cfg = Config::default();
cfg.uisp_integration.enable_uisp = true;
cfg.uisp_integration.site = String::new();
let mut sites = vec![
UispSite {
id: "TEST".to_string(),
name: "TEST".to_string(),
site_type: UispSiteType::Site,
..Default::default()
},
UispSite {
id: "TEST2".to_string(),
name: "TEST2".to_string(),
site_type: UispSiteType::Site,
..Default::default()
},
];
let data_links = vec![
UispDataLink {
id: "".to_string(),
from_site_id: "".to_string(),
to_site_id: "TEST".to_string(),
from_site_name: "".to_string(),
to_site_name: "TEST".to_string(),
can_delete: false,
},
UispDataLink {
id: "".to_string(),
from_site_id: "".to_string(),
to_site_id: "TEST2".to_string(),
from_site_name: "".to_string(),
to_site_name: "TEST2".to_string(),
can_delete: false,
},
];
let result = find_root_site(&cfg, &mut sites, &data_links);
assert!(result.is_ok());
assert!(sites.iter().any(|s| s.name == "INSERTED_INTERNET"));
}
}

View File

@ -0,0 +1,64 @@
use crate::errors::UispIntegrationError;
use csv::ReaderBuilder;
use lqos_config::Config;
use serde::Deserialize;
use std::path::Path;
use tracing::{error, info};
/// Represents a route override in the integrationUISProutes.csv file.
#[derive(Deserialize, Debug)]
pub struct RouteOverride {
/// The site to override the route from.
pub from_site: String,
/// The site to override the route to.
pub to_site: String,
/// The cost of the route.
pub cost: u32,
}
/// Attempts to load integrationUISProutes.csv to use for
/// route overrides. Returns an empty set if not found.
/// Returns an error if the file is found but cannot be read.
///
/// The file should be a CSV with the following columns:
///
/// | From Site | To Site | Cost |
/// |-----------|---------|------|
/// | Site1 | Site2 | 100 |
/// | Site2 | Site3 | 200 |
///
/// The From Site and To Site should match the name of the site in UISP.
///
/// If the file is found, the overrides will be applied to the routes
/// in the `UispSite` array by the `apply_route_overrides` function.
///
/// # Arguments
/// * `config` - The configuration
///
/// # Returns
/// * An `Ok(Vec)` of `RouteOverride` objects
/// * An `Err` if the file is found but cannot be read
pub fn get_route_overrides(config: &Config) -> Result<Vec<RouteOverride>, UispIntegrationError> {
let file_path = Path::new(&config.lqos_directory).join("integrationUISProutes.csv");
if file_path.exists() {
let reader = ReaderBuilder::new()
.comment(Some(b'#'))
.trim(csv::Trim::All)
.from_path(file_path);
if reader.is_err() {
error!("Unable to read integrationUISProutes.csv");
error!("{:?}", reader);
return Err(UispIntegrationError::CsvError);
}
let mut reader = reader.unwrap();
let mut overrides = Vec::new();
for result in reader.deserialize::<RouteOverride>().flatten() {
overrides.push(result);
}
info!("Loaded {} route overrides", overrides.len());
Ok(overrides)
} else {
info!("No integrationUISProutes.csv found - no route overrides loaded.");
Ok(Vec::new())
}
}
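A hypothetical integrationUISProutes.csv, again shown as a Rust constant. Because the rows are deserialized directly into RouteOverride with the csv crate's default header handling, the header row here uses the struct's field names (from_site, to_site, cost); treat that exact spelling, and the site names, as assumptions rather than documented requirements.

// Hypothetical sample of integrationUISProutes.csv (cost replaces the default hop weight).
const EXAMPLE_ROUTE_OVERRIDES_CSV: &str = "\
from_site,to_site,cost
# Prefer the western backhaul out of Sector-A.
Sector-A,Relay-West,5
";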

View File

@ -0,0 +1,178 @@
use crate::errors::UispIntegrationError;
use crate::uisp_types::{UispDevice, UispSite, UispSiteType};
use lqos_config::Config;
use serde::Serialize;
use std::path::Path;
use tracing::{error, info};
/// Represents a shaped device in the ShapedDevices.csv file.
#[derive(Serialize, Debug)]
struct ShapedDevice {
pub circuit_id: String,
pub circuit_name: String,
pub device_id: String,
pub device_name: String,
pub parent_node: String,
pub mac: String,
pub ipv4: String,
pub ipv6: String,
pub download_min: u64,
pub upload_min: u64,
pub download_max: u64,
pub upload_max: u64,
pub comment: String,
}
/// Writes the ShapedDevices.csv file for UISP
///
/// # Arguments
/// * `config` - The configuration
/// * `sites` - The list of sites
/// * `root_idx` - The index of the root site
/// * `devices` - The list of devices
pub fn write_shaped_devices(
config: &Config,
sites: &[UispSite],
root_idx: usize,
devices: &[UispDevice],
) -> Result<(), UispIntegrationError> {
let file_path = Path::new(&config.lqos_directory).join("ShapedDevices.csv");
let mut shaped_devices = Vec::new();
// Traverse
traverse(
sites,
root_idx,
0,
devices,
&mut shaped_devices,
config,
root_idx,
);
// Write the CSV
let mut writer = csv::WriterBuilder::new()
.has_headers(true)
.from_path(file_path)
.unwrap();
for d in shaped_devices.iter() {
writer.serialize(d).unwrap();
}
writer.flush().map_err(|e| {
error!("Unable to flush CSV file");
error!("{e:?}");
UispIntegrationError::CsvError
})?;
info!("Wrote {} lines to ShapedDevices.csv", shaped_devices.len());
Ok(())
}
fn traverse(
sites: &[UispSite],
idx: usize,
depth: u32,
devices: &[UispDevice],
shaped_devices: &mut Vec<ShapedDevice>,
config: &Config,
root_idx: usize,
) {
if !sites[idx].device_indices.is_empty() {
// We have devices!
if sites[idx].site_type == UispSiteType::Client {
// Add as normal clients
for device in sites[idx].device_indices.iter() {
let device = &devices[*device];
if device.has_address() {
let download_max = (sites[idx].max_down_mbps as f32
* config.uisp_integration.bandwidth_overhead_factor)
as u64;
let upload_max = (sites[idx].max_up_mbps as f32
* config.uisp_integration.bandwidth_overhead_factor)
as u64;
let download_min = (download_max as f32
* config.uisp_integration.commit_bandwidth_multiplier)
as u64;
let upload_min = (upload_max as f32
* config.uisp_integration.commit_bandwidth_multiplier)
as u64;
let sd = ShapedDevice {
circuit_id: sites[idx].id.clone(),
circuit_name: sites[idx].name.clone(),
device_id: device.id.clone(),
device_name: device.name.clone(),
parent_node: sites[sites[idx].selected_parent.unwrap()].name.clone(),
mac: device.mac.clone(),
ipv4: device.ipv4_list(),
ipv6: device.ipv6_list(),
download_min: u64::max(2, download_min),
download_max: u64::max(3, download_max),
upload_min: u64::max(2, upload_min),
upload_max: u64::max(3, upload_max),
comment: "".to_string(),
};
shaped_devices.push(sd);
}
}
} else {
// It's an infrastructure node
for device in sites[idx].device_indices.iter() {
let device = &devices[*device];
let parent_node = if idx != root_idx {
sites[idx].name.clone()
} else {
format!("{}_Infrastructure", sites[idx].name.clone())
};
if device.has_address() {
let download_max = (sites[idx].max_down_mbps as f32
* config.uisp_integration.bandwidth_overhead_factor)
as u64;
let upload_max = (sites[idx].max_up_mbps as f32
* config.uisp_integration.bandwidth_overhead_factor)
as u64;
let download_min = (download_max as f32
* config.uisp_integration.commit_bandwidth_multiplier)
as u64;
let upload_min = (upload_max as f32
* config.uisp_integration.commit_bandwidth_multiplier)
as u64;
let sd = ShapedDevice {
circuit_id: format!("{}-inf", sites[idx].id),
circuit_name: format!("{} Infrastructure", sites[idx].name),
device_id: device.id.clone(),
device_name: device.name.clone(),
parent_node,
mac: device.mac.clone(),
ipv4: device.ipv4_list(),
ipv6: device.ipv6_list(),
download_min: u64::max(2, download_min),
download_max: u64::max(3, download_max),
upload_min: u64::max(2, upload_min),
upload_max: u64::max(3, upload_max),
comment: "Infrastructure Entry".to_string(),
};
shaped_devices.push(sd);
}
}
}
}
if depth < 10 {
for (child_idx, child) in sites.iter().enumerate() {
if let Some(parent_idx) = child.selected_parent {
if parent_idx == idx {
traverse(
sites,
child_idx,
depth + 1,
devices,
shaped_devices,
config,
root_idx,
);
}
}
}
}
}
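A worked example of the min/max arithmetic above, using hypothetical settings: a 100 Mbps site rate, bandwidth_overhead_factor = 1.25 and commit_bandwidth_multiplier = 0.75 (values chosen so the float truncation is exact).

fn main() {
    let site_max_down_mbps = 100u32; // hypothetical site plan
    let overhead_factor = 1.25f32;   // uisp_integration.bandwidth_overhead_factor (hypothetical)
    let commit_multiplier = 0.75f32; // uisp_integration.commit_bandwidth_multiplier (hypothetical)
    let download_max = (site_max_down_mbps as f32 * overhead_factor) as u64; // 125
    let download_min = (download_max as f32 * commit_multiplier) as u64;     // 93 (93.75 truncated)
    assert_eq!(u64::max(3, download_max), 125);
    assert_eq!(u64::max(2, download_min), 93);
}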

View File

@ -0,0 +1,45 @@
use crate::errors::UispIntegrationError;
use crate::uisp_types::{UispSite, UispSiteType};
/// Squashes single entry access points
///
/// This function will squash access points that have only one child site.
///
/// # Arguments
/// * `sites` - The list of sites to modify
///
/// # Returns
/// * An `Ok` if the operation was successful
pub fn squash_single_aps(sites: &mut [UispSite]) -> Result<(), UispIntegrationError> {
let mut squashable = Vec::new();
for (idx, site) in sites.iter().enumerate() {
if site.site_type == UispSiteType::AccessPoint {
let target_count = sites
.iter()
.filter(|s| s.parent_indices.contains(&idx))
.count();
if target_count == 1 && site.parent_indices.len() == 1 {
//tracing::info!("Site {} has only one child and is therefore eligible for squashing.", site.name);
squashable.push(idx);
}
}
}
for squash_idx in squashable {
sites[squash_idx].site_type = UispSiteType::SquashDeleted;
sites[squash_idx].name += " (SQUASHED)";
let up = sites[squash_idx].max_up_mbps;
let down = sites[squash_idx].max_down_mbps;
let new_parent = *sites[squash_idx].parent_indices.iter().next().unwrap();
sites.iter_mut().for_each(|s| {
if s.parent_indices.contains(&squash_idx) {
s.parent_indices.remove(&squash_idx);
s.parent_indices.insert(new_parent);
s.max_up_mbps = u32::min(up, s.max_up_mbps);
s.max_down_mbps = u32::min(down, s.max_down_mbps);
}
});
sites[squash_idx].parent_indices.clear();
}
Ok(())
}

View File

@ -0,0 +1,65 @@
use crate::errors::UispIntegrationError;
use crate::strategies::full::routes_override::RouteOverride;
use crate::uisp_types::{UispSite, UispSiteType};
/// Walks the tree to determine the best route for each site
///
/// Starting from the root, each reachable link is assigned a weight (ten per hop,
/// unless a route override supplies a cost), and every site then selects the
/// lowest-weight candidate as its parent.
///
/// # Arguments
/// * `sites` - The list of sites
/// * `root_site` - The name of the root site
/// * `overrides` - The list of route overrides
pub fn walk_tree_for_routing(
sites: &mut Vec<UispSite>,
root_site: &str,
overrides: &Vec<RouteOverride>,
) -> Result<(), UispIntegrationError> {
if let Some(root_idx) = sites.iter().position(|s| s.name == root_site) {
let mut visited = std::collections::HashSet::new();
let current_node = root_idx;
walk_node(current_node, 10, sites, &mut visited, overrides);
} else {
tracing::error!("Unable to build a path-weights graph because I can't find the root node");
return Err(UispIntegrationError::NoRootSite);
}
// Apply the lowest weight route
for site in sites.iter_mut() {
if site.site_type != UispSiteType::Root && !site.route_weights.is_empty() {
// Sort to find the lowest exit
site.route_weights.sort_by(|a, b| a.1.cmp(&b.1));
site.selected_parent = Some(site.route_weights[0].0);
}
}
Ok(())
}
fn walk_node(
idx: usize,
weight: u32,
sites: &mut Vec<UispSite>,
visited: &mut std::collections::HashSet<usize>,
overrides: &Vec<RouteOverride>,
) {
if visited.contains(&idx) {
return;
}
visited.insert(idx);
for i in 0..sites.len() {
if sites[i].parent_indices.contains(&idx) {
let from = sites[i].name.clone();
let to = sites[idx].name.clone();
if let Some(route_override) = overrides
.iter()
.find(|o| o.from_site == from && o.to_site == to)
{
sites[i].route_weights.push((idx, route_override.cost));
} else {
sites[i].route_weights.push((idx, weight));
}
walk_node(i, weight + 10, sites, visited, overrides);
}
}
}
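
A sketch of how the two functions fit together. RouteOverride's from_site, to_site and cost fields are taken from the lookup above; constructing one by struct literal, and the values used, are assumptions for illustration:

// Illustrative only.
let overrides = vec![RouteOverride {
    from_site: "Relay".to_string(),
    to_site: "Root".to_string(),
    cost: 5, // beats the default per-hop weight of 10
}];
walk_tree_for_routing(&mut sites, "Root", &overrides).unwrap();
// Every non-root site now has selected_parent set to the lowest-weight
// entry in its route_weights list.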

View File

@ -0,0 +1,48 @@
use crate::errors::UispIntegrationError;
use lqos_config::Config;
use tokio::join;
use tracing::{error, info};
use uisp::{DataLink, Device, Site};
/// Load required data from UISP, using the API.
/// Requires a valid configuration with working token data.
pub async fn load_uisp_data(
config: Config,
) -> Result<(Vec<Site>, Vec<Device>, Vec<DataLink>), UispIntegrationError> {
info!("Loading Devices, Sites and Data-Links from UISP");
let (devices, sites, data_links) = join!(
uisp::load_all_devices_with_interfaces(config.clone()),
uisp::load_all_sites(config.clone()),
uisp::load_all_data_links(config.clone()),
);
// Error Handling
if devices.is_err() {
error!("Error downloading devices list from UISP");
error!("{:?}", devices);
return Err(UispIntegrationError::UispConnectError);
}
let devices = devices.unwrap();
if sites.is_err() {
error!("Error downloading sites list from UISP");
error!("{:?}", sites);
return Err(UispIntegrationError::UispConnectError);
}
let sites = sites.unwrap();
if data_links.is_err() {
error!("Error downloading data_links list from UISP");
error!("{:?}", data_links);
return Err(UispIntegrationError::UispConnectError);
}
let data_links = data_links.unwrap();
info!(
"Loaded backing data: {} sites, {} devices, {} links",
sites.len(),
devices.len(),
data_links.len()
);
Ok((sites, devices, data_links))
}
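
Typical call site, sketched on the assumption that a valid Config (with a working UISP URL and token) is already in hand and the caller returns Result<_, UispIntegrationError>:

// Sketch: runs inside an async (tokio) context.
let (sites, devices, data_links) = load_uisp_data(config.clone()).await?;
println!(
    "UISP returned {} sites, {} devices and {} data links",
    sites.len(), devices.len(), data_links.len()
);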

View File

@ -0,0 +1,103 @@
use crate::uisp_types::{UispSite, UispSiteType};
use lqos_config::Config;
use tracing::warn;
use uisp::Device;
/// Counts how many devices are present at a given site ID, by simple
/// iteration of the device list.
pub fn count_devices_in_site(site_id: &str, devices: &[Device]) -> usize {
devices
.iter()
.filter(|d| {
if let Some(site) = &d.identification.site {
if let Some(parent) = &site.parent {
if parent.id == site_id {
return true;
}
}
}
false
})
.count()
}
/// Utility function to dump the site tree to the console.
/// Useful for debugging.
pub fn print_sites(sites: &[UispSite], root_idx: usize) {
println!("{}", sites[root_idx].name);
iterate_child_sites(sites, root_idx, 2);
}
fn iterate_child_sites(sites: &[UispSite], parent: usize, indent: usize) {
sites
.iter()
.enumerate()
.filter(|(_, s)| s.selected_parent == Some(parent))
.for_each(|(i, s)| {
// Indent print
for _ in 0..indent {
print!("-");
}
s.print_tree_summary();
println!();
if indent < 20 {
iterate_child_sites(sites, i, indent + 2);
}
});
}
/// Warns about any sites (containing devices) that have no parent, and promotes them
/// onto a synthetic "Orphaned Nodes" site that is itself parented off of the root.
///
/// # Arguments
/// * `sites` - The list of sites
/// * `devices_raw` - The raw device data
/// * `root_idx` - The index of the root site
/// * `config` - The configuration
pub fn warn_of_no_parents_and_promote(
sites: &mut Vec<UispSite>,
devices_raw: &[Device],
root_idx: usize,
config: &Config,
) {
let mut orphans = Vec::new();
sites
.iter()
.filter(|s| s.selected_parent.is_none())
.for_each(|s| {
if count_devices_in_site(&s.id, devices_raw) > 0 {
warn!("Site: {} has no parents", s.name);
orphans.push(s.id.clone());
}
});
// If we have orphans, promote them to be parented off of a special branch
if !orphans.is_empty() {
let orphanage_id = sites.len();
let orphanage = UispSite {
id: "orphans".to_string(),
name: "Orphaned Nodes".to_string(),
site_type: UispSiteType::Site,
uisp_parent_id: None,
parent_indices: Default::default(),
max_down_mbps: config.queues.downlink_bandwidth_mbps,
max_up_mbps: config.queues.uplink_bandwidth_mbps,
suspended: false,
device_indices: vec![],
route_weights: vec![],
selected_parent: Some(root_idx),
};
sites.push(orphanage);
for orphan_id in orphans {
if let Some((_, site)) = sites
.iter_mut()
.enumerate()
.find(|(idx, s)| *idx != root_idx && s.id == orphan_id)
{
site.selected_parent = Some(orphanage_id);
}
}
}
}
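
Order matters for the promotion pass: it runs after parent selection, so a short sketch of the sequence (sites, devices_raw, root_idx and config are assumed to come from the earlier steps):

// Sketch only.
warn_of_no_parents_and_promote(&mut sites, &devices_raw, root_idx, &config);
print_sites(&sites, root_idx); // orphaned sites now appear under "Orphaned Nodes"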

View File

@ -0,0 +1,28 @@
use crate::uisp_types::UispSite;
use lqos_config::Config;
/// Corrects zero capacity sites by setting their capacity to the parent's capacity.
/// If the site has no parent, the capacity is set to the default generated capacity.
///
/// # Arguments
/// * `sites` - The list of sites to correct
/// * `config` - The configuration
pub fn correct_zero_capacity_sites(sites: &mut [UispSite], config: &Config) {
for i in 0..sites.len() {
if sites[i].max_down_mbps == 0 {
if let Some(parent_idx) = sites[i].selected_parent {
sites[i].max_down_mbps = sites[parent_idx].max_down_mbps;
} else {
sites[i].max_down_mbps = config.queues.generated_pn_download_mbps;
}
}
if sites[i].max_up_mbps == 0 {
if let Some(parent_idx) = sites[i].selected_parent {
sites[i].max_up_mbps = sites[parent_idx].max_up_mbps;
} else {
sites[i].max_up_mbps = config.queues.generated_pn_upload_mbps;
}
}
}
}
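
A small illustration with invented numbers: a child reporting 0/0 Mbps inherits its parent's plan, while an unparented zero-capacity site falls back to the generated-node defaults in the configuration:

// Illustrative only: assumes a loaded Config is available as `config`.
let mut sites = vec![
    UispSite { name: "Parent".into(), max_down_mbps: 500, max_up_mbps: 100, ..Default::default() },
    UispSite { name: "Child".into(), ..Default::default() }, // reported as 0/0 by UISP
];
sites[1].selected_parent = Some(0);
correct_zero_capacity_sites(&mut sites, &config);
assert_eq!((sites[1].max_down_mbps, sites[1].max_up_mbps), (500, 100));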

View File

@ -0,0 +1,34 @@
mod flat;
mod full;
use crate::errors::UispIntegrationError;
use crate::ip_ranges::IpRanges;
use lqos_config::Config;
use tracing::{error, info};
/// Builds the network using the selected strategy.
pub async fn build_with_strategy(
config: Config,
ip_ranges: IpRanges,
) -> Result<(), UispIntegrationError> {
// Select a Strategy
match config.uisp_integration.strategy.to_lowercase().as_str() {
"flat" => {
info!("Strategy selected: flat");
flat::build_flat_network(config, ip_ranges).await?;
Ok(())
}
"full" => {
info!("Strategy selected: full");
full::build_full_network(config, ip_ranges).await?;
Ok(())
}
_ => {
error!(
"Unknown strategy: {}. Bailing.",
config.uisp_integration.strategy
);
Err(UispIntegrationError::UnknownIntegrationStrategy)
}
}
}
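
The strategy string comes from the uisp_integration section of the configuration and is matched case-insensitively; anything other than "flat" or "full" is rejected. A hedged sketch of the dispatch from a caller's point of view:

// Sketch: inside an async function returning Result<(), UispIntegrationError>,
// with config and ip_ranges already constructed.
build_with_strategy(config.clone(), ip_ranges).await?;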

View File

@ -0,0 +1,8 @@
/// Detected Access Point
#[derive(Debug)]
pub struct DetectedAccessPoint {
pub site_id: String,
pub device_id: String,
pub device_name: String,
pub child_sites: Vec<String>,
}

View File

@ -0,0 +1,11 @@
mod detected_ap;
mod uisp_data_link;
mod uisp_device;
mod uisp_site;
mod uisp_site_type;
pub use detected_ap::*;
pub use uisp_data_link::*;
pub use uisp_device::*;
pub use uisp_site::*;
pub use uisp_site_type::*;

View File

@ -0,0 +1,53 @@
use uisp::DataLink;
/// Shortened/Flattened version of the UISP DataLink type.
pub struct UispDataLink {
pub id: String,
pub from_site_id: String,
pub to_site_id: String,
pub from_site_name: String,
pub to_site_name: String,
pub can_delete: bool,
}
impl UispDataLink {
/// Converts a UISP DataLink into a UispDataLink.
///
/// # Arguments
/// * `value` - The UISP DataLink to convert
pub fn from_uisp(value: &DataLink) -> Option<Self> {
let mut from_site_id = String::new();
let mut to_site_id = String::new();
let mut to_site_name = String::new();
let mut from_site_name = String::new();
// Obvious Site Links
if let Some(from_site) = &value.from.site {
from_site_id = from_site.identification.id.clone();
from_site_name = from_site.identification.name.clone();
}
if let Some(to_site) = &value.to.site {
to_site_id = to_site.identification.id.clone();
to_site_name = to_site.identification.name.clone();
}
// Remove any links with no site targets
if from_site_id.is_empty() || to_site_id.is_empty() {
return None;
}
// Remove any links that go to themselves
if from_site_id == to_site_id {
return None;
}
Some(Self {
id: value.id.clone(),
from_site_id,
to_site_id,
from_site_name,
to_site_name,
can_delete: value.can_delete,
})
}
}

View File

@ -0,0 +1,164 @@
use crate::ip_ranges::IpRanges;
use lqos_config::Config;
use std::collections::HashSet;
use std::net::IpAddr;
use uisp::Device;
#[derive(Debug)]
pub struct Ipv4ToIpv6 {
pub ipv4: String,
pub ipv6: String,
}
/// Trimmed UISP device for easy use
pub struct UispDevice {
pub id: String,
pub name: String,
pub mac: String,
pub site_id: String,
pub download: u32,
pub upload: u32,
pub ipv4: HashSet<String>,
pub ipv6: HashSet<String>,
}
impl UispDevice {
/// Creates a new UispDevice from a UISP device
///
/// # Arguments
/// * `device` - The device to convert
/// * `config` - The configuration
/// * `ip_ranges` - The IP ranges to use for the network
pub fn from_uisp(device: &Device, config: &Config, ip_ranges: &IpRanges, ipv4_to_v6: &[Ipv4ToIpv6]) -> Self {
let mut ipv4 = HashSet::new();
let mut ipv6 = HashSet::new();
let mac = if let Some(id) = &device.identification.mac {
id.clone()
} else {
"".to_string()
};
let mut download = config.queues.generated_pn_download_mbps;
let mut upload = config.queues.generated_pn_upload_mbps;
if let Some(overview) = &device.overview {
if let Some(dl) = overview.downlinkCapacity {
download = dl as u32 / 1000000;
}
if let Some(ul) = overview.uplinkCapacity {
upload = ul as u32 / 1000000;
}
}
// Accumulate IP address listings
if let Some(interfaces) = &device.interfaces {
for interface in interfaces.iter() {
if let Some(addr) = &interface.addresses {
for address in addr.iter() {
if let Some(address) = &address.cidr {
if address.contains(':') {
// It's IPv6
ipv6.insert(address.clone());
} else {
// It's IPv4
// We can't trust UISP to provide the correct suffix, so change that to /32
if address.contains('/') {
let splits: Vec<_> = address.split('/').collect();
ipv4.insert(format!("{}/32", splits[0]));
// Check for a Mikrotik Mapping
if let Some(mapping) = ipv4_to_v6.iter().find(|m| m.ipv4 == splits[0]) {
ipv6.insert(mapping.ipv6.clone());
}
} else {
ipv4.insert(format!("{address}/32"));
// Check for a Mikrotik Mapping
if let Some(mapping) = ipv4_to_v6.iter().find(|m| m.ipv4 == address.as_str()) {
ipv6.insert(mapping.ipv6.clone());
}
}
}
}
}
}
}
}
// Remove IP addresses that are disallowed
ipv4.retain(|ip| {
let split: Vec<_> = ip.split('/').collect();
//let subnet: u8 = split[1].parse().unwrap();
let addr: IpAddr = split[0].parse().unwrap();
ip_ranges.is_permitted(addr)
});
ipv6.retain(|ip| {
let split: Vec<_> = ip.split('/').collect();
//let subnet: u8 = split[1].parse().unwrap();
let addr: IpAddr = split[0].parse().unwrap();
ip_ranges.is_permitted(addr)
});
// Handle any "exception CPE" entries
let mut site_id = device.get_site_id().unwrap_or("".to_string());
for exception in config.uisp_integration.exception_cpes.iter() {
if exception.cpe == device.get_name().unwrap() {
site_id = exception.parent.clone();
}
}
Self {
id: device.get_id(),
name: device.get_name().unwrap(),
mac,
site_id,
upload,
download,
ipv4,
ipv6,
}
}
pub fn has_address(&self) -> bool {
!(self.ipv4.is_empty() && self.ipv6.is_empty())
}
pub fn ipv4_list(&self) -> String {
if self.ipv4.is_empty() {
return "".to_string();
}
if self.ipv4.len() == 1 {
let mut result = "".to_string();
for ip in self.ipv4.iter() {
result = ip.clone();
}
return result;
}
let mut result = "".to_string();
for ip in self.ipv4.iter() {
result += &format!("{}, ", &ip);
}
result.truncate(result.len() - 2);
result.to_string()
}
pub fn ipv6_list(&self) -> String {
if self.ipv6.is_empty() {
return "".to_string();
}
if self.ipv6.len() == 1 {
let mut result = "".to_string();
for ip in self.ipv6.iter() {
result = ip.clone();
}
return result;
}
let mut result = "".to_string();
for ip in self.ipv6.iter() {
result += &format!("{}, ", &ip);
}
result.truncate(result.len() - 2);
let result = format!("[{result}]");
result
}
}
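
The two list helpers produce the strings written out for each shaped device: a single address is emitted bare, multiple IPv4 entries become a comma-separated list, and multiple IPv6 prefixes are additionally wrapped in brackets. A short illustration (addresses invented; HashSet iteration order is not guaranteed):

// Sketch: `device` is a UispDevice built by from_uisp above.
let csv_ipv4 = device.ipv4_list(); // e.g. "100.64.1.10/32" or "100.64.1.10/32, 100.64.1.11/32"
let csv_ipv6 = device.ipv6_list(); // e.g. "2001:db8::/56" or "[2001:db8::/56, 2001:db8:1::/56]"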

View File

@ -0,0 +1,174 @@
use crate::uisp_types::uisp_site_type::UispSiteType;
use crate::uisp_types::DetectedAccessPoint;
use lqos_config::Config;
use std::collections::HashSet;
use tracing::warn;
use uisp::{DataLink, Device, Site};
/// Shortened/flattened version of the UISP Site type.
#[derive(Debug)]
pub struct UispSite {
pub id: String,
pub name: String,
pub site_type: UispSiteType,
pub uisp_parent_id: Option<String>,
pub parent_indices: HashSet<usize>,
pub max_down_mbps: u32,
pub max_up_mbps: u32,
pub suspended: bool,
pub device_indices: Vec<usize>,
pub route_weights: Vec<(usize, u32)>,
pub selected_parent: Option<usize>,
}
impl Default for UispSite {
fn default() -> Self {
Self {
id: "".to_string(),
name: "".to_string(),
site_type: UispSiteType::Site,
uisp_parent_id: None,
parent_indices: Default::default(),
max_down_mbps: 0,
max_up_mbps: 0,
suspended: false,
device_indices: Vec::new(),
route_weights: Vec::new(),
selected_parent: None,
}
}
}
impl UispSite {
/// Converts a UISP Site into a UispSite.
pub fn from_uisp(value: &Site, config: &Config) -> Self {
let mut uisp_parent_id = None;
if let Some(id) = &value.identification {
if let Some(parent) = &id.parent {
if let Some(pid) = &parent.id {
uisp_parent_id = Some(pid.clone());
}
}
if let Some(status) = &id.status {
if status == "disconnected" {
warn!("Site {:?} is disconnected.", id.name);
}
}
}
let (mut max_down_mbps, mut max_up_mbps) = value.qos(
config.queues.generated_pn_download_mbps,
config.queues.generated_pn_upload_mbps,
);
let suspended = value.is_suspended();
if suspended {
match config.uisp_integration.suspended_strategy.as_str() {
"slow" => {
warn!(
"{} is suspended. Setting a slow speed.",
value.name_or_blank()
);
max_down_mbps = 1;
max_up_mbps = 1;
}
_ => warn!(
"{} is suspended. No strategy is set, leaving at full speed.",
value.name_or_blank()
),
}
}
Self {
id: value.id.clone(),
name: value.name_or_blank(),
site_type: UispSiteType::from_uisp_record(value).unwrap(),
parent_indices: HashSet::new(),
uisp_parent_id,
max_down_mbps,
max_up_mbps,
suspended,
..Default::default()
}
}
pub fn find_aps(
&self,
devices: &[Device],
data_links: &[DataLink],
sites: &[Site],
) -> Vec<DetectedAccessPoint> {
let mut links = Vec::new();
for device in devices.iter() {
if let Some(device_site) = device.get_site_id() {
if device_site == self.id {
// We're in the correct site, now look for anything that
// links to/from this device
let potential_ap_id = device.get_id();
let mut potential_ap = DetectedAccessPoint {
site_id: self.id.clone(),
device_id: potential_ap_id.clone(),
device_name: device.get_name().unwrap_or(String::new()),
child_sites: vec![],
};
for dl in data_links.iter() {
// The "I'm the FROM device case"
if let Some(from_device) = &dl.from.device {
if from_device.identification.id == potential_ap_id {
if let Some(to_site) = &dl.to.site {
if to_site.identification.id != self.id {
// We have a data link from this device that goes to
// another site.
if let Some(remote_site) =
sites.iter().find(|s| s.id == to_site.identification.id)
{
potential_ap.child_sites.push(remote_site.id.clone());
}
}
}
}
}
// The "I'm the TO the device case"
if let Some(to_device) = &dl.to.device {
if to_device.identification.id == potential_ap_id {
if let Some(from_site) = &dl.from.site {
if from_site.identification.id != self.id {
// We have a data link from this device that goes to
// another site.
if let Some(remote_site) = sites
.iter()
.find(|s| s.id == from_site.identification.id)
{
potential_ap.child_sites.push(remote_site.id.clone());
}
}
}
}
}
}
if !potential_ap.child_sites.is_empty() {
links.push(potential_ap);
}
}
}
}
links
}
pub fn print_tree_summary(&self) {
print!(
"{} ({}) {}/{} Mbps",
self.name, self.site_type, self.max_down_mbps, self.max_up_mbps
);
if self.suspended {
print!(" (SUSPENDED)");
}
if !self.device_indices.is_empty() {
print!(" [{} devices]", self.device_indices.len());
}
}
}
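
find_aps only reports a device as an access point when at least one data link from it terminates at a different site, so a quick way to inspect the detected topology is to run it across every flattened site. A sketch, assuming devices_raw, data_links_raw and sites_raw came from load_uisp_data and sites is the flattened Vec<UispSite>:

// Sketch only.
for site in sites.iter() {
    for ap in site.find_aps(&devices_raw, &data_links_raw, &sites_raw) {
        println!(
            "{} on {} feeds {} child site(s)",
            ap.device_name, site.name, ap.child_sites.len()
        );
    }
}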

View File

@ -0,0 +1,47 @@
use crate::errors::UispIntegrationError;
use std::fmt::{Display, Formatter};
use tracing::error;
use uisp::Site;
/// Defines the types of sites found in the UISP Tree
#[derive(Debug, PartialEq)]
pub enum UispSiteType {
Site,
Client,
ClientWithChildren,
AccessPoint,
Root,
SquashDeleted,
}
impl Display for UispSiteType {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match &self {
Self::Site => write!(f, "Site"),
Self::Client => write!(f, "Client"),
Self::ClientWithChildren => write!(f, "GeneratedNode"),
Self::AccessPoint => write!(f, "AP"),
Self::Root => write!(f, "Root"),
Self::SquashDeleted => write!(f, "SquashDeleted"),
}
}
}
impl UispSiteType {
/// Converts a UISP site record into a UispSiteType
pub fn from_uisp_record(site: &Site) -> Result<Self, UispIntegrationError> {
if let Some(id) = &site.identification {
if let Some(t) = &id.site_type {
return match t.as_str() {
"site" => Ok(Self::Site),
"endpoint" => Ok(Self::Client),
_ => {
error!("Unknown site type: {t}");
Err(UispIntegrationError::UnknownSiteType)
}
};
}
}
Err(UispIntegrationError::UnknownSiteType)
}
}
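
Only two raw UISP type strings map directly ("site" and "endpoint"); the remaining variants (AccessPoint, Root, ClientWithChildren, SquashDeleted) are assigned later by the tree-building passes rather than parsed from UISP. A minimal sketch (raw_site is an assumed uisp::Site from the API, and the caller returns Result<_, UispIntegrationError>):

// "site"     -> UispSiteType::Site
// "endpoint" -> UispSiteType::Client
// anything else -> Err(UispIntegrationError::UnknownSiteType)
let t = UispSiteType::from_uisp_record(&raw_site)?;
assert!(matches!(t, UispSiteType::Site | UispSiteType::Client));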