Regenerate cargo lock

This commit is contained in:
Herbert Wolverson
2024-05-13 14:24:19 -05:00
28 changed files with 2048 additions and 713 deletions

View File

@@ -45,7 +45,16 @@ def buildFlatGraph():
if (site['qos']['downloadSpeed']) and (site['qos']['uploadSpeed']):
download = int(round(site['qos']['downloadSpeed']/1000000))
upload = int(round(site['qos']['uploadSpeed']/1000000))
if site['identification'] is not None and site['identification']['suspended'] is not None and site['identification']['suspended'] == True:
if uisp_suspended_strategy() == "ignore":
print("WARNING: Site " + name + " is suspended")
continue
if uisp_suspended_strategy() == "slow":
print("WARNING: Site " + name + " is suspended")
download = 1
upload = 1
if site['identification']['status'] == "disconnected":
print("WARNING: Site " + name + " is disconnected")
node = NetworkNode(id=id, displayName=name, type=NodeType.client, download=download, upload=upload, address=address, customerName=customerName)
net.addRawNode(node)
for device in devices:
@@ -290,43 +299,53 @@ def loadRoutingOverrides():
def findNodesBranchedOffPtMP(siteList, dataLinks, sites, rootSite, foundAirFibersBySite):
nodeOffPtMP = {}
for site in siteList:
id = site['id']
name = site['name']
if id != rootSite['id']:
if id not in foundAirFibersBySite:
trueParent = findInSiteListById(siteList, id)['parent']
#parent = findInSiteListById(siteList, id)['parent']
isTrueSite = False
for siteItem in sites:
if siteItem['identification']['id'] == id:
if siteItem['identification']['type'] == 'site':
isTrueSite = True
if isTrueSite:
if site['parent'] is not None:
parent = site['parent']
for link in dataLinks:
if (link['to']['site'] is not None) and (link['to']['site']['identification'] is not None):
if ('identification' in link['to']['site']) and (link['to']['site']['identification'] is not None) and link['from'] is not None and link['from']['site'] is not None and link['from']['site']['identification'] is not None:
# Respect parent defined by topology and overrides
if link['from']['site']['identification']['id'] == trueParent:
if link['to']['site']['identification']['id'] == id:
if (link['from']['device']['overview']['wirelessMode'] == 'ap-ptmp') or (link['from']['device']['overview']['wirelessMode'] == 'ap'):
if 'overview' in link['to']['device']:
if ('downlinkCapacity' in link['to']['device']['overview']) and ('uplinkCapacity' in link['to']['device']['overview']):
if (link['to']['device']['overview']['downlinkCapacity'] is not None) and (link['to']['device']['overview']['uplinkCapacity'] is not None):
apID = link['from']['device']['identification']['id']
# Capacity of the PtMP client radio feeding the PoP will be used as the site bandwidth limit
download = int(round(link['to']['device']['overview']['downlinkCapacity']/1000000))
upload = int(round(link['to']['device']['overview']['uplinkCapacity']/1000000))
nodeOffPtMP[id] = {'download': download,
'upload': upload,
parent: apID
}
if use_ptmp_as_parent():
site['parent'] = apID
print('Site ' + name + ' will use PtMP AP as parent.')
if use_ptmp_as_parent():
for site in siteList:
id = site['id']
name = site['name']
if id != rootSite['id']:
if id not in foundAirFibersBySite:
trueParent = findInSiteListById(siteList, id)['parent']
#parent = findInSiteListById(siteList, id)['parent']
isTrueSite = False
for siteItem in sites:
if siteItem['identification']['id'] == id:
if siteItem['identification']['type'] == 'site':
isTrueSite = True
if isTrueSite:
if site['parent'] is not None:
parent = site['parent']
# Check how many links connect site to parent
# If only one link connects them, we can then consider if it's a node off PtMP
howManyLinksToParent = 0
for link in dataLinks:
if (link['to']['site'] is not None) and (link['to']['site']['identification'] is not None):
if ('identification' in link['to']['site']) and (link['to']['site']['identification'] is not None) and link['from'] is not None and link['from']['site'] is not None and link['from']['site']['identification'] is not None:
if link['from']['site']['identification']['id'] == trueParent:
if link['to']['site']['identification']['id'] == id:
howManyLinksToParent += 1
if howManyLinksToParent == 1:
for link in dataLinks:
if (link['to']['site'] is not None) and (link['to']['site']['identification'] is not None):
if ('identification' in link['to']['site']) and (link['to']['site']['identification'] is not None) and link['from'] is not None and link['from']['site'] is not None and link['from']['site']['identification'] is not None:
# Respect parent defined by topology and overrides
if link['from']['site']['identification']['id'] == trueParent:
if link['to']['site']['identification']['id'] == id:
if (link['from']['device']['overview']['wirelessMode'] == 'ap-ptmp') or (link['from']['device']['overview']['wirelessMode'] == 'ap'):
if 'overview' in link['to']['device']:
if ('downlinkCapacity' in link['to']['device']['overview']) and ('uplinkCapacity' in link['to']['device']['overview']):
if (link['to']['device']['overview']['downlinkCapacity'] is not None) and (link['to']['device']['overview']['uplinkCapacity'] is not None):
apID = link['from']['device']['identification']['id']
# Capacity of the PtMP client radio feeding the PoP will be used as the site bandwidth limit
download = int(round(link['to']['device']['overview']['downlinkCapacity']/1000000))
upload = int(round(link['to']['device']['overview']['uplinkCapacity']/1000000))
nodeOffPtMP[id] = {'download': download,
'upload': upload,
parent: apID
}
if use_ptmp_as_parent():
site['parent'] = apID
print('Site ' + name + ' will use PtMP AP ' + link['from']['device']['identification']['name'] + ' as parent from site ' + link['from']['site']['identification']['name'])
return siteList, nodeOffPtMP
def handleMultipleInternetNodes(sites, dataLinks, uispSite):

View File

@@ -53,6 +53,15 @@ uisp_reporting_interval_seconds = 300
ignore_subnets = []
allow_subnets = [ "172.16.0.0/12", "10.0.0.0/8", "100.64.0.0/16", "192.168.0.0/16" ]
[flows]
# You need to change the netflow_port, netflow_ip to your receiver (and uncomment them), and netflow_version must be either 5 (IPv4 only, faster) or 9 (IPv6 and 4, much larger packets)
flow_timeout_seconds = 30
netflow_enabled = false
# netflow_port = 2055
# netflow_ip = "127.0.0.1"
# netflow_version = 9
do_not_track_subnets = [ "192.168.66.0/24" ]
[integration_common]
circuit_name_as_address = false
always_overwrite_network_json = false
@@ -99,4 +108,4 @@ enable_influxdb = false
url = "http://localhost:8086"
org = "libreqos"
bucket = "Your ISP Name Here"
token = ""
token = ""

516
src/rust/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize};
/// One or more `BusRequest` objects must be included in a `BusSession`
/// request. Each `BusRequest` represents a single request for action
/// or data.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub enum BusRequest {
/// A generic "is it alive?" test. Returns an `Ack`.
Ping,
@@ -113,6 +113,9 @@ pub enum BusRequest {
/// Requests a real-time adjustment of the `lqosd` tuning settings
UpdateLqosDTuning(u64, Tunables),
/// Requests that the configuration be updated
UpdateLqosdConfig(Box<lqos_config::Config>),
/// Request that we start watching a circuit's queue
WatchQueue(String),

View File

@@ -1,6 +1,8 @@
//! Manages the `/etc/lqos.conf` file.
mod etclqos_migration;
use std::path::Path;
use self::migration::migrate_if_needed;
pub use self::v15::Config;
pub use etclqos_migration::*;
@@ -70,6 +72,38 @@ pub fn enable_long_term_stats(license_key: String) -> Result<(), LibreQoSConfigE
Ok(())
}
/// Update the configuration on disk
///
/// Replaces the cached in-memory configuration, backs up any existing
/// `/etc/lqos.conf` to `/etc/lqos.conf.webbackup` (one level deep), then
/// writes the new configuration as pretty-printed TOML.
///
/// Errors with `CannotCopy`, `SerializeError` or `CannotWrite` on failure.
// NOTE(review): the CONFIG mutex is held across the disk I/O below —
// presumably fine for this low-frequency admin path; confirm nothing
// latency-sensitive contends on CONFIG.
pub fn update_config(new_config: &Config) -> Result<(), LibreQoSConfigError> {
log::info!("Updating stored configuration");
// Swap the in-memory copy first.
// NOTE(review): if serialization or the write below fails, the in-memory
// config and the on-disk file diverge until the next reload — verify
// this ordering is intended.
let mut lock = CONFIG.lock().unwrap();
*lock = Some(new_config.clone());
// Does the configuration exist?
let config_path = Path::new("/etc/lqos.conf");
if config_path.exists() {
// Keep a single backup of the previous file before overwriting it.
let backup_path = Path::new("/etc/lqos.conf.webbackup");
std::fs::copy(config_path, backup_path)
.map_err(|e| {
log::error!("Unable to create backup configuration: {e:?}");
LibreQoSConfigError::CannotCopy
})?;
}
// Serialize the new one
let serialized = toml::to_string_pretty(new_config)
.map_err(|e| {
log::error!("Unable to serialize new configuration to TOML: {e:?}");
LibreQoSConfigError::SerializeError
})?;
std::fs::write(config_path, serialized)
.map_err(|e| {
log::error!("Unable to write new configuration: {e:?}");
LibreQoSConfigError::CannotWrite
})?;
Ok(())
}
#[derive(Debug, Error)]
pub enum LibreQoSConfigError {
#[error("Unable to read /etc/lqos.conf. See other errors for details.")]
@@ -90,4 +124,6 @@ pub enum LibreQoSConfigError {
CannotWrite,
#[error("Unable to read IP")]
CannotReadIP,
#[error("Unable to serialize config")]
SerializeError,
}

View File

@@ -3,7 +3,7 @@
use serde::{Deserialize, Serialize};
#[derive(Clone, Serialize, Deserialize, Debug)]
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]
pub struct UsageStats {
/// Are we allowed to send stats at all?
pub send_anonymous: bool,

View File

@@ -5,7 +5,7 @@
use serde::{Deserialize, Serialize};
/// Represents a two-interface bridge configuration.
#[derive(Clone, Serialize, Deserialize, Debug)]
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]
pub struct BridgeConfig {
/// Use the XDP-accelerated bridge?
pub use_xdp_bridge: bool,
@@ -27,7 +27,7 @@ impl Default for BridgeConfig {
}
}
#[derive(Clone, Serialize, Deserialize, Debug)]
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]
pub struct SingleInterfaceConfig {
/// The name of the interface
pub interface: String,

View File

@@ -4,7 +4,7 @@
use serde::{Serialize, Deserialize};
#[derive(Clone, Serialize, Deserialize, Debug)]
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]
pub struct FlowConfig {
pub flow_timeout_seconds: u64,
pub netflow_enabled: bool,

View File

@@ -1,6 +1,6 @@
use serde::{Serialize, Deserialize};
#[derive(Clone, Serialize, Deserialize, Debug)]
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]
pub struct InfluxDbConfig {
pub enable_influxdb: bool,
pub url: String,

View File

@@ -2,7 +2,7 @@
use serde::{Deserialize, Serialize};
#[derive(Clone, Serialize, Deserialize, Debug)]
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]
pub struct IntegrationConfig {
/// Replace names with addresses?
pub circuit_name_as_address: bool,

View File

@@ -1,6 +1,6 @@
use serde::{Serialize, Deserialize};
#[derive(Clone, Serialize, Deserialize, Debug)]
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]
pub struct IpRanges {
pub ignore_subnets: Vec<String>,
pub allow_subnets: Vec<String>,

View File

@@ -2,7 +2,7 @@
use serde::{Serialize, Deserialize};
#[derive(Serialize, Deserialize, Clone, Debug)]
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct LongTermStats {
/// Should we store long-term stats at all?
pub gather_stats: bool,

View File

@@ -1,6 +1,6 @@
use serde::{Serialize, Deserialize};
#[derive(Clone, Debug, Serialize, Deserialize)]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct PowercodeIntegration {
pub enable_powercode: bool,
pub powercode_api_key: String,

View File

@@ -2,7 +2,7 @@
use serde::{Serialize, Deserialize};
#[derive(Clone, Serialize, Deserialize, Debug)]
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]
pub struct QueueConfig {
/// Which SQM to use by default
pub default_sqm: String,

View File

@@ -1,6 +1,6 @@
use serde::{Serialize, Deserialize};
#[derive(Clone, Debug, Serialize, Deserialize)]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct SonarIntegration {
pub enable_sonar: bool,
pub sonar_api_url: String,

View File

@@ -1,6 +1,6 @@
use serde::{Serialize, Deserialize};
#[derive(Clone, Debug, Serialize, Deserialize)]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct SplynxIntegration {
pub enable_spylnx: bool,
pub api_key: String,

View File

@@ -8,7 +8,7 @@ use sha2::Digest;
use uuid::Uuid;
/// Top-level configuration file for LibreQoS.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct Config {
/// Version number for the configuration file.
/// This will be set to "1.5". Versioning will make

View File

@@ -4,7 +4,7 @@ use serde::{Deserialize, Serialize};
/// Represents a set of `sysctl` and `ethtool` tweaks that may be
/// applied (in place of the previous version's offload service)
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)]
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]
pub struct Tunables {
/// Should the `irq_balance` system service be stopped?
pub stop_irq_balance: bool,

View File

@@ -1,6 +1,6 @@
use serde::{Serialize, Deserialize};
#[derive(Clone, Debug, Serialize, Deserialize)]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct UispIntegration {
pub enable_uisp: bool,
pub token: String,
@@ -18,7 +18,7 @@ pub struct UispIntegration {
pub use_ptmp_as_parent: bool,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct ExceptionCpe {
pub cpe: String,
pub parent: String,

View File

@@ -13,7 +13,7 @@ mod program_control;
mod shaped_devices;
pub use authentication::{UserRole, WebUsers};
pub use etc::{load_config, Config, enable_long_term_stats, Tunables, BridgeConfig};
pub use etc::{load_config, Config, enable_long_term_stats, Tunables, BridgeConfig, update_config};
pub use network_json::{NetworkJson, NetworkJsonNode, NetworkJsonTransport};
pub use program_control::load_libreqos;
pub use shaped_devices::{ConfigShapedDevices, ShapedDevice};

View File

@@ -110,6 +110,13 @@ impl ConfigShapedDevices {
Ok(Self { devices, trie })
}
/// Replace the current shaped devices list with a new one
///
/// Also rebuilds the IP lookup trie so address-to-device resolution
/// reflects the freshly supplied list.
pub fn replace_with_new_data(&mut self, devices: Vec<ShapedDevice>) {
self.devices = devices;
// NOTE(review): dumps the entire device list at info level — looks like
// leftover debugging; consider demoting to debug! or removing.
log::info!("{:?}", self.devices);
self.trie = ConfigShapedDevices::make_trie(&self.devices);
}
fn make_trie(
devices: &[ShapedDevice],
) -> ip_network_table::IpNetworkTable<usize> {
@@ -156,6 +163,7 @@ impl ConfigShapedDevices {
error!("Unable to write ShapedDevices.csv. Permissions?");
return Err(ShapedDevicesError::WriteFail);
}
//println!("Would write to file: {}", csv);
Ok(())
}
}

View File

@@ -1,8 +1,10 @@
use crate::{auth_guard::AuthGuard, cache_control::NoCache};
use default_net::get_interfaces;
use lqos_bus::{bus_request, BusRequest, BusResponse};
use lqos_config::{Tunables, Config};
use rocket::{fs::NamedFile, serde::{json::Json, Serialize}};
use lqos_config::{Tunables, Config, ShapedDevice};
use rocket::{fs::NamedFile, serde::{json::Json, Serialize, Deserialize}};
use rocket::serde::json::Value;
use crate::tracker::SHAPED_DEVICES;
// Note that NoCache can be replaced with a cache option
// once the design work is complete.
@@ -39,6 +41,54 @@ pub async fn get_current_lqosd_config(
NoCache::new(Json(config))
}
#[post("/api/update_config", data = "<data>")]
/// Push an updated `lqosd` configuration over the local bus.
///
/// Returns "Ok" on success, or an error string if the bus request fails.
/// (The previous version unwrapped the bus result, which would panic the
/// Rocket request worker whenever lqosd was unreachable.)
pub async fn update_lqosd_config(
data: Json<Config>
) -> String {
let config: Config = (*data).clone();
match bus_request(vec![BusRequest::UpdateLqosdConfig(Box::new(config))]).await {
Ok(..) => "Ok".to_string(),
// Report the failure to the caller instead of panicking the worker.
Err(e) => format!("Error updating configuration: {e:?}"),
}
}
#[derive(Deserialize, Clone)]
#[serde(crate = "rocket::serde")]
/// Request payload for `/api/update_network_and_devices`: a full
/// replacement ShapedDevices list plus the raw `network.json` document.
pub struct NetworkAndDevices {
// Replacement contents for ShapedDevices.csv.
shaped_devices: Vec<ShapedDevice>,
// Raw JSON tree to be written to network.json verbatim.
network_json: Value,
}
#[post("/api/update_network_and_devices", data = "<data>")]
/// Replace both `network.json` and `ShapedDevices.csv` on disk, taking a
/// one-deep `.backup` copy of each existing file before overwriting it,
/// and swap the in-memory shaped-devices cache to match.
// NOTE(review): every filesystem step below uses .unwrap() — any I/O
// error panics the request worker; consider returning an error string.
pub async fn update_network_and_devices(
data: Json<NetworkAndDevices>
) -> String {
let config = lqos_config::load_config().unwrap();
// Save network.json
let serialized_string = rocket::serde::json::to_pretty_string(&data.network_json).unwrap();
let net_json_path = std::path::Path::new(&config.lqos_directory).join("network.json");
let net_json_backup_path = std::path::Path::new(&config.lqos_directory).join("network.json.backup");
if net_json_path.exists() {
// Make a backup
std::fs::copy(&net_json_path, net_json_backup_path).unwrap();
}
std::fs::write(net_json_path, serialized_string).unwrap();
// Save the Shaped Devices
let sd_path = std::path::Path::new(&config.lqos_directory).join("ShapedDevices.csv");
let sd_backup_path = std::path::Path::new(&config.lqos_directory).join("ShapedDevices.csv.backup");
if sd_path.exists() {
std::fs::copy(&sd_path, sd_backup_path).unwrap();
}
// Swap the in-memory cache (this also rebuilds the IP lookup trie),
// then persist the new list back to CSV.
let mut lock = SHAPED_DEVICES.write().unwrap();
lock.replace_with_new_data(data.shaped_devices.clone());
// NOTE(review): looks like leftover debug output — consider removing.
println!("{:?}", lock.devices);
lock.write_csv(&format!("{}/ShapedDevices.csv", config.lqos_directory)).unwrap();
"Ok".to_string()
}
#[post("/api/lqos_tuning/<period>", data = "<tuning>")]
pub async fn update_lqos_tuning(
auth: AuthGuard,

View File

@@ -86,7 +86,9 @@ fn rocket() -> _ {
//config_control::get_current_python_config,
config_control::get_current_lqosd_config,
//config_control::update_python_config,
config_control::update_network_and_devices,
config_control::update_lqos_tuning,
config_control::update_lqosd_config,
auth_guard::create_first_user,
auth_guard::login,
auth_guard::admin_check,
@@ -97,6 +99,7 @@ fn rocket() -> _ {
network_tree::network_tree_summary,
network_tree::node_names,
network_tree::funnel_for_queue,
network_tree::get_network_json,
config_control::stats,
// Supporting files
static_pages::bootsrap_css,

View File

@@ -6,6 +6,7 @@ use rocket::{
fs::NamedFile,
serde::{json::Json, Serialize, msgpack::MsgPack},
};
use rocket::serde::json::Value;
use crate::{cache_control::NoCache, tracker::SHAPED_DEVICES};
@@ -129,3 +130,17 @@ pub async fn funnel_for_queue(
}
NoCache::new(MsgPack(result))
}
#[get("/api/network_json")]
/// Serve the raw `network.json` document from the configured LibreQoS
/// directory, or the sentinel string "Not done yet" when the config
/// cannot be loaded or the file is missing, unreadable, or invalid JSON.
pub async fn get_network_json() -> NoCache<Json<Value>> {
if let Ok(config) = lqos_config::load_config() {
let path = std::path::Path::new(&config.lqos_directory).join("network.json");
if path.exists() {
// Reading or parsing can fail on an unreadable or corrupt file;
// fall through to the sentinel instead of panicking the worker
// (the previous version unwrapped both operations).
if let Ok(raw) = std::fs::read_to_string(path) {
if let Ok(json) = rocket::serde::json::from_str::<Value>(&raw) {
return NoCache::new(Json(json));
}
}
}
}
NoCache::new(Json(Value::String("Not done yet".to_string())))
}

File diff suppressed because it is too large Load Diff

View File

@@ -18,4 +18,5 @@
.row220 { height: 220px; }
.redact { font-display: unset; }
footer > a { color: white; }
footer { color: white; font-style: italic; }
footer { color: white; font-style: italic; }
.invalid { background-color: #ffdddd }

View File

@@ -178,6 +178,13 @@ fn handle_bus_requests(
lqos_bus::BusResponse::Ack
}
BusRequest::UpdateLqosDTuning(..) => tuning::tune_lqosd_from_bus(req),
BusRequest::UpdateLqosdConfig(config) => {
let result = lqos_config::update_config(config);
if result.is_err() {
log::error!("Error updating config: {:?}", result);
}
BusResponse::Ack
},
#[cfg(feature = "equinix_tests")]
BusRequest::RequestLqosEquinixTest => lqos_daht_test::lqos_daht_test(),
BusRequest::ValidateShapedDevicesCsv => {

View File

@@ -1,9 +1,9 @@
import time
import datetime
from LibreQoS import refreshShapers, refreshShapersUpdateOnly
#from graphInfluxDB import refreshBandwidthGraphs, refreshLatencyGraphs
from graphInfluxDB import refreshBandwidthGraphs, refreshLatencyGraphs
from liblqos_python import automatic_import_uisp, automatic_import_splynx, queue_refresh_interval_mins, \
automatic_import_powercode, automatic_import_sonar
automatic_import_powercode, automatic_import_sonar, influx_db_enabled
if automatic_import_uisp():
from integrationUISP import importFromUISP
if automatic_import_splynx():
@@ -39,15 +39,15 @@ def importFromCRM():
except:
print("Failed to import from Sonar")
#def graphHandler():
# try:
# refreshBandwidthGraphs()
# except:
# print("Failed to update bandwidth graphs")
# try:
# refreshLatencyGraphs()
# except:
# print("Failed to update latency graphs")
def graphHandler():
    """Refresh the InfluxDB bandwidth and latency graphs.

    Each refresh is attempted independently so a failure in one does not
    prevent the other; failures are reported and swallowed because this
    runs on a scheduler interval and must not kill the scheduler job.
    """
    try:
        refreshBandwidthGraphs()
    except Exception:
        # A bare `except:` would also trap SystemExit/KeyboardInterrupt and
        # block clean shutdown of the scheduler; Exception is the right net.
        print("Failed to update bandwidth graphs")
    try:
        refreshLatencyGraphs()
    except Exception:
        print("Failed to update latency graphs")
def importAndShapeFullReload():
importFromCRM()
@@ -62,7 +62,7 @@ if __name__ == '__main__':
ads.add_job(importAndShapePartialReload, 'interval', minutes=queue_refresh_interval_mins(), max_instances=1)
#if influxDBEnabled:
# ads.add_job(graphHandler, 'interval', seconds=10, max_instances=1)
if influx_db_enabled():
ads.add_job(graphHandler, 'interval', seconds=10, max_instances=1)
ads.start()