Merge pull request #616 from LibreQoE/embed_python

No more embedded Python version requirement
This commit is contained in:
Robert Chacón
2024-12-20 23:32:39 -08:00
committed by GitHub
10 changed files with 238 additions and 280 deletions

View File

@@ -60,7 +60,6 @@ pushd rust > /dev/null || exit
#cargo clean
cargo build --all --release
popd > /dev/null || exit
LINKED_PYTHON=$(ldd rust/target/release/lqosd | grep libpython | sed -e '/^[^\t]/ d' | sed -e 's/\t//' | sed -e 's/.*=..//' | sed -e 's/ (0.*)//')
# Create the post-installation file
pushd "$DEBIAN_DIR" > /dev/null || exit
@@ -95,17 +94,6 @@ echo "/bin/systemctl enable lqosd lqos_scheduler" >> postinst
echo "/bin/systemctl start lqosd" >> postinst
echo "/bin/systemctl start lqos_scheduler" >> postinst
echo "popd" >> postinst
# Attempting to fixup versioning issues with libpython.
# This requires that you already have LibreQoS installed.
echo "if ! test -f $LINKED_PYTHON; then" >> postinst
echo " if test -f /lib/x86_64-linux-gnu/libpython3.12.so.1.0; then" >> postinst
echo " ln -s /lib/x86_64-linux-gnu/libpython3.12.so.1.0 $LINKED_PYTHON" >> postinst
echo " fi" >> postinst
echo " if test -f /lib/x86_64-linux-gnu/libpython3.11.so.1.0; then" >> postinst
echo " ln -s /lib/x86_64-linux-gnu/libpython3.11.so.1.0 $LINKED_PYTHON" >> postinst
echo " fi" >> postinst
echo "fi" >> postinst
# End of symlink insanity
chmod a+x postinst
# Uninstall Script

View File

@@ -40,7 +40,7 @@ rustup update
echo "Please wait while the system is compiled. Service will not be interrupted during this stage."
PROGS="lqosd lqtop xdp_iphash_to_cpu_cmdline xdp_pping lqusers lqos_map_perf uisp_integration lqos_support_tool"
mkdir -p bin/static
pushd rust > /dev/null
pushd rust > /dev/null || exit
#cargo clean
for prog in $PROGS
do
@@ -55,44 +55,46 @@ do
FEATURE=""
fi
echo "Building lqosd"
pushd lqosd > /dev/null
pushd lqosd > /dev/null || exit
cargo build $BUILD_FLAGS $FEATURE
if [ $? -ne 0 ]; then
echo "Cargo build failed. Exiting with code 1."
exit 1
fi
popd > /dev/null
popd > /dev/null || exit
else
pushd $prog > /dev/null
pushd $prog > /dev/null || exit
cargo build $BUILD_FLAGS
if [ $? -ne 0 ]; then
echo "Cargo build failed. Exiting with code 1."
exit 1
fi
popd || exit
fi
done
popd > /dev/null
popd > /dev/null || exit
echo "Installing new binaries into bin folder."
pushd rust > /dev/null || exit
for prog in $PROGS
do
echo "Installing $prog in bin folder"
cp target/$TARGET/$prog ../bin/$prog.new
cp target/$TARGET/$prog ../bin/$prog.new || exit
# Use a move to avoid file locking
mv ../bin/$prog.new ../bin/$prog
mv ../bin/$prog.new ../bin/$prog || exit
done
popd > /dev/null
popd > /dev/null || exit
# Copy the node manager's static web content
mkdir -p bin/static2/vendor
pushd rust/lqosd > /dev/null
pushd rust/lqosd > /dev/null || exit
./copy_files.sh
popd > /dev/null
popd > /dev/null || exit
# Copy the Python library for LibreQoS.py et al.
pushd rust/lqos_python > /dev/null
pushd rust/lqos_python > /dev/null || exit
cargo build $BUILD_FLAGS
popd > /dev/null
popd > /dev/null || exit
cp rust/target/$TARGET/liblqos_python.so ./liblqos_python.so.new
mv liblqos_python.so.new liblqos_python.so

85
src/configMigrator.py Normal file
View File

@@ -0,0 +1,85 @@
#!/usr/bin/python3
import ispConfig
import json
# Function that tries to retrieve a value from ispConfig and returns it if it exists, otherwise returns a default value
def getIspConfigValue(value, defaultValue):
    """Return the attribute named `value` from the legacy ispConfig module.

    Falls back to `defaultValue` when the attribute is absent, so this
    migrator tolerates older/partial ispConfig.py files.

    :param value: attribute name to look up on the ispConfig module
    :param defaultValue: value returned when the attribute does not exist
    """
    # getattr's three-argument form is the idiomatic equivalent of
    # try/getattr/except AttributeError: return default.
    return getattr(ispConfig, value, defaultValue)
# Declarative migration table: (output JSON key, ispConfig attribute, fallback).
# Most entries use the same name for key and attribute; the one exception is
# 'influxEnabled', which intentionally reads the 'influxDBEnabled' attribute.
CONFIG_SPEC = [
    ('sqm', 'sqm', 'cake diffserv4'),
    ('monitorOnlyMode', 'monitorOnlyMode', False),
    ('upstreamBandwidthCapacityDownloadMbps', 'upstreamBandwidthCapacityDownloadMbps', 1000),
    ('upstreamBandwidthCapacityUploadMbps', 'upstreamBandwidthCapacityUploadMbps', 1000),
    ('generatedPNDownloadMbps', 'generatedPNDownloadMbps', 1000),
    ('generatedPNUploadMbps', 'generatedPNUploadMbps', 1000),
    ('interfaceA', 'interfaceA', 'veth_tointernal'),
    ('interfaceB', 'interfaceB', 'veth_toexternal'),
    ('queueRefreshIntervalMins', 'queueRefreshIntervalMins', 30),
    ('OnAStick', 'OnAStick', False),
    ('StickVlanA', 'StickVlanA', 0),
    ('StickVlanB', 'StickVlanB', 0),
    ('enableActualShellCommands', 'enableActualShellCommands', True),
    ('runShellCommandsAsSudo', 'runShellCommandsAsSudo', False),
    ('queuesAvailableOverride', 'queuesAvailableOverride', 0),
    ('useBinPackingToBalanceCPU', 'useBinPackingToBalanceCPU', False),
    # Influx
    ('influxEnabled', 'influxDBEnabled', False),
    ('influxDBurl', 'influxDBurl', 'http://localhost:8086'),
    ('influxDBBucket', 'influxDBBucket', 'libreqos'),
    ('influxDBOrg', 'influxDBOrg', 'libreqos'),
    ('influxDBtoken', 'influxDBtoken', ''),
    # Common
    ('circuitNameUseAddress', 'circuitNameUseAddress', True),
    ('overwriteNetworkJSONalways', 'overwriteNetworkJSONalways', False),
    ('ignoreSubnets', 'ignoreSubnets', ["192.168.0.0/16"]),
    ('allowedSubnets', 'allowedSubnets', ["100.64.0.0/10"]),
    ('excludeSites', 'excludeSites', []),
    ('findIPv6usingMikrotikAPI', 'findIPv6usingMikrotikAPI', False),
    # Spylnx
    ('automaticImportSplynx', 'automaticImportSplynx', False),
    ('splynx_api_key', 'splynx_api_key', ''),
    ('splynx_api_secret', 'splynx_api_secret', ''),
    ('splynx_api_url', 'splynx_api_url', 'https://splynx.example.com/api/v1/'),
    # UISP
    ('automaticImportUISP', 'automaticImportUISP', False),
    ('uispAuthToken', 'uispAuthToken', ''),
    ('UISPbaseURL', 'UISPbaseURL', 'https://unms.example.com'),
    ('uispSite', 'uispSite', 'Main Site'),
    ('uispStrategy', 'uispStrategy', 'full'),
    ('uispSuspendedStrategy', 'uispSuspendedStrategy', 'none'),
    ('airMax_capacity', 'airMax_capacity', 0.65),
    ('ltu_capacity', 'ltu_capacity', 0.90),
    ('bandwidthOverheadFactor', 'bandwidthOverheadFactor', 1.0),
    ('committedBandwidthMultiplier', 'committedBandwidthMultiplier', 0.98),
    ('exceptionCPEs', 'exceptionCPEs', {}),
    # API
    ('apiUsername', 'apiUsername', 'testUser'),
    ('apiPassword', 'apiPassword', 'testPassword'),
    ('apiHostIP', 'apiHostIP', '127.0.0.1'),
    ('apiHostPost', 'apiHostPost', 5000),
    # Powercode
    ('automaticImportPowercode', 'automaticImportPowercode', False),
    ('powercode_api_key', 'powercode_api_key', ''),
    ('powercode_api_url', 'powercode_api_url', 'https://powercode.example.com/api/v1/'),
    # Sonar
    ('automaticImportSonar', 'automaticImportSonar', False),
    ('sonar_api_key', 'sonar_api_key', ''),
    ('sonar_api_url', 'sonar_api_url', 'https://sonar.example.com/api/v1/'),
    ('snmp_community', 'snmp_community', 'public'),
    ('sonar_active_status_ids', 'sonar_active_status_ids', []),
    ('sonar_airmax_ap_model_ids', 'sonar_airmax_ap_model_ids', []),
    ('sonar_ltu_ap_model_ids', 'sonar_ltu_ap_model_ids', []),
]
# Dict literals preserve insertion order, so the emitted JSON keys keep
# the same order as the spec above.
oldConfig = {key: getIspConfigValue(attr, default) for (key, attr, default) in CONFIG_SPEC}
print(json.dumps(oldConfig))

View File

@@ -77,5 +77,12 @@ def pullMikrotikIPv6_Mock(CsvPath):
return "{\n\"172.29.200.2\": \"2602:fdca:800:1500::/56\"\n}"
if __name__ == '__main__':
print("Mikrotik IPv6 Finder")
# If the first argument is a string, that's the path to the csv file
import sys
if len(sys.argv) > 1:
CsvPath = sys.argv[1]
print(pullMikrotikIPv6(CsvPath))
else:
print("Mikrotik IPv6 Finder")
#print(pullMikrotikIPv6())

2
src/rust/Cargo.lock generated
View File

@@ -1923,7 +1923,6 @@ dependencies = [
"ip_network_table",
"lqos_utils",
"once_cell",
"pyo3",
"serde",
"serde_json",
"sha2",
@@ -3880,7 +3879,6 @@ dependencies = [
"ip_network_table",
"lqos_bus",
"lqos_config",
"pyo3",
"serde",
"serde_cbor",
"serde_json",

View File

@@ -15,7 +15,6 @@ ip_network = { workspace = true }
sha2 = { workspace = true }
uuid = { workspace = true }
tracing = { workspace = true }
pyo3 = { workspace = true }
toml = { workspace = true }
lqos_utils = { path = "../lqos_utils" }
arc-swap = { workspace = true }

View File

@@ -147,12 +147,12 @@ fn migrate_bridge(
python_config: &PythonMigration,
new_config: &mut Config,
) -> Result<(), MigrationError> {
if python_config.on_a_stick {
if python_config.on_astick {
new_config.bridge = None;
new_config.single_interface = Some(SingleInterfaceConfig {
interface: python_config.interface_a.clone(),
internet_vlan: python_config.stick_vlan_a,
network_vlan: python_config.stick_vlan_b,
internet_vlan: python_config.stick_vlan_a as u32,
network_vlan: python_config.stick_vlan_b as u32,
});
} else {
new_config.single_interface = None;
@@ -171,17 +171,17 @@ fn migrate_queues(
) -> Result<(), MigrationError> {
new_config.queues.default_sqm = python_config.sqm.clone();
new_config.queues.monitor_only = python_config.monitor_only_mode;
new_config.queues.uplink_bandwidth_mbps = python_config.upstream_bandwidth_capacity_upload_mbps;
new_config.queues.uplink_bandwidth_mbps = python_config.upstream_bandwidth_capacity_upload_mbps as u32;
new_config.queues.downlink_bandwidth_mbps =
python_config.upstream_bandwidth_capacity_download_mbps;
new_config.queues.generated_pn_upload_mbps = python_config.generated_pn_upload_mbps;
new_config.queues.generated_pn_download_mbps = python_config.generated_pn_download_mbps;
python_config.upstream_bandwidth_capacity_download_mbps as u32;
new_config.queues.generated_pn_upload_mbps = python_config.generated_pnupload_mbps as u32;
new_config.queues.generated_pn_download_mbps = python_config.generated_pndownload_mbps as u32;
new_config.queues.dry_run = !python_config.enable_actual_shell_commands;
new_config.queues.sudo = python_config.run_shell_commands_as_sudo;
if python_config.queues_available_override == 0 {
new_config.queues.override_available_queues = None;
} else {
new_config.queues.override_available_queues = Some(python_config.queues_available_override);
new_config.queues.override_available_queues = Some(python_config.queues_available_override as u32);
}
new_config.queues.use_binpacking = python_config.use_bin_packing_to_balance_cpu;
Ok(())
@@ -215,9 +215,9 @@ fn migrate_integration_common(
) -> Result<(), MigrationError> {
new_config.integration_common.circuit_name_as_address = python_config.circuit_name_use_address;
new_config.integration_common.always_overwrite_network_json =
python_config.overwrite_network_json_always;
python_config.overwrite_network_jsonalways;
new_config.integration_common.queue_refresh_interval_mins =
python_config.queue_refresh_interval_mins;
python_config.queue_refresh_interval_mins as u32;
Ok(())
}
@@ -227,8 +227,8 @@ fn migrate_spylnx(
) -> Result<(), MigrationError> {
new_config.spylnx_integration.enable_spylnx = python_config.automatic_import_splynx;
new_config.spylnx_integration.api_key = python_config.splynx_api_key.clone();
new_config.spylnx_integration.api_secret = python_config.spylnx_api_secret.clone();
new_config.spylnx_integration.url = python_config.spylnx_api_url.clone();
new_config.spylnx_integration.api_secret = python_config.splynx_api_secret.clone();
new_config.spylnx_integration.url = python_config.splynx_api_url.clone();
Ok(())
}
@@ -259,17 +259,17 @@ fn migrate_uisp(
) -> Result<(), MigrationError> {
new_config.uisp_integration.enable_uisp = python_config.automatic_import_uisp;
new_config.uisp_integration.token = python_config.uisp_auth_token.clone();
new_config.uisp_integration.url = python_config.uisp_base_url.clone();
new_config.uisp_integration.url = python_config.uispbase_url.clone();
new_config.uisp_integration.site = python_config.uisp_site.clone();
new_config.uisp_integration.strategy = python_config.uisp_strategy.clone();
new_config.uisp_integration.suspended_strategy = python_config.uisp_suspended_strategy.clone();
new_config.uisp_integration.airmax_capacity = python_config.airmax_capacity;
new_config.uisp_integration.ltu_capacity = python_config.ltu_capacity;
new_config.uisp_integration.airmax_capacity = python_config.air_max_capacity as f32;
new_config.uisp_integration.ltu_capacity = python_config.ltu_capacity as f32;
new_config.uisp_integration.exclude_sites = python_config.exclude_sites.clone();
new_config.uisp_integration.ipv6_with_mikrotik = python_config.find_ipv6_using_mikrotik;
new_config.uisp_integration.bandwidth_overhead_factor = python_config.bandwidth_overhead_factor;
new_config.uisp_integration.ipv6_with_mikrotik = python_config.find_ipv6using_mikrotik_api;
new_config.uisp_integration.bandwidth_overhead_factor = python_config.bandwidth_overhead_factor as f32;
new_config.uisp_integration.commit_bandwidth_multiplier =
python_config.committed_bandwidth_multiplier;
python_config.committed_bandwidth_multiplier as f32;
// TODO: ExceptionCPEs is going to require some real work
Ok(())
}
@@ -278,24 +278,10 @@ fn migrate_influx(
python_config: &PythonMigration,
new_config: &mut Config,
) -> Result<(), MigrationError> {
new_config.influxdb.enable_influxdb = python_config.influx_db_enabled;
new_config.influxdb.url = python_config.influx_db_url.clone();
new_config.influxdb.bucket = python_config.infux_db_bucket.clone();
new_config.influxdb.org = python_config.influx_db_org.clone();
new_config.influxdb.token = python_config.influx_db_token.clone();
new_config.influxdb.enable_influxdb = python_config.influx_enabled;
new_config.influxdb.url = python_config.influx_dburl.clone();
new_config.influxdb.bucket = python_config.influx_dbbucket.clone();
new_config.influxdb.org = python_config.influx_dborg.clone();
new_config.influxdb.token = python_config.influx_dbtoken.clone();
Ok(())
}
#[cfg(test)]
mod test {
use super::*;
use crate::etc::test_data::{OLD_CONFIG, PYTHON_CONFIG};
#[test]
fn test_migration() {
let old_config = EtcLqos::load_from_string(OLD_CONFIG).unwrap();
let python_config = PythonMigration::load_from_string(PYTHON_CONFIG).unwrap();
let new_config = do_migration_14_to_15(&old_config, &python_config).unwrap();
assert_eq!(new_config.version, "1.5");
}
}

View File

@@ -3,22 +3,18 @@
//! for version 1.5.
use super::EtcLqos;
use pyo3::{prepare_freethreaded_python, Python};
use std::{
collections::HashMap,
fs::read_to_string,
path::{Path, PathBuf},
};
use std::process::Command;
use serde::{Deserialize, Serialize};
use thiserror::Error;
use tracing::error;
#[derive(Debug, Error)]
pub enum PythonMigrationError {
#[error("The ispConfig.py file does not exist.")]
ConfigFileNotFound,
#[error("Unable to parse variable")]
ParseError,
#[error("Variable not found")]
VariableNotFound(String),
}
fn isp_config_py_path(cfg: &EtcLqos) -> PathBuf {
@@ -32,223 +28,141 @@ fn config_exists(cfg: &EtcLqos) -> bool {
isp_config_py_path(&cfg).exists()
}
fn from_python<'a, T>(py: &'a Python, variable_name: &str) -> Result<T, PythonMigrationError>
where
T: pyo3::FromPyObject<'a>,
{
let result = py
.eval(variable_name, None, None)
.map_err(|_| PythonMigrationError::VariableNotFound(variable_name.to_string()))?
.extract::<T>()
.map_err(|_| PythonMigrationError::ParseError)?;
#[derive(Serialize, Deserialize, Default)]
pub struct ExceptionCpes {}
Ok(result)
}
#[derive(Default, Debug)]
#[derive(Serialize, Deserialize, Default)]
pub struct PythonMigration {
pub sqm: String,
#[serde(rename = "monitorOnlyMode")]
pub monitor_only_mode: bool,
pub upstream_bandwidth_capacity_download_mbps: u32,
pub upstream_bandwidth_capacity_upload_mbps: u32,
pub generated_pn_download_mbps: u32,
pub generated_pn_upload_mbps: u32,
#[serde(rename = "upstreamBandwidthCapacityDownloadMbps")]
pub upstream_bandwidth_capacity_download_mbps: i64,
#[serde(rename = "upstreamBandwidthCapacityUploadMbps")]
pub upstream_bandwidth_capacity_upload_mbps: i64,
#[serde(rename = "generatedPNDownloadMbps")]
pub generated_pndownload_mbps: i64,
#[serde(rename = "generatedPNUploadMbps")]
pub generated_pnupload_mbps: i64,
#[serde(rename = "interfaceA")]
pub interface_a: String,
#[serde(rename = "interfaceB")]
pub interface_b: String,
pub queue_refresh_interval_mins: u32,
pub on_a_stick: bool,
pub stick_vlan_a: u32,
pub stick_vlan_b: u32,
#[serde(rename = "queueRefreshIntervalMins")]
pub queue_refresh_interval_mins: i64,
#[serde(rename = "OnAStick")]
pub on_astick: bool,
#[serde(rename = "StickVlanA")]
pub stick_vlan_a: i64,
#[serde(rename = "StickVlanB")]
pub stick_vlan_b: i64,
#[serde(rename = "enableActualShellCommands")]
pub enable_actual_shell_commands: bool,
#[serde(rename = "runShellCommandsAsSudo")]
pub run_shell_commands_as_sudo: bool,
pub queues_available_override: u32,
#[serde(rename = "queuesAvailableOverride")]
pub queues_available_override: i64,
#[serde(rename = "useBinPackingToBalanceCPU")]
pub use_bin_packing_to_balance_cpu: bool,
pub influx_db_enabled: bool,
pub influx_db_url: String,
pub infux_db_bucket: String,
pub influx_db_org: String,
pub influx_db_token: String,
#[serde(rename = "influxEnabled")]
pub influx_enabled: bool,
#[serde(rename = "influxDBurl")]
pub influx_dburl: String,
#[serde(rename = "influxDBBucket")]
pub influx_dbbucket: String,
#[serde(rename = "influxDBOrg")]
pub influx_dborg: String,
#[serde(rename = "influxDBtoken")]
pub influx_dbtoken: String,
#[serde(rename = "circuitNameUseAddress")]
pub circuit_name_use_address: bool,
pub overwrite_network_json_always: bool,
#[serde(rename = "overwriteNetworkJSONalways")]
pub overwrite_network_jsonalways: bool,
#[serde(rename = "ignoreSubnets")]
pub ignore_subnets: Vec<String>,
#[serde(rename = "allowedSubnets")]
pub allowed_subnets: Vec<String>,
#[serde(rename = "excludeSites")]
pub exclude_sites: Vec<String>,
#[serde(rename = "findIPv6usingMikrotikAPI")]
pub find_ipv6using_mikrotik_api: bool,
#[serde(rename = "automaticImportSplynx")]
pub automatic_import_splynx: bool,
pub splynx_api_key: String,
pub spylnx_api_secret: String,
pub spylnx_api_url: String,
pub splynx_api_secret: String,
pub splynx_api_url: String,
#[serde(rename = "automaticImportUISP")]
pub automatic_import_uisp: bool,
#[serde(rename = "uispAuthToken")]
pub uisp_auth_token: String,
pub uisp_base_url: String,
#[serde(rename = "UISPbaseURL")]
pub uispbase_url: String,
#[serde(rename = "uispSite")]
pub uisp_site: String,
#[serde(rename = "uispStrategy")]
pub uisp_strategy: String,
#[serde(rename = "uispSuspendedStrategy")]
pub uisp_suspended_strategy: String,
pub airmax_capacity: f32,
pub ltu_capacity: f32,
pub exclude_sites: Vec<String>,
pub find_ipv6_using_mikrotik: bool,
pub bandwidth_overhead_factor: f32,
pub committed_bandwidth_multiplier: f32,
pub exception_cpes: HashMap<String, String>,
#[serde(rename = "airMax_capacity")]
pub air_max_capacity: f64,
pub ltu_capacity: f64,
#[serde(rename = "bandwidthOverheadFactor")]
pub bandwidth_overhead_factor: f64,
#[serde(rename = "committedBandwidthMultiplier")]
pub committed_bandwidth_multiplier: f64,
#[serde(rename = "exceptionCPEs")]
pub exception_cpes: ExceptionCpes,
#[serde(rename = "apiUsername")]
pub api_username: String,
#[serde(rename = "apiPassword")]
pub api_password: String,
#[serde(rename = "apiHostIP")]
pub api_host_ip: String,
pub api_host_port: u32,
#[serde(rename = "apiHostPost")]
pub api_host_post: i64,
#[serde(rename = "automaticImportPowercode")]
pub automatic_import_powercode: bool,
pub powercode_api_key: String,
pub powercode_api_url: String,
#[serde(rename = "automaticImportSonar")]
pub automatic_import_sonar: bool,
pub sonar_api_url: String,
pub sonar_api_key: String,
pub sonar_api_url: String,
pub snmp_community: String,
pub sonar_active_status_ids: Vec<String>,
pub sonar_airmax_ap_model_ids: Vec<String>,
pub sonar_ltu_ap_model_ids: Vec<String>,
pub sonar_active_status_ids: Vec<String>,
// TODO: httpRestIntegrationConfig
}
impl PythonMigration {
fn parse(cfg: &mut Self, py: &Python) -> Result<(), PythonMigrationError> {
cfg.sqm = from_python(&py, "sqm").unwrap_or("cake diffserv4".to_string());
cfg.monitor_only_mode = from_python(&py, "monitorOnlyMode").unwrap_or(false);
cfg.upstream_bandwidth_capacity_download_mbps =
from_python(&py, "upstreamBandwidthCapacityDownloadMbps").unwrap_or(1000);
cfg.upstream_bandwidth_capacity_upload_mbps =
from_python(&py, "upstreamBandwidthCapacityUploadMbps").unwrap_or(1000);
cfg.generated_pn_download_mbps = from_python(&py, "generatedPNDownloadMbps").unwrap_or(1000);
cfg.generated_pn_upload_mbps = from_python(&py, "generatedPNUploadMbps").unwrap_or(1000);
cfg.interface_a = from_python(&py, "interfaceA").unwrap_or("eth1".to_string());
cfg.interface_b = from_python(&py, "interfaceB").unwrap_or("eth2".to_string());
cfg.queue_refresh_interval_mins = from_python(&py, "queueRefreshIntervalMins").unwrap_or(15);
cfg.on_a_stick = from_python(&py, "OnAStick").unwrap_or(false);
cfg.stick_vlan_a = from_python(&py, "StickVlanA").unwrap_or(0);
cfg.stick_vlan_b = from_python(&py, "StickVlanB").unwrap_or(0);
cfg.enable_actual_shell_commands = from_python(&py, "enableActualShellCommands").unwrap_or(true);
cfg.run_shell_commands_as_sudo = from_python(&py, "runShellCommandsAsSudo").unwrap_or(false);
cfg.queues_available_override = from_python(&py, "queuesAvailableOverride").unwrap_or(0);
cfg.use_bin_packing_to_balance_cpu = from_python(&py, "useBinPackingToBalanceCPU").unwrap_or(false);
// Influx
cfg.influx_db_enabled = from_python(&py, "influxDBEnabled").unwrap_or(false);
cfg.influx_db_url = from_python(&py, "influxDBurl").unwrap_or("http://localhost:8086".to_string());
cfg.infux_db_bucket = from_python(&py, "influxDBBucket").unwrap_or("libreqos".to_string());
cfg.influx_db_org = from_python(&py, "influxDBOrg").unwrap_or("Your ISP Name Here".to_string());
cfg.influx_db_token = from_python(&py, "influxDBtoken").unwrap_or("".to_string());
// Common
cfg.circuit_name_use_address = from_python(&py, "circuitNameUseAddress").unwrap_or(true);
cfg.overwrite_network_json_always = from_python(&py, "overwriteNetworkJSONalways").unwrap_or(false);
cfg.ignore_subnets = from_python(&py, "ignoreSubnets").unwrap_or(vec!["192.168.0.0/16".to_string()]);
cfg.allowed_subnets = from_python(&py, "allowedSubnets").unwrap_or(vec!["100.64.0.0/10".to_string()]);
cfg.exclude_sites = from_python(&py, "excludeSites").unwrap_or(vec![]);
cfg.find_ipv6_using_mikrotik = from_python(&py, "findIPv6usingMikrotik").unwrap_or(false);
// Spylnx
cfg.automatic_import_splynx = from_python(&py, "automaticImportSplynx").unwrap_or(false);
cfg.splynx_api_key = from_python(&py, "splynx_api_key").unwrap_or("Your API Key Here".to_string());
cfg.spylnx_api_secret = from_python(&py, "splynx_api_secret").unwrap_or("Your API Secret Here".to_string());
cfg.spylnx_api_url = from_python(&py, "splynx_api_url").unwrap_or("https://your.splynx.url/api/v1".to_string());
// UISP
cfg.automatic_import_uisp = from_python(&py, "automaticImportUISP").unwrap_or(false);
cfg.uisp_auth_token = from_python(&py, "uispAuthToken").unwrap_or("Your API Token Here".to_string());
cfg.uisp_base_url = from_python(&py, "UISPbaseURL").unwrap_or("https://your.uisp.url".to_string());
cfg.uisp_site = from_python(&py, "uispSite").unwrap_or("Your parent site name here".to_string());
cfg.uisp_strategy = from_python(&py, "uispStrategy").unwrap_or("full".to_string());
cfg.uisp_suspended_strategy = from_python(&py, "uispSuspendedStrategy").unwrap_or("none".to_string());
cfg.airmax_capacity = from_python(&py, "airMax_capacity").unwrap_or(0.65);
cfg.ltu_capacity = from_python(&py, "ltu_capacity").unwrap_or(0.9);
cfg.bandwidth_overhead_factor = from_python(&py, "bandwidthOverheadFactor").unwrap_or(1.0);
cfg.committed_bandwidth_multiplier = from_python(&py, "committedBandwidthMultiplier").unwrap_or(0.98);
cfg.exception_cpes = from_python(&py, "exceptionCPEs").unwrap_or(HashMap::new());
// API
cfg.api_username = from_python(&py, "apiUsername").unwrap_or("testUser".to_string());
cfg.api_password = from_python(&py, "apiPassword").unwrap_or("testPassword".to_string());
cfg.api_host_ip = from_python(&py, "apiHostIP").unwrap_or("127.0.0.1".to_string());
cfg.api_host_port = from_python(&py, "apiHostPost").unwrap_or(5000);
// Powercode
cfg.automatic_import_powercode = from_python(&py, "automaticImportPowercode").unwrap_or(false);
cfg.powercode_api_key = from_python(&py,"powercode_api_key").unwrap_or("".to_string());
cfg.powercode_api_url = from_python(&py,"powercode_api_url").unwrap_or("".to_string());
// Sonar
cfg.automatic_import_sonar = from_python(&py, "automaticImportSonar").unwrap_or(false);
cfg.sonar_api_key = from_python(&py, "sonar_api_key").unwrap_or("".to_string());
cfg.sonar_api_url = from_python(&py, "sonar_api_url").unwrap_or("".to_string());
cfg.snmp_community = from_python(&py, "snmp_community").unwrap_or("public".to_string());
cfg.sonar_active_status_ids = from_python(&py, "sonar_active_status_ids").unwrap_or(vec![]);
cfg.sonar_airmax_ap_model_ids = from_python(&py, "sonar_airmax_ap_model_ids").unwrap_or(vec![]);
cfg.sonar_ltu_ap_model_ids = from_python(&py, "sonar_ltu_ap_model_ids").unwrap_or(vec![]);
// InfluxDB
cfg.influx_db_enabled = from_python(&py, "influxDBEnabled").unwrap_or(false);
cfg.influx_db_url = from_python(&py, "influxDBurl").unwrap_or("http://localhost:8086".to_string());
cfg.infux_db_bucket = from_python(&py, "influxDBBucket").unwrap_or("libreqos".to_string());
cfg.influx_db_org = from_python(&py, "influxDBOrg").unwrap_or("Your ISP Name Here".to_string());
cfg.influx_db_token = from_python(&py, "influxDBtoken").unwrap_or("".to_string());
Ok(())
}
pub fn load() -> Result<Self, PythonMigrationError> {
let mut old_config = Self::default();
if let Ok(cfg) = crate::etc::EtcLqos::load() {
if !config_exists(&cfg) {
return Err(PythonMigrationError::ConfigFileNotFound);
}
let code = read_to_string(isp_config_py_path(&cfg)).unwrap();
prepare_freethreaded_python();
Python::with_gil(|py| {
py.run(&code, None, None).unwrap();
let result = Self::parse(&mut old_config, &py);
if result.is_err() {
println!("Error parsing Python config: {:?}", result);
}
});
let migrator_path = Path::new(&cfg.lqos_directory)
.join("configMigrator.py");
if !migrator_path.exists() {
return Err(PythonMigrationError::ConfigFileNotFound);
}
let output = Command::new("/usr/bin/python3")
.arg(migrator_path)
.output()
.map_err(|e| {
error!("Error running Python migrator: {:?}", e);
PythonMigrationError::ConfigFileNotFound
})?;
if !output.status.success() {
error!("Error running Python migrator: {:?}", output);
return Err(PythonMigrationError::ConfigFileNotFound);
}
let json = String::from_utf8(output.stdout).unwrap();
let json: Self = serde_json::from_str(&json).unwrap();
return Ok(json);
} else {
return Err(PythonMigrationError::ConfigFileNotFound);
}
Ok(old_config)
}
#[allow(dead_code)]
pub(crate) fn load_from_string(s: &str) -> Result<Self, PythonMigrationError> {
let mut old_config = Self::default();
prepare_freethreaded_python();
Python::with_gil(|py| {
py.run(s, None, None).unwrap();
let result = Self::parse(&mut old_config, &py);
if result.is_err() {
println!("Error parsing Python config: {:?}", result);
}
});
Ok(old_config)
}
}
#[cfg(test)]
mod test {
use super::super::test_data::*;
use super::*;
#[test]
fn test_parsing_the_default() {
let mut cfg = PythonMigration::default();
prepare_freethreaded_python();
let mut worked = true;
Python::with_gil(|py| {
py.run(PYTHON_CONFIG, None, None).unwrap();
let result = PythonMigration::parse(&mut cfg, &py);
if result.is_err() {
println!("Error parsing Python config: {:?}", result);
worked = false;
}
});
assert!(worked)
}
}

View File

@@ -18,6 +18,5 @@ csv = { workspace = true }
serde_json = { workspace = true }
ip_network_table = { workspace = true }
ip_network = { workspace = true }
pyo3 = { workspace = true }
serde_cbor = { workspace = true }
lqos_bus = { path = "../lqos_bus" }

View File

@@ -1,6 +1,6 @@
use std::{fs::read_to_string, path::Path};
use std::process::Command;
use lqos_config::Config;
use pyo3::{prepare_freethreaded_python, PyResult, Python};
use crate::uisp_types::Ipv4ToIpv6;
// To ease debugging in the absense of this particular setup, there's a mock function
@@ -37,41 +37,21 @@ async fn fetch_mikrotik_data(config: &Config) -> anyhow::Result<Vec<Ipv4ToIpv6>>
// Load the script
let code = read_to_string(mikrotik_script_path)?;
let csv_path = mikrotik_dhcp_router_list_path.to_string_lossy().to_string();
// Get the Python environment going
let mut json_from_python = None;
prepare_freethreaded_python();
let result = Python::with_gil(|python| -> PyResult<()> {
// Run the Python script
let locals = pyo3::types::PyDict::new(python);
python.run(&code, None, Some(locals))?;
// Run the function to pull the Mikrotik data
let result = python
.eval(
&format!("{PY_FUNC}('{}')", mikrotik_dhcp_router_list_path.to_string_lossy()),
Some(locals),
None
)?
.extract::<String>()?;
// Parse the response.
// it is an object that looks like this:
// {
// "1.2.3.4" : "2001:db8::1",
// }
// We're forcibly returning JSON to make the bridge easier.
json_from_python = Some(result);
Ok(())
});
// If an error occured, fail with as much information as possible
if let Err(e) = result {
let output = Command::new("/usr/bin/python3")
.args(&[ &code, &csv_path ])
.output();
if let Err(e) = output {
tracing::error!("Python error: {:?}", e);
return Err(anyhow::anyhow!("Python error: {:?}", e));
}
let output = output?;
json_from_python = Some(String::from_utf8(output.stdout)?);
// Parse the JSON
// If we got this far, we have some JSON to work with
let json_from_python = json_from_python.unwrap();
@@ -84,9 +64,9 @@ async fn fetch_mikrotik_data(config: &Config) -> anyhow::Result<Vec<Ipv4ToIpv6>>
ipv6: ipv6.to_string().replace("\"", ""),
});
}
return Ok(result);
Ok(result)
} else {
tracing::error!("Mikrotik data is not an object");
return Err(anyhow::anyhow!("Mikrotik data is not an object"));
Err(anyhow::anyhow!("Mikrotik data is not an object"))
}
}