LibreQoS (mirror of https://github.com/LibreQoE/LibreQoS.git)
Commit 77ecb8afcd: Merge branch 'develop'
@@ -383,6 +383,10 @@ class NetworkGraph:
                 # Remove brackets and quotes of list so LibreQoS.py can parse it
                 device["ipv4"] = str(device["ipv4"]).replace('[', '').replace(']', '').replace("'", '')
                 device["ipv6"] = str(device["ipv6"]).replace('[', '').replace(']', '').replace("'", '')
+                if circuit["upload"] is None:
+                    circuit["upload"] = 0.0
+                if circuit["download"] is None:
+                    circuit["download"] = 0.0
                 row = [
                     circuit["id"],
                     circuit["name"],
@@ -392,10 +396,10 @@ class NetworkGraph:
                     device["mac"],
                     device["ipv4"],
                     device["ipv6"],
-                    int(circuit["download"] * 0.98),
-                    int(circuit["upload"] * 0.98),
-                    int(circuit["download"] * bandwidthOverheadFactor),
-                    int(circuit["upload"] * bandwidthOverheadFactor),
+                    int(float(circuit["download"]) * 0.98),
+                    int(float(circuit["upload"]) * 0.98),
+                    int(float(circuit["download"]) * bandwidthOverheadFactor),
+                    int(float(circuit["upload"]) * bandwidthOverheadFactor),
                     ""
                 ]
                 wr.writerow(row)
@@ -414,7 +418,7 @@ class NetworkGraph:

         import graphviz
         dot = graphviz.Digraph(
-            'network', comment="Network Graph", engine="fdp")
+            'network', comment="Network Graph", engine="dot")

         for (i, node) in enumerate(self.nodes):
             if ((node.type != NodeType.client and node.type != NodeType.device) or showClients):
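The second hunk wraps each rate in float() because circuit rates coming out of the upstream CSV/JSON parsing can arrive as strings rather than numbers. A minimal sketch (hypothetical value, not part of the commit) of the failure mode the cast guards against:

    download = "100"                          # rate arrives as a string
    try:
        committed = int(download * 0.98)      # old form: str * float
    except TypeError as e:
        print("old form fails:", e)           # can't multiply sequence by non-int of type 'float'
    committed = int(float(download) * 0.98)   # new form casts first
    print(committed)                          # 98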
@@ -3,8 +3,12 @@ checkPythonVersion()
 import requests
 import os
 import csv
-from ispConfig import uispSite, uispStrategy
 from integrationCommon import isIpv4Permitted, fixSubnet
+try:
+    from ispConfig import uispSite, uispStrategy, overwriteNetworkJSONalways
+except:
+    from ispConfig import uispSite, uispStrategy
+    overwriteNetworkJSONalways = False

 def uispRequest(target):
     # Sends an HTTP request to UISP and returns the
@@ -71,11 +75,171 @@ def buildFlatGraph():
     net.prepareTree()
     net.plotNetworkGraph(False)
     if net.doesNetworkJsonExist():
-        print("network.json already exists. Leaving in-place.")
+        if overwriteNetworkJSONalways:
+            net.createNetworkJson()
+        else:
+            print("network.json already exists and overwriteNetworkJSONalways set to False. Leaving in-place.")
     else:
         net.createNetworkJson()
     net.createShapedDevices()

+def linkSiteTarget(link, direction):
+    # Helper function to extract the site ID from a data link. Returns
+    # None if not present.
+    if link[direction]['site'] is not None:
+        return link[direction]['site']['identification']['id']
+    return None
+
+def findSiteLinks(dataLinks, siteId):
+    # Searches the data links for any links to/from the specified site.
+    # Returns a list of site IDs that are linked to the specified site.
+    links = []
+    for dl in dataLinks:
+        fromSiteId = linkSiteTarget(dl, "from")
+        if fromSiteId is not None and fromSiteId == siteId:
+            # We have a link originating in this site.
+            target = linkSiteTarget(dl, "to")
+            if target is not None:
+                links.append(target)
+
+        toSiteId = linkSiteTarget(dl, "to")
+        if toSiteId is not None and toSiteId == siteId:
+            # We have a link terminating in this site.
+            target = linkSiteTarget(dl, "from")
+            if target is not None:
+                links.append(target)
+    return links
+
+def buildSiteBandwidths():
+    # Builds a dictionary of site bandwidths from the integrationUISPbandwidths.csv file.
+    siteBandwidth = {}
+    if os.path.isfile("integrationUISPbandwidths.csv"):
+        with open('integrationUISPbandwidths.csv') as csv_file:
+            csv_reader = csv.reader(csv_file, delimiter=',')
+            next(csv_reader)
+            for row in csv_reader:
+                name, download, upload = row
+                download = int(download)
+                upload = int(upload)
+                siteBandwidth[name] = {"download": download, "upload": upload}
+    return siteBandwidth
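As a usage sketch of buildSiteBandwidths() above (hypothetical file contents; the real function reads integrationUISPbandwidths.csv from the working directory and skips the header row exactly as shown):

    import csv, io

    sample = "Site Name,Download,Upload\nTowerA,500,100\n"
    reader = csv.reader(io.StringIO(sample))
    next(reader)  # skip the header row, as buildSiteBandwidths() does
    siteBandwidth = {name: {"download": int(d), "upload": int(u)}
                     for name, d, u in reader}
    print(siteBandwidth)  # {'TowerA': {'download': 500, 'upload': 100}}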
+def findApCapacities(devices, siteBandwidth):
+    # Searches the UISP devices for APs and adds their capacities to the siteBandwidth dictionary.
+    for device in devices:
+        if device['identification']['role'] == "ap":
+            name = device['identification']['name']
+            if not name in siteBandwidth and device['overview']['downlinkCapacity'] and device['overview']['uplinkCapacity']:
+                download = int(device['overview']['downlinkCapacity'] / 1000000)
+                upload = int(device['overview']['uplinkCapacity'] / 1000000)
+                siteBandwidth[device['identification']['name']] = {
+                    "download": download, "upload": upload}
+
+def findAirfibers(devices, generatedPNDownloadMbps, generatedPNUploadMbps):
+    foundAirFibersBySite = {}
+    for device in devices:
+        if device['identification']['site']['type'] == 'site':
+            if device['identification']['role'] == "station":
+                if device['identification']['type'] == "airFiber":
+                    if device['overview']['status'] == 'active':
+                        if device['overview']['downlinkCapacity'] is not None and device['overview']['uplinkCapacity'] is not None:
+                            download = int(device['overview']['downlinkCapacity'] / 1000000)
+                            upload = int(device['overview']['uplinkCapacity'] / 1000000)
+                        else:
+                            download = generatedPNDownloadMbps
+                            upload = generatedPNUploadMbps
+                        # Make sure to use half of reported bandwidth for AF60-LRs
+                        if device['identification']['model'] == "AF60-LR":
+                            download = int(download / 2)
+                            upload = int(upload / 2)
+                        if device['identification']['site']['id'] in foundAirFibersBySite:
+                            if (download > foundAirFibersBySite[device['identification']['site']['id']]['download']) or (upload > foundAirFibersBySite[device['identification']['site']['id']]['upload']):
+                                foundAirFibersBySite[device['identification']['site']['id']]['download'] = download
+                                foundAirFibersBySite[device['identification']['site']['id']]['upload'] = upload
+                        else:
+                            foundAirFibersBySite[device['identification']['site']['id']] = {'download': download, 'upload': upload}
+    return foundAirFibersBySite
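A quick worked example of findAirfibers()'s unit handling and the AF60-LR halving rule (hypothetical capacities, not from the commit):

    downlinkCapacity = 1_000_000_000             # reported by UISP in bits/s
    uplinkCapacity = 500_000_000
    download = int(downlinkCapacity / 1000000)   # 1000 Mbps
    upload = int(uplinkCapacity / 1000000)       # 500 Mbps
    # For an AF60-LR, both directions are then halved:
    download, upload = int(download / 2), int(upload / 2)
    print(download, upload)                      # 500 250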
+def buildSiteList(sites, dataLinks):
+    # Builds a list of sites, including their IDs, names, and connections.
+    # Connections are determined by the dataLinks list.
+    siteList = []
+    for site in sites:
+        newSite = {
+            'id': site['identification']['id'],
+            'name': site['identification']['name'],
+            'connections': findSiteLinks(dataLinks, site['identification']['id']),
+            'cost': 10000,
+            'parent': "",
+            'type': site['identification']['type'],
+        }
+        siteList.append(newSite)
+    return siteList
+
+def findInSiteList(siteList, name):
+    # Searches the siteList for a site with the specified name.
+    for site in siteList:
+        if site['name'] == name:
+            return site
+    return None
+
+def findInSiteListById(siteList, id):
+    # Searches the siteList for a site with the specified id.
+    for site in siteList:
+        if site['id'] == id:
+            return site
+    return None
+
+def debugSpaces(n):
+    # Helper function to return a string of n spaces.
+    spaces = ""
+    for i in range(int(n)):
+        spaces = spaces + " "
+    return spaces
+
+def walkGraphOutwards(siteList, root, routeOverrides):
+    def walkGraph(node, parent, cost, backPath):
+        site = findInSiteListById(siteList, node)
+        routeName = parent['name'] + "->" + site['name']
+        if routeName in routeOverrides:
+            # We have an overridden cost for this route, so use it instead
+            #print("--> Using overridden cost for " + routeName + ". New cost: " + str(routeOverrides[routeName]) + ".")
+            cost = routeOverrides[routeName]
+        if cost < site['cost']:
+            # It's cheaper to get here this way, so update the cost and parent.
+            site['cost'] = cost
+            site['parent'] = parent['id']
+            #print(debugSpaces(cost/10) + parent['name'] + "->" + site['name'] + " -> New cost: " + str(cost))
+
+            for connection in site['connections']:
+                if not connection in backPath:
+                    #target = findInSiteListById(siteList, connection)
+                    #print(debugSpaces((cost+10)/10) + site['name'] + " -> " + target['name'] + " (" + str(target['cost']) + ")")
+                    newBackPath = backPath.copy()
+                    newBackPath.append(site['id'])
+                    walkGraph(connection, site, cost+10, newBackPath)
+
+    for connection in root['connections']:
+        # Force the parent since we're at the top
+        site = findInSiteListById(siteList, connection)
+        site['parent'] = root['id']
+        walkGraph(connection, root, 10, [root['id']])
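To illustrate the cost walk, a small self-contained example (hypothetical three-site topology) using the functions above; each hop adds 10, and a cheaper route replaces an earlier parent:

    siteList = [
        {'id': 'root', 'name': 'ROOT', 'connections': ['a', 'b'], 'cost': 0, 'parent': ''},
        {'id': 'a', 'name': 'A', 'connections': ['root', 'b'], 'cost': 10000, 'parent': ''},
        {'id': 'b', 'name': 'B', 'connections': ['a', 'root'], 'cost': 10000, 'parent': ''},
    ]
    walkGraphOutwards(siteList, siteList[0], {})
    # B is reachable via A at cost 20, but directly from ROOT at cost 10,
    # so the walk keeps ROOT as B's parent.
    print([(s['name'], s['cost'], s['parent']) for s in siteList[1:]])
    # [('A', 10, 'root'), ('B', 10, 'root')]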
+def loadRoutingOverrides():
+    # Loads integrationUISProutes.csv and returns a dict mapping "from->to" route names to a cost
+    overrides = {}
+    if os.path.isfile("integrationUISProutes.csv"):
+        with open("integrationUISProutes.csv", "r") as f:
+            reader = csv.reader(f)
+            for row in reader:
+                if not row[0].startswith("#") and len(row) == 3:
+                    fromSite, toSite, cost = row
+                    overrides[fromSite.strip() + "->" + toSite.strip()] = int(cost)
+    #print(overrides)
+    return overrides
+
 def buildFullGraph():
     # Attempts to build a full network graph, incorporating as much of the UISP
     # hierarchy as possible.
@@ -88,49 +252,46 @@ def buildFullGraph():
     devices = uispRequest("devices?withInterfaces=true&authorized=true")
     dataLinks = uispRequest("data-links?siteLinksOnly=true")

-    # Do we already have a integrationUISPbandwidths.csv file?
-    siteBandwidth = {}
-    if os.path.isfile("integrationUISPbandwidths.csv"):
-        with open('integrationUISPbandwidths.csv') as csv_file:
-            csv_reader = csv.reader(csv_file, delimiter=',')
-            next(csv_reader)
-            for row in csv_reader:
-                name, download, upload = row
-                download = int(download)
-                upload = int(upload)
-                siteBandwidth[name] = {"download": download, "upload": upload}
-
-    # Find AP capacities from UISP
-    for device in devices:
-        if device['identification']['role'] == "ap":
-            name = device['identification']['name']
-            if not name in siteBandwidth and device['overview']['downlinkCapacity'] and device['overview']['uplinkCapacity']:
-                download = int(device['overview']['downlinkCapacity'] / 1000000)
-                upload = int(device['overview']['uplinkCapacity'] / 1000000)
-                siteBandwidth[device['identification']['name']] = {
-                    "download": download, "upload": upload}
-
-    # Find Site Capacities by AirFiber capacities
-    foundAirFibersBySite = {}
-    for device in devices:
-        if device['identification']['site']['type'] == 'site':
-            if device['identification']['role'] == "station":
-                if device['identification']['type'] == "airFiber":
-                    if device['overview']['status'] == 'active':
-                        download = int(device['overview']['downlinkCapacity'] / 1000000)
-                        upload = int(device['overview']['uplinkCapacity'] / 1000000)
-                        # Make sure to use half of reported bandwidth for AF60-LRs
-                        if device['identification']['model'] == "AF60-LR":
-                            download = int(download / 2)
-                            upload = int(download / 2)
-                        if device['identification']['site']['id'] in foundAirFibersBySite:
-                            if (download > foundAirFibersBySite[device['identification']['site']['id']]['download']) or (upload > foundAirFibersBySite[device['identification']['site']['id']]['upload']):
-                                foundAirFibersBySite[device['identification']['site']['id']]['download'] = download
-                                foundAirFibersBySite[device['identification']['site']['id']]['upload'] = upload
-                        else:
-                            foundAirFibersBySite[device['identification']['site']['id']] = {'download': download, 'upload': upload}
+    # Build Site Capacities
+    siteBandwidth = buildSiteBandwidths()
+    findApCapacities(devices, siteBandwidth)
+    foundAirFibersBySite = findAirfibers(devices, generatedPNDownloadMbps, generatedPNUploadMbps)

+    # Create a list of just network sites
+    siteList = buildSiteList(sites, dataLinks)
+    rootSite = findInSiteList(siteList, uispSite)
+    routeOverrides = loadRoutingOverrides()
+    if rootSite is None:
+        print("ERROR: Unable to find root site in UISP")
+        return
+    walkGraphOutwards(siteList, rootSite, routeOverrides)
+    # Debug code: dump the list of site parents
+    # for s in siteList:
+    #     if s['parent'] == "":
+    #         p = "None"
+    #     else:
+    #         p = findInSiteListById(siteList, s['parent'])['name']
+    #     print(s['name'] + " (" + str(s['cost']) + ") <-- " + p)

+    # Find Nodes Connected By PtMP
+    nodeOffPtMP = {}
+    for site in sites:
+        id = site['identification']['id']
+        name = site['identification']['name']
+        type = site['identification']['type']
+        parent = findInSiteListById(siteList, id)['parent']
+        if type == 'site':
+            for link in dataLinks:
+                if link['from']['site'] is not None and link['from']['site']['identification']['id'] == parent:
+                    if link['to']['site'] is not None and link['to']['site']['identification']['id'] == id:
+                        if link['from']['device'] is not None and link['to']['device'] is not None and link['to']['device']['overview'] is not None and link['to']['device']['overview']['downlinkCapacity'] is not None and link['from']['device']['overview']['wirelessMode'] == 'ap-ptmp':
+                            # Capacity of the PtMP client radio feeding the PoP will be used as the site bandwidth limit
+                            download = int(round(link['to']['device']['overview']['downlinkCapacity']/1000000))
+                            upload = int(round(link['to']['device']['overview']['uplinkCapacity']/1000000))
+                            nodeOffPtMP[id] = {'parent': link['from']['device']['identification']['id'],
+                                               'download': download,
+                                               'upload': upload
+                                              }
     print("Building Topology")
     net = NetworkGraph()
     # Add all sites and client sites
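For clarity, a sketch (hypothetical IDs and rates, not from the commit) of what the PtMP pass above records; the site is re-parented onto the feeding AP radio, and the client radio's capacity becomes the site limit:

    nodeOffPtMP = {
        'site-uuid-123': {
            'parent': 'ap-device-uuid-456',  # the ap-ptmp radio feeding this site
            'download': 180,                 # Mbps, from the client radio's downlinkCapacity
            'upload': 35,                    # Mbps, from the client radio's uplinkCapacity
        }
    }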
@@ -142,17 +303,24 @@ def buildFullGraph():
             upload = generatedPNUploadMbps
             address = ""
             customerName = ""
-            if site['identification']['parent'] is None:
-                parent = ""
-            else:
-                parent = site['identification']['parent']['id']
+            parent = findInSiteListById(siteList, id)['parent']
+            if parent == "":
+                if site['identification']['parent'] is None:
+                    parent = ""
+                else:
+                    parent = site['identification']['parent']['id']
             match type:
                 case "site":
                     nodeType = NodeType.site
+                    if id in nodeOffPtMP:
+                        parent = nodeOffPtMP[id]['parent']
                     if name in siteBandwidth:
                         # Use the CSV bandwidth values
                         download = siteBandwidth[name]["download"]
                         upload = siteBandwidth[name]["upload"]
+                    elif id in nodeOffPtMP:
+                        download = nodeOffPtMP[id]['download']
+                        upload = nodeOffPtMP[id]['upload']
                     elif id in foundAirFibersBySite:
                         download = foundAirFibersBySite[id]['download']
                         upload = foundAirFibersBySite[id]['upload']
@@ -221,11 +389,14 @@ def buildFullGraph():
             # Add some defaults in case they want to change them
             siteBandwidth[node.displayName] = {
                 "download": generatedPNDownloadMbps, "upload": generatedPNUploadMbps}

     net.prepareTree()
     net.plotNetworkGraph(False)
     if net.doesNetworkJsonExist():
-        print("network.json already exists. Leaving in-place.")
+        if overwriteNetworkJSONalways:
+            net.createNetworkJson()
+        else:
+            print("network.json already exists and overwriteNetworkJSONalways set to False. Leaving in-place.")
     else:
         net.createNetworkJson()
     net.createShapedDevices()
src/integrationUISProutes.csv (new file, 5 lines)
@@ -0,0 +1,5 @@
+# Allows you to override route costs in the UISP integration, to better
+# represent your network. Costs by default increment 10 at each hop.
+# So if you want to skip 10 links, put a cost of 100 in.
+# From Site Name, To Site Name, New Cost
+# MYCORE, MYTOWER, 100
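A brief sketch of how a row in this file takes effect (hypothetical site names): loadRoutingOverrides() keys each row as "From->To", and walkGraph() substitutes the override for the accumulated hop cost when it traverses that link:

    routeOverrides = {"MYCORE->MYTOWER": 100}  # as parsed from the row above

    routeName = "MYCORE" + "->" + "MYTOWER"
    cost = routeOverrides.get(routeName, 10)   # default cost of a single hop is 10
    print(cost)                                # 100, so this hop weighs as much as 10 hops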
@@ -63,6 +63,9 @@ influxDBtoken = ""
 # Use Customer Name or Address as Circuit Name
 circuitNameUseAddress = True

+# Should integrationUISP overwrite network.json on each run?
+overwriteNetworkJSONalways = False
+
 # If a device shows a WAN IP within these subnets, assume they are behind NAT / un-shapable, and ignore them
 ignoreSubnets = ['192.168.0.0/16']
 allowedSubnets = ['100.64.0.0/10']
src/rust/Cargo.lock (generated, 30 lines changed)
@@ -733,6 +733,18 @@ dependencies = [
  "subtle",
 ]

+[[package]]
+name = "dns-lookup"
+version = "1.0.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "53ecafc952c4528d9b51a458d1a8904b81783feff9fde08ab6ed2545ff396872"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "socket2",
+ "winapi",
+]
+
 [[package]]
 name = "either"
 version = "1.8.1"
@@ -800,7 +812,7 @@ dependencies = [
  "atomic",
  "pear",
  "serde",
- "toml 0.5.11",
+ "toml",
  "uncased",
  "version_check",
 ]
@@ -1424,7 +1436,7 @@ dependencies = [
  "serde_json",
  "sha2",
  "thiserror",
- "toml 0.7.2",
+ "toml_edit",
  "uuid",
 ]

@@ -1448,7 +1460,9 @@ name = "lqos_node_manager"
 version = "0.1.0"
 dependencies = [
  "anyhow",
+ "dashmap",
  "default-net",
+ "dns-lookup",
  "jemallocator",
  "lqos_bus",
  "lqos_config",
@@ -2773,18 +2787,6 @@ dependencies = [
  "serde",
 ]

-[[package]]
-name = "toml"
-version = "0.7.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f7afcae9e3f0fe2c370fd4657108972cbb2fa9db1b9f84849cefd80741b01cb6"
-dependencies = [
- "serde",
- "serde_spanned",
- "toml_datetime",
- "toml_edit",
-]
-
 [[package]]
 name = "toml_datetime"
 version = "0.6.1"
@@ -6,7 +6,7 @@ license = "GPL-2.0-only"

 [dependencies]
 thiserror = "1"
-toml = "0"
+toml_edit = { version = "0", features = [ "serde" ] }
 serde = { version = "1.0", features = [ "derive" ] }
 serde_json = "1"
 csv = "1"
@@ -68,7 +68,7 @@ impl WebUsers {

   fn save_to_disk(&self) -> Result<(), AuthenticationError> {
     let path = Self::path()?;
-    let new_contents = toml::to_string(&self);
+    let new_contents = toml_edit::ser::to_string(&self);
     if let Err(e) = new_contents {
       return Err(AuthenticationError::SerializationError(e));
     }
@@ -109,7 +109,7 @@ impl WebUsers {
     } else {
       // Load from disk
       if let Ok(raw) = read_to_string(path) {
-        let parse_result = toml::from_str(&raw);
+        let parse_result = toml_edit::de::from_str(&raw);
         if let Ok(users) = parse_result {
           Ok(users)
         } else {
@@ -255,7 +255,7 @@ pub enum AuthenticationError {
   #[error("Unable to load /etc/lqos.conf")]
   UnableToLoadEtcLqos,
   #[error("Unable to serialize to TOML")]
-  SerializationError(toml::ser::Error),
+  SerializationError(toml_edit::ser::Error),
   #[error("Unable to remove existing web users file")]
   UnableToDelete,
   #[error("Unable to open lqusers.toml for writing. Check permissions?")]
@@ -1,6 +1,7 @@
 //! Manages the `/etc/lqos.conf` file.
 use log::error;
 use serde::{Deserialize, Serialize};
+use toml_edit::{Document, value};
 use std::{fs, path::Path};
 use thiserror::Error;

@@ -136,17 +137,27 @@ impl EtcLqos {
       return Err(EtcLqosError::ConfigDoesNotExist);
     }
     if let Ok(raw) = std::fs::read_to_string("/etc/lqos.conf") {
-      let config_result: Result<Self, toml::de::Error> = toml::from_str(&raw);
-      match config_result {
-        Ok(mut config) => {
-          check_config(&mut config);
-          Ok(config)
-        }
+      let document = raw.parse::<Document>();
+      match document {
         Err(e) => {
           error!("Unable to parse TOML from /etc/lqos.conf");
           error!("Full error: {:?}", e);
           Err(EtcLqosError::CannotParseToml)
         }
+        Ok(mut config_doc) => {
+          let cfg = toml_edit::de::from_document::<EtcLqos>(config_doc.clone());
+          match cfg {
+            Ok(mut cfg) => {
+              check_config(&mut config_doc, &mut cfg);
+              Ok(cfg)
+            }
+            Err(e) => {
+              error!("Unable to parse TOML from /etc/lqos.conf");
+              error!("Full error: {:?}", e);
+              Err(EtcLqosError::CannotParseToml)
+            }
+          }
+        }
       }
     } else {
       error!("Unable to read contents of /etc/lqos.conf");
@@ -156,7 +167,7 @@ impl EtcLqos {

   /// Saves changes made to /etc/lqos.conf
   /// Copies current configuration into /etc/lqos.conf.backup first
-  pub fn save(&self) -> Result<(), EtcLqosError> {
+  pub fn save(&self, document: &mut Document) -> Result<(), EtcLqosError> {
     let cfg_path = Path::new("/etc/lqos.conf");
     let backup_path = Path::new("/etc/lqos.conf.backup");
     if let Err(e) = std::fs::copy(cfg_path, backup_path) {
@@ -164,26 +175,17 @@ impl EtcLqos {
       log::error!("{e:?}");
       return Err(EtcLqosError::BackupFail);
     }
-    let new_cfg = toml::to_string_pretty(&self);
-    match new_cfg {
-      Err(e) => {
-        log::error!("Unable to serialize new /etc/lqos.conf");
-        log::error!("{e:?}");
-        return Err(EtcLqosError::SerializeFail);
-      }
-      Ok(new_cfg) => {
-        if let Err(e) = fs::write(cfg_path, new_cfg) {
-          log::error!("Unable to write to /etc/lqos.conf");
-          log::error!("{e:?}");
-          return Err(EtcLqosError::WriteFail);
-        }
-      }
+    let new_cfg = document.to_string();
+    if let Err(e) = fs::write(cfg_path, new_cfg) {
+      log::error!("Unable to write to /etc/lqos.conf");
+      log::error!("{e:?}");
+      return Err(EtcLqosError::WriteFail);
     }
     Ok(())
   }
 }

-fn check_config(cfg: &mut EtcLqos) {
+fn check_config(cfg_doc: &mut Document, cfg: &mut EtcLqos) {
   use sha2::digest::Update;
   use sha2::Digest;

@@ -191,6 +193,12 @@ fn check_config(cfg: &mut EtcLqos) {
     if let Ok(machine_id) = std::fs::read_to_string("/etc/machine-id") {
       let hash = sha2::Sha256::new().chain(machine_id).finalize();
       cfg.node_id = Some(format!("{:x}", hash));
+      cfg_doc["node_id"] = value(format!("{:x}", hash));
+      println!("Updating");
+      if let Err(e) = cfg.save(cfg_doc) {
+        log::error!("Unable to save /etc/lqos.conf");
+        log::error!("{e:?}");
+      }
     }
   }
 }
@@ -212,3 +220,23 @@ pub enum EtcLqosError {
   #[error("Unable to write to /etc/lqos.conf")]
   WriteFail,
 }

+#[cfg(test)]
+mod test {
+  const EXAMPLE_LQOS_CONF: &str = include_str!("../../../lqos.example");
+
+  #[test]
+  fn round_trip_toml() {
+    let doc = EXAMPLE_LQOS_CONF.parse::<toml_edit::Document>().unwrap();
+    let reserialized = doc.to_string();
+    assert_eq!(EXAMPLE_LQOS_CONF, reserialized);
+  }
+
+  #[test]
+  fn add_node_id() {
+    let mut doc = EXAMPLE_LQOS_CONF.parse::<toml_edit::Document>().unwrap();
+    doc["node_id"] = toml_edit::value("test");
+    let reserialized = doc.to_string();
+    assert!(reserialized.contains("node_id = \"test\""));
+  }
+}
@@ -133,6 +133,7 @@ pub fn read_flows() {
   });
 }

+/// Expire flows that have not been seen in a while.
 pub fn expire_heimdall_flows() {
   if let Ok(now) = time_since_boot() {
     let since_boot = Duration::from(now);
@@ -142,6 +143,7 @@ pub fn expire_heimdall_flows() {
   }
 }

+/// Get the flow stats for a given IP address.
 pub fn get_flow_stats(ip: XdpIpAddress) -> BusResponse {
   let mut result = Vec::new();
@@ -1,8 +1,9 @@
 //! Provides an interface to the Heimdall packet watching
 //! system. Heimdall watches traffic flows, and is notified
 //! about their contents via the eBPF Perf system.

+#![warn(missing_docs)]
 mod config;
+/// Interface to the performance tracking system
 pub mod perf_interface;
 pub mod stats;
 pub use config::{HeimdalConfig, HeimdallMode};
@@ -6,21 +6,37 @@ use crate::timeline::store_on_timeline;
 /// This constant MUST exactly match PACKET_OCTET_STATE in heimdall.h
 pub(crate) const PACKET_OCTET_SIZE: usize = 128;

+/// A representation of the eBPF `heimdall_event` type.
+/// This is the type that is sent from the eBPF program to userspace.
+/// It is a representation of the `heimdall_event` type in heimdall.h
 #[derive(FromBytes, Debug, Clone, PartialEq, Eq, Hash)]
 #[repr(C)]
 pub struct HeimdallEvent {
+  /// Timestamp of the event, in nanoseconds since boot time.
   pub timestamp: u64,
+  /// Source IP address
   pub src: XdpIpAddress,
+  /// Destination IP address
   pub dst: XdpIpAddress,
+  /// Source port number, or ICMP type.
   pub src_port: u16,
+  /// Destination port number.
   pub dst_port: u16,
+  /// IP protocol number
   pub ip_protocol: u8,
+  /// IP header TOS value
   pub tos: u8,
+  /// Total size of the packet, in bytes
   pub size: u32,
+  /// TCP flags
   pub tcp_flags: u8,
+  /// TCP window size
   pub tcp_window: u16,
+  /// TCP timestamp value (TSval)
   pub tcp_tsval: u32,
+  /// TCP timestamp echo reply (TSecr)
   pub tcp_tsecr: u32,
+  /// Raw packet data
   pub packet_data: [u8; PACKET_OCTET_SIZE],
 }
@@ -150,6 +150,13 @@ pub fn hyperfocus_on_target(ip: XdpIpAddress) -> Option<(usize, usize)> {
   }
 }

+/// Request a dump of the packet headers collected during a hyperfocus session.
+/// This will return `None` if the session id is invalid or the session has
+/// expired.
+/// ## Returns
+/// * Either `None` or a vector of packet headers.
+/// ## Arguments
+/// * `session_id` - The session id of the hyperfocus session.
 pub fn n_second_packet_dump(session_id: usize) -> Option<Vec<PacketHeader>> {
   if let Some(session) = FOCUS_SESSIONS.get(&session_id) {
     Some(session.data.iter().map(|e| e.as_header()).collect())
@@ -158,6 +165,14 @@ pub fn n_second_packet_dump(session_id: usize) -> Option<Vec<PacketHeader>> {
   }
 }

+/// Request a dump of the packet headers collected during a hyperfocus session,
+/// in LibPCAP format. This will return `None` if the session id is invalid or
+/// the session has expired, or the temporary filename used to store the dump
+/// if it is available.
+/// ## Returns
+/// * Either `None` or the filename of the dump.
+/// ## Arguments
+/// * `session_id` - The session id of the hyperfocus session.
 pub fn n_second_pcap(session_id: usize) -> Option<String> {
   if let Some(mut session) = FOCUS_SESSIONS.get_mut(&session_id) {
     let filename = format!("/tmp/cap_sess_{session_id}");
@@ -19,6 +19,8 @@ sysinfo = "0"
 default-net = "0"
 nix = "0"
 once_cell = "1"
+dns-lookup = "1"
+dashmap = "5"

 # Support JemAlloc on supported platforms
 [target.'cfg(any(target_arch = "x86", target_arch = "x86_64"))'.dependencies]
@@ -77,6 +77,7 @@ fn rocket() -> _ {
       queue_info::packet_dump,
       queue_info::pcap,
       queue_info::request_analysis,
+      queue_info::dns_query,
       config_control::get_nic_list,
       config_control::get_current_python_config,
       config_control::get_current_lqosd_config,
@@ -1,6 +1,6 @@
 use crate::auth_guard::AuthGuard;
 use crate::cache_control::NoCache;
-use crate::tracker::SHAPED_DEVICES;
+use crate::tracker::{SHAPED_DEVICES, lookup_dns};
 use lqos_bus::{bus_request, BusRequest, BusResponse, FlowTransport, PacketHeader, QueueStoreTransit};
 use rocket::fs::NamedFile;
 use rocket::http::Status;
@@ -162,6 +162,15 @@ pub async fn pcap(id: usize, filename: String) -> Result<NoCache<NamedFile>, Status> {
   Err(Status::NotFound)
 }

+#[get("/api/dns/<ip>")]
+pub async fn dns_query(ip: String) -> NoCache<String> {
+  if let Ok(ip) = ip.parse::<IpAddr>() {
+    NoCache::new(lookup_dns(ip))
+  } else {
+    NoCache::new(ip)
+  }
+}
+
 #[cfg(feature = "equinix_tests")]
 #[get("/api/run_btest")]
 pub async fn run_btest() -> NoCache<RawJson<String>> {
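A hedged usage sketch of the new endpoint from the client side (host and port are assumptions; adjust to wherever lqos_node_manager listens):

    import requests

    def ip_to_hostname(ip: str) -> str:
        # The endpoint echoes the input back if it does not parse as an IP,
        # so the return value is always printable.
        r = requests.get(f"http://localhost:9123/api/dns/{ip}", timeout=2)
        return r.text

    print(ip_to_hostname("1.1.1.1"))  # e.g. "one.one.one.one"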
src/rust/lqos_node_manager/src/tracker/cache/dns_cache.rs (new file, 38 lines)
@@ -0,0 +1,38 @@
+//! Implements a lock-free least-recently-used DNS cache.
+
+use std::net::IpAddr;
+use dashmap::DashMap;
+use dns_lookup::lookup_addr;
+use lqos_utils::unix_time::unix_now;
+use once_cell::sync::Lazy;
+
+const CACHE_SIZE: usize = 1000;
+
+struct DnsEntry {
+  hostname: String,
+  last_accessed: u64,
+}
+
+static DNS_CACHE: Lazy<DashMap<IpAddr, DnsEntry>> = Lazy::new(|| DashMap::with_capacity(CACHE_SIZE));
+
+pub fn lookup_dns(ip: IpAddr) -> String {
+  // If the cached value exists, just return it
+  if let Some(mut dns) = DNS_CACHE.get_mut(&ip) {
+    if let Ok(now) = unix_now() {
+      dns.last_accessed = now;
+    }
+    return dns.hostname.clone();
+  }
+
+  // If it doesn't, we'll be adding it. Evict the least-recently-used
+  // entry (oldest last_accessed) if the cache is full.
+  if DNS_CACHE.len() >= CACHE_SIZE {
+    let mut entries: Vec<(IpAddr, u64)> = DNS_CACHE.iter().map(|v| (*v.key(), v.last_accessed)).collect();
+    entries.sort_by(|a, b| a.1.cmp(&b.1));
+    DNS_CACHE.remove(&entries[0].0);
+  }
+  let hostname = lookup_addr(&ip).unwrap_or(ip.to_string());
+  DNS_CACHE.insert(ip, DnsEntry { hostname: hostname.clone(), last_accessed: unix_now().unwrap_or(0) });
+  hostname
+}
@@ -5,7 +5,9 @@
 mod cpu_ram;
 mod shaped_devices;
 mod throughput;
+mod dns_cache;

 pub use cpu_ram::*;
 pub use shaped_devices::*;
 pub use throughput::THROUGHPUT_BUFFER;
+pub use dns_cache::lookup_dns;
@@ -10,6 +10,7 @@ pub use cache::SHAPED_DEVICES;
 pub use cache_manager::{update_tracking, update_total_throughput_buffer};
 use lqos_bus::{bus_request, BusRequest, BusResponse, IpStats, TcHandle};
 use rocket::serde::{Deserialize, Serialize, msgpack::MsgPack};
+pub use cache::lookup_dns;

 #[derive(Serialize, Deserialize, Clone, Debug)]
 #[serde(crate = "rocket::serde")]
@@ -94,8 +94,9 @@
           </ul>
         </div>
         <div class="col-sm-2">
-          <a href="#" class="btn btn-small btn-info" id="btnPause"><i class="fa fa-pause"></i> Pause</a>
-          <a href="#" class="btn btn-small btn-info" id="btnSlow"><i class="fa fa-hourglass"></i> Slow Mode</a>
+          <a href="#" class="btn btn-small btn-info" id="btnPause"><i class="fa fa-pause"></i> Pause</a>
+          <a href="#" class="btn btn-small btn-info" id="btnSlow"><i class="fa fa-hourglass"></i> Slow
+            Mode</a>
         </div>
       </div>
     </div>
@@ -385,16 +386,17 @@
           { x: this.x_axis, y: this.backlog.tins[2].y, type: 'scattergl', mode: 'markers', name: 'Video', marker: { size: 4 } },
           { x: this.x_axis, y: this.backlog.tins[3].y, type: 'scattergl', mode: 'markers', name: 'Voice', marker: { size: 4 } },
         ];

         if (this.backlogPlotted == null) {
           this.backlogPlotted = true;
           Plotly.newPlot(
-            graph,
-            graphData,
-            {
-              margin: { l: 0, r: 0, b: 0, t: 0, pad: 4 },
-              yaxis: { automargin: true, title: "Bytes" },
-              xaxis: { automargin: true, title: "Time since now" } });
+            graph,
+            graphData,
+            {
+              margin: { l: 0, r: 0, b: 0, t: 0, pad: 4 },
+              yaxis: { automargin: true, title: "Bytes" },
+              xaxis: { automargin: true, title: "Time since now" }
+            });
         } else {
           Plotly.redraw(graph, graphData);
         }
@@ -411,11 +413,13 @@

         if (this.delaysPlotted == null) {
           Plotly.newPlot(
-            graph,
-            graphData,
-            { margin: { l: 8, r: 0, b: 0, t: 0, pad: 4 },
-              yaxis: { automargin: true, title: "log10(ms)", range: [-1.0, 1.0] },
-              xaxis: { automargin: true, title: "Time since now" } });
+            graph,
+            graphData,
+            {
+              margin: { l: 8, r: 0, b: 0, t: 0, pad: 4 },
+              yaxis: { automargin: true, title: "log10(ms)", range: [-1.0, 1.0] },
+              xaxis: { automargin: true, title: "Time since now" }
+            });
           this.delaysPlotted = true;
         } else {
           Plotly.redraw(graph, graphData);
@@ -441,7 +445,7 @@
         let graphData = [
           { x: this.x_axis, y: this.throughput.tins[tin].y, type: 'scatter', mode: 'markers' }
         ];
-        if (this.tinsPlotted == null) {
+        if (this.tinsPlotted == null) {
           Plotly.newPlot(graph, graphData, { margin: { l: 0, r: 0, b: 0, t: 0, pad: 4 }, yaxis: { automargin: true, title: "Bits" }, xaxis: { automargin: true, title: "Time since now" } });
         } else {
           Plotly.redraw(graph, graphData);
@@ -502,7 +506,7 @@
         this.per_ip = {};
         this.y = {};
         this.x_axis = [];
-        for (let i=0; i<capacity; ++i) {
+        for (let i = 0; i < capacity; ++i) {
           this.x_axis.push(i);
         }
@@ -514,7 +518,7 @@
       }

       clearQuantiles() {
-        for (let i=0; i<12; ++i) {
+        for (let i = 0; i < 12; ++i) {
           this.quantiles[0][i] = 0;
           this.quantiles[1][i] = 0;
         }
@@ -526,7 +530,7 @@
         if (!this.per_ip.hasOwnProperty(ip)) {
           this.per_ip[ip] = [];
           this.y[ip] = [];
-          for (let i=0; i<this.capacity; ++i) {
+          for (let i = 0; i < this.capacity; ++i) {
             this.per_ip[ip].push(0);
             this.y[ip].push(0);
@@ -536,7 +540,7 @@
         this.per_ip[ip][this.head] = down;
         this.per_ip[ip][this.head + 1] = 0.0 - up;
         this.head += 2;
-        if (this.head > this.capacity*2) {
+        if (this.head > this.capacity * 2) {
           this.head = 0;
         }
       }
@@ -557,30 +561,30 @@
       prepare() {
         this.clearQuantiles();
         for (const ip in this.per_ip) {
-          let counter = this.capacity*2;
-          for (let i = this.head; i < this.capacity*2; i++) {
+          let counter = this.capacity * 2;
+          for (let i = this.head; i < this.capacity * 2; i++) {
             this.y[ip][counter] = this.per_ip[ip][i];
             counter--;
           }
           for (let i = 0; i < this.head; i++) {
             this.y[ip][counter] = this.per_ip[ip][i];
             counter--;
           }
-          for (let i=2; i<22; i+=2) {
-            this.addQuantile(this.y[ip][i], this.y[ip][i+1]);
+          for (let i = 2; i < 22; i += 2) {
+            this.addQuantile(this.y[ip][i], this.y[ip][i + 1]);
           }
         }
       }

-      plot() {
-        let graph = document.getElementById("throughputGraph");
+      plot(target) {
+        let graph = document.getElementById(target);
         let graphData = [];
         for (const ip in this.per_ip) {
           graphData.push({ x: this.x_axis, y: this.y[ip], name: ip, mode: 'markers', type: 'scattergl', marker: { size: 3 } });
         }
-        if (this.plotted == null) {
+        if (!this.hasOwnProperty("plotted" + target)) {
           Plotly.newPlot(graph, graphData, { margin: { l: 0, r: 0, b: 0, t: 0, pad: 4 }, yaxis: { automargin: true, title: "Traffic (bits)" }, xaxis: { automargin: true, title: "Time since now" } });
-          this.plotted = true;
+          this["plotted" + target] = true;
         } else {
           Plotly.redraw(graph, graphData);
         }
@@ -608,7 +612,7 @@
         msgPackGet("/api/circuit_throughput/" + encodeURI(id), (data) => {
           if (tpData == null) tpData = new ThroughputMonitor(300);
           ips = [];
-          for (let i=0; i < data.length; i++) {
+          for (let i = 0; i < data.length; i++) {
             let ip = data[i][0];
             ips.push(ip);
             let down = data[i][1];
@@ -616,13 +620,13 @@
             tpData.ingest(ip, down, up);
           }
           tpData.prepare();
-          tpData.plot();
+          tpData.plot("throughputGraph");
           tpData.plotQuantiles();
         });
       }
     }

-    let funnels = new MultiRingBuffer(300);
+    let funnels = new ThroughputMonitor(300);
     let rtts = {};
     let circuitId = "";
     let builtFunnelDivs = false;
@@ -635,8 +639,24 @@
       circuitId = encodeURI(id);
       msgPackGet("/api/funnel_for_queue/" + circuitId, (data) => {
         let html = "";

+        // Add the client on top
+        let row = "<div class='row row220'>";
+        row += "<div class='col-sm-12'>";
+        row += "<div class='card bg-light'>";
+        row += "<h5 class='card-title'><i class='fa fa-hourglass'></i> Client Throughput</h5>";
+        row += "<div id='tp_client' class='graph98 graph150'></div>";
+        row += "</div>";
+        row += "</div>";
+        row += "</div>";
+        html += row;
+
+        // Funnels
         for (let i = 0; i < data.length; ++i) {
-          funnels.push(data[i][0], data[i][1][NetTrans.current_throughput][0] * 8, data[i][1][NetTrans.current_throughput][1] * 8);
+          //funnels.push(data[i][0], data[i][1][NetTrans.current_throughput][0] * 8, data[i][1][NetTrans.current_throughput][1] * 8);
+          funnels.ingest(data[i][0], data[i][1][NetTrans.current_throughput][0] * 8, data[i][1][NetTrans.current_throughput][1] * 8);
           rtts[data[i][0]] = new RttHistogram();

           let row = "<div class='row row220'>";
@@ -660,33 +680,38 @@
         }
         $("#pills-funnel").html(html);
         builtFunnelDivs = true;
+        setTimeout(plotFunnels, 10);
       });
     }

     let plottedFunnels = {};

     function plotFunnels() {
+      if (tpData != null) tpData.plot("tp_client");
       funnels.prepare();
       msgPackGet("/api/funnel_for_queue/" + encodeURI(circuitId), (data) => {
         for (let i = 0; i < data.length; ++i) {
-          funnels.push(data[i][0], data[i][1][NetTrans.current_throughput][0] * 8, data[i][1][NetTrans.current_throughput][1] * 8);
-          for (const [k, v] of Object.entries(funnels.data)) {
-            let target_div = "tp" + k;
-            let graphData = v.toScatterGraphData();
-            let graph = document.getElementById(target_div);
-            if (!plotFunnels.hasOwnProperty(target_div)) {
-              Plotly.newPlot(graph, graphData, { margin: { l: 0, r: 0, b: 0, t: 0, pad: 4 }, yaxis: { automargin: true }, xaxis: { automargin: true, title: "Time since now" } }, { responsive: true });
-            } else {
-              Plotly.redraw(graph, graphData);
-            }
-          }
           rtts[data[i][0]].clear();
+          funnels.ingest(data[i][0], data[i][1][NetTrans.current_throughput][0] * 8, data[i][1][NetTrans.current_throughput][1] * 8);
           for (let j = 0; j < data[i][1][NetTrans.rtts].length; j++) {
             rtts[data[i][0]].push(data[i][1][NetTrans.rtts][j]);
           }
           rtts[data[i][0]].plot("rtt" + data[i][0]);
         }

+        for (const [k, v] of Object.entries(funnels.y)) {
+          let target_div = "tp" + k;
+          let graphData = [
+            { x: funnels.x_axis, y: v, type: 'scatter', mode: 'markers', marker: { size: 3 } }
+          ];
+          let graph = document.getElementById(target_div);
+          if (!plotFunnels.hasOwnProperty(target_div)) {
+            Plotly.newPlot(graph, graphData, { margin: { l: 0, r: 0, b: 0, t: 0, pad: 4 }, yaxis: { automargin: true, title: "Traffic (bits)" }, xaxis: { automargin: true, title: "Time since now" } });
+          } else {
+            Plotly.redraw(graph, graphData);
+          }
+        }
       });
     }
@@ -788,13 +813,13 @@
         }
         html += "<tr>";
         html += "<td>" + data[i][0][FlowTrans.proto] + "</td>";
-        html += "<td>" + data[i][0][FlowTrans.src] + "</td>";
+        html += "<td>" + ipToHostname(data[i][0][FlowTrans.src]) + "</td>";
         if (data[i][0].proto == "ICMP") {
           html += "<td>" + icmpType(data[i][0][FlowTrans.src_port]) + "</td>";
         } else {
           html += "<td>" + data[i][0][FlowTrans.src_port] + "</td>";
         }
-        html += "<td>" + data[i][0][FlowTrans.dst] + "</td>";
+        html += "<td>" + ipToHostname(data[i][0][FlowTrans.dst]) + "</td>";
         if (data[i][0][FlowTrans.proto] == "ICMP") {
           if (data[i][1] != null) {
             html += "<td>" + icmpType(data[i][1][FlowTrans.src_port]) + "</td>";
@@ -828,14 +853,14 @@
     var slowMode = false;

     function showFps() {
-      if(!lastCalledTime) {
+      if (!lastCalledTime) {
         lastCalledTime = Date.now();
         fps = 0;
         return;
       }
-      delta = (Date.now() - lastCalledTime)/1000;
+      delta = (Date.now() - lastCalledTime) / 1000;
       lastCalledTime = Date.now();
-      fps = 1/delta;
+      fps = 1 / delta;
       //$("#fps").text(fps.toFixed(0));
       worstDelta = Math.max(delta, worstDelta);
     }
@@ -846,6 +871,7 @@
       switch (activeTab) {
         case "pills-funnel-tab": {
           getFunnel();
+          getThroughput();
         } break;
         case "pills-flows-tab": {
           getFlows();
@@ -447,4 +447,20 @@ function zero_to_null(array) {
   for (let i=0; i<array.length; ++i) {
     if (array[i] == 0) array[i] = null;
   }
 }

+var dnsCache = {};
+
+function ipToHostname(ip) {
+  if (dnsCache.hasOwnProperty(ip)) {
+    if (dnsCache[ip] != ip) {
+      return ip + "<br /><span style='font-size: 6pt'>" + dnsCache[ip] + "</span>";
+    } else {
+      return ip;
+    }
+  }
+  $.get("/api/dns/" + encodeURI(ip), (hostname) => {
+    dnsCache[ip] = hostname;
+  })
+  return ip;
+}
@@ -3,6 +3,10 @@ use crate::{
 };
 use lqos_bus::BusResponse;

+/// Retrieves the raw queue data for a given circuit ID.
+///
+/// # Arguments
+/// * `circuit_id` - The circuit ID to retrieve data for.
 pub fn get_raw_circuit_data(circuit_id: &str) -> BusResponse {
   still_watching(circuit_id);
   if let Some(circuit) = CIRCUIT_TO_QUEUE.get(circuit_id) {
@@ -2,6 +2,12 @@ use std::sync::atomic::AtomicU64;

 pub(crate) static QUEUE_MONITOR_INTERVAL: AtomicU64 = AtomicU64::new(1000);

+/// Sets the interval at which the queue monitor thread will poll the
+/// Linux `tc` shaper for queue statistics.
+///
+/// # Arguments
+/// * `interval_ms` - The interval, in milliseconds, at which the queue
+///   monitor thread will poll the Linux `tc` shaper for queue statistics.
 pub fn set_queue_refresh_interval(interval_ms: u64) {
   QUEUE_MONITOR_INTERVAL
     .store(interval_ms, std::sync::atomic::Ordering::Relaxed);
@@ -1,3 +1,10 @@
+//! Retrieves queue statistics from the Linux `tc` shaper, and stores
+//! them in a `QueueStore` for later retrieval. The `QueueStore` is
+//! thread-safe, and can be accessed from multiple threads. It is
+//! updated periodically by a separate thread, and accumulates statistics
+//! between polling periods.
+
+#![warn(missing_docs)]
 mod bus;
 mod circuit_to_queue;
 mod interval;
@@ -76,6 +76,9 @@ fn track_queues() {
   expire_watched_queues();
 }

+/// Spawns a thread that periodically reads the queue statistics from
+/// the Linux `tc` shaper, and stores them in a `QueueStore` for later
+/// retrieval.
 pub fn spawn_queue_monitor() {
   std::thread::spawn(|| {
     // Setup the queue monitor loop
@@ -31,6 +31,13 @@ pub fn expiration_in_the_future() -> u64 {
   unix_now().unwrap_or(0) + 10
 }

+/// Start watching a queue. This will cause the queue to be read
+/// periodically, and its statistics stored in the `QueueStore`.
+/// If the queue is already being watched, this function will
+/// do nothing.
+///
+/// # Arguments
+/// * `circuit_id` - The circuit ID to watch
 pub fn add_watched_queue(circuit_id: &str) {
   //info!("Watching queue {circuit_id}");
   let max = num_possible_cpus().unwrap() * 2;
@@ -74,6 +81,11 @@ pub(crate) fn expire_watched_queues() {
   WATCHED_QUEUES.retain(|_,w| w.expires_unix_time > now);
 }

+/// Indicates that a watched queue is still being watched. Updates the
+/// expiration time for the queue.
+///
+/// # Arguments
+/// * `circuit_id` - The circuit ID to watch
 pub fn still_watching(circuit_id: &str) {
   if let Some(mut q) = WATCHED_QUEUES.get_mut(circuit_id) {
     //info!("Still watching circuit: {circuit_id}");
@@ -345,8 +345,7 @@ static __always_inline void snoop(struct dissector_t *dissector)
         parse_tcp_ts(hdr, dissector->end, &dissector->tsval, &dissector->tsecr);
       }
     }
-    break;
-  }
+  } break;
   case IPPROTO_UDP:
   {
     struct udphdr *hdr = get_udp_header(dissector);
@@ -354,12 +353,13 @@ static __always_inline void snoop(struct dissector_t *dissector)
     {
       if (hdr + 1 > dissector->end)
       {
        bpf_debug("UDP header past end");
        return;
       }
       dissector->src_port = hdr->source;
       dissector->dst_port = hdr->dest;
     }
-  }
+  } break;
   case IPPROTO_ICMP:
   {
     struct icmphdr *hdr = get_icmp_header(dissector);
@@ -367,14 +367,14 @@ static __always_inline void snoop(struct dissector_t *dissector)
     {
       if ((char *)hdr + sizeof(struct icmphdr) > dissector->end)
       {
        bpf_debug("ICMP header past end");
        return;
       }
       dissector->ip_protocol = 1;
       dissector->src_port = bpf_ntohs(hdr->type);
-      dissector->dst_port = bpf_ntohs(hdr->type);
+      dissector->dst_port = bpf_ntohs(hdr->code);
     }
-  }
-  break;
+  } break;
   }
 }
@@ -5,6 +5,13 @@ use nix::sys::{
 };
 use std::sync::atomic::AtomicBool;

+/// `periodic` runs a function at a given interval.
+///
+/// ## Parameters
+///
+/// * `interval_ms`: the interval in milliseconds.
+/// * `task_name`: the name of the task to run.
+/// * `tick_function`: the function to run at the given interval.
 pub fn periodic(
   interval_ms: u64,
   task_name: &str,
@@ -145,10 +145,14 @@ impl FileWatcher {
   }
 }

+/// Errors that can occur when watching a file.
 #[derive(Error, Debug)]
 pub enum WatchedFileError {
+  /// Unable to create the file watcher.
   #[error("Unable to create watcher")]
   CreateWatcherError,

+  /// Unable to start the file watcher system.
   #[error("Unable to start watcher")]
   StartWatcherError,
 }
@@ -37,6 +37,7 @@ pub fn read_hex_string(s: &str) -> Result<u32, HexParseError> {
 /// parsing a string into a `u32` hex number.
 #[derive(Error, Debug)]
 pub enum HexParseError {
+  /// The hex string could not be decoded
   #[error("Unable to decode string into valid hex")]
   ParseError,
 }
@@ -1,10 +1,24 @@
+//! Collection of utility functions for LibreQoS
+
+#![warn(missing_docs)]
 mod commands;

+/// Provides a Linux file-descriptor based timing service.
 pub mod fdtimer;

+/// Wrapper for watching when a file has changed.
 pub mod file_watcher;

+/// Utilities for handling strings in hex format
 pub mod hex_string;

+/// Utilities for scaling bits and packets to human-readable format
 pub mod packet_scale;
 mod string_table_enum;

+/// Utilities dealing with Unix Timestamps
 pub mod unix_time;
 mod xdp_ip_address;

+/// XDP compatible IP Address
 pub use xdp_ip_address::XdpIpAddress;
@@ -1,23 +1,49 @@
+/// Scale a number of packets to a human readable string.
+///
+/// ## Parameters
+/// * `n`: the number of packets to scale
 pub fn scale_packets(n: u64) -> String {
-  if n > 1_000_000_000 {
+  if n >= 1_000_000_000 {
     format!("{:.2} gpps", n as f32 / 1_000_000_000.0)
-  } else if n > 1_000_000 {
+  } else if n >= 1_000_000 {
     format!("{:.2} mpps", n as f32 / 1_000_000.0)
-  } else if n > 1_000 {
+  } else if n >= 1_000 {
     format!("{:.2} kpps", n as f32 / 1_000.0)
   } else {
     format!("{n} pps")
   }
 }

+/// Scale a number of bits to a human readable string.
+///
+/// ## Parameters
+/// * `n`: the number of bits to scale
 pub fn scale_bits(n: u64) -> String {
-  if n > 1_000_000_000 {
+  if n >= 1_000_000_000 {
     format!("{:.2} gbit/s", n as f32 / 1_000_000_000.0)
-  } else if n > 1_000_000 {
+  } else if n >= 1_000_000 {
     format!("{:.2} mbit/s", n as f32 / 1_000_000.0)
-  } else if n > 1_000 {
+  } else if n >= 1_000 {
     format!("{:.2} kbit/s", n as f32 / 1_000.0)
   } else {
     format!("{n} bit/s")
   }
 }

+#[cfg(test)]
+mod test {
+  #[test]
+  fn test_scale_packets() {
+    assert_eq!(super::scale_packets(1), "1 pps");
+    assert_eq!(super::scale_packets(1000), "1.00 kpps");
+    assert_eq!(super::scale_packets(1000000), "1.00 mpps");
+    assert_eq!(super::scale_packets(1000000000), "1.00 gpps");
+  }
+
+  #[test]
+  fn test_scale_bits() {
+    assert_eq!(super::scale_bits(1), "1 bit/s");
+    assert_eq!(super::scale_bits(1000), "1.00 kbit/s");
+    assert_eq!(super::scale_bits(1000000), "1.00 mbit/s");
+    assert_eq!(super::scale_bits(1000000000), "1.00 gbit/s");
+  }
+}
@@ -1,3 +1,9 @@
+/// Helper macro to create an enum that can be serialized to a string
+/// and deserialized from a string.
+///
+/// ## Parameters
+/// * `$enum_name`: the name of the enum to create
+/// * `$($option:ident),*`: the options of the enum
 #[macro_export]
 macro_rules! string_table_enum {
   ($enum_name: ident, $($option:ident),*) => {
@@ -36,6 +42,13 @@ macro_rules! string_table_enum {
   };
 }

+/// Helper macro to create an enum that can be serialized to a string
+/// and deserialized from a string. Adds explicit support for dashes
+/// in identifiers.
+///
+/// ## Parameters
+/// * `$enum_name`: the name of the enum to create
+/// * `$($option:ident),*`: the options of the enum
 #[macro_export]
 macro_rules! dashy_table_enum {
   ($enum_name: ident, $($option:ident),*) => {
@@ -32,8 +32,10 @@ pub fn time_since_boot() -> Result<TimeSpec, TimeError> {
   }
 }

+/// Error type for time functions.
 #[derive(Error, Debug)]
 pub enum TimeError {
+  /// The clock isn't ready yet.
   #[error("Clock not ready")]
   ClockNotReady,
 }