mirror of https://github.com/LibreQoE/LibreQoS.git
synced 2025-02-25 18:55:32 -06:00

Merge branch 'develop' of https://github.com/LibreQoE/LibreQoS into develop

.gitignore (vendored): 16 changed lines
@@ -56,6 +56,14 @@ src/webusers.toml
src/lqusers.toml
src/dist
src/rust/lqos_anonymous_stats_server/anonymous.sqlite
src/rust/long_term_stats/license_server/lqkeys.bin
src/rust/long_term_stats/lts_node/lqkeys.bin
src/rust/long_term_stats/pgdb/.env
src/rust/long_term_stats/site_build/node_modules
src/rust/long_term_stats/site_build/output
src/rust/long_term_stats/site_build/package-lock.json
src/rust/long_term_stats/wasm_pipe/staging
src/rust/long_term_stats/lts_node/deploy.sh

# Ignore Rust build artifacts
src/rust/target
@@ -105,3 +113,11 @@ tramp

# virtual environments
.venv
venv
src/integrationUISPbandwidths.template.csv
src/lts_keys.bin
src/network
src/network.json.good
src/network.pdf
src/ShapedDevices.csv.good
.gitignore
src/rust/lqosd/lts_keys.bin
.readthedocs.yaml (new file): 64 lines
@@ -0,0 +1,64 @@
# Read the Docs configuration file for Sphinx projects
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details

# Required
version: 2

# Set the OS, Python version and other tools you might need
build:
  os: ubuntu-22.04
  tools:
    python: "3.11"
    # You can also specify other tool versions:
    # nodejs: "20"
    # rust: "1.70"
    # golang: "1.20"

# Build documentation in the "docs/" directory with Sphinx
sphinx:
  configuration: conf.py
  # You can configure Sphinx to use a different builder, for instance use the dirhtml builder for simpler URLs
  # builder: "dirhtml"
  # Fail on all warnings to avoid broken references
  # fail_on_warning: true

# Optionally build your docs in additional formats such as PDF and ePub
# formats:
#   - pdf
#   - epub

# Optional but recommended, declare the Python requirements required
# to build your documentation
# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
python:
  install:
    - requirements: requirements.txt
README.md: 96 changed lines
@@ -15,104 +15,12 @@ Learn more about [Equinix Metal here](https://deploy.equinix.com/metal/).

Please support the continued development of LibreQoS by sponsoring us via [GitHub Sponsors](https://github.com/sponsors/LibreQoE) or [Patreon](https://patreon.com/libreqos).

## Documentation
## Documentation / Get Started / System Requirements

[Docs](https://libreqos.readthedocs.io)
[ReadTheDocs](https://libreqos.readthedocs.io/en/latest/)

## Matrix Chat

Our Matrix chat channel is available at [https://matrix.to/#/#libreqos:matrix.org](https://matrix.to/#/#libreqos:matrix.org).

<img alt="LibreQoS" src="https://user-images.githubusercontent.com/22501920/223866474-603e1112-e2e6-4c67-93e4-44c17b1b7c43.png">

## Features

### Flexible Hierarchical Shaping / Back-Haul Congestion Mitigation

<img src="https://raw.githubusercontent.com/LibreQoE/LibreQoS/main/docs/nestedHTB2.png" width="350"></img>

Starting in version v1.1+, operators can map their network hierarchy in LibreQoS. This enables both simple network hierarchies (Site>AP>Client) as well as much more complex ones (Site>Site>Micro-PoP>AP>Site>AP>Client). This can be used to ensure that a given site's peak bandwidth will not exceed the capacity of its back-haul links (back-haul congestion control). Operators can support more users on the same network equipment with LibreQoS than with competing QoE solutions which only shape by AP and Client.
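For a concrete feel of what that mapping looks like, here is a minimal sketch using the `NetworkGraph`/`NetworkNode` helpers that appear later in this changeset. All IDs and rates are invented for illustration, and it assumes a working LibreQoS checkout with `ispConfig.py` in place:

```python
# A hypothetical three-tier map: Site > AP > Client.
from integrationCommon import NetworkGraph, NetworkNode, NodeType

net = NetworkGraph()
net.addRawNode(NetworkNode(id="Site_1", displayName="Site_1", type=NodeType.site, download=1000, upload=1000))
net.addRawNode(NetworkNode(id="AP_A", displayName="AP_A", parentId="Site_1", type=NodeType.ap, download=500, upload=500))
net.addRawNode(NetworkNode(id="Customer_1", displayName="The Doe Family", parentId="AP_A", type=NodeType.client, download=100, upload=20))
net.createShapedDevices()  # Write ShapedDevices.csv from the tree, as in the integration docs later in this diff
```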
### CAKE

CAKE is the product of nearly a decade of development efforts to improve on fq\_codel. With the diffserv\_4 parameter enabled – CAKE groups traffic into Bulk, Best Effort, Video, and Voice. This means that without having to fine-tune traffic priorities as you would with DPI products – CAKE automatically ensures your clients' OS update downloads will not disrupt their zoom calls. It allows for multiple video conferences to operate on the same connection which might otherwise "fight" for upload bandwidth, causing call disruptions. With work-from-home, remote learning, and tele-medicine becoming increasingly common – minimizing video call disruptions can save jobs, keep students engaged, and help ensure equitable access to medical care.

### XDP

Fast, multi-CPU queueing leveraging xdp-cpumap-tc and cpumap-pping. Currently tested in the real world past 11 Gbps (so far) with just 30% CPU use on a 16 core Intel Xeon Gold 6254. It's likely capable of 30 Gbps or more.

### Graphing

You can graph bandwidth and TCP RTT by client and node (Site, AP, etc.), using InfluxDB.

### CRM Integrations

## Server Recommendations
It is most cost-effective to buy a used server with specifications matching your unique requirements, as laid out in the System Requirements section below.
For those who do not have the time to do that, here are some off-the-shelf options to consider:
* 1 Gbps | [Supermicro SuperServer E100-9W-L](https://www.thinkmate.com/system/superserver-e100-9w-l)
* 10 Gbps | [Supermicro SuperServer 510T-ML (Choose E-2388G)](https://www.thinkmate.com/system/superserver-510t-ml)
* 20 Gbps | [Dell R450 Config](https://www.dell.com/en-us/shop/servers-storage-and-networking/poweredge-r450-rack-server/spd/poweredge-r450/pe_r450_15127_vi_vp?configurationid=a7663c54-6e4a-4c96-9a21-bc5a69d637ba)

The [AsRock 1U4LW-B6502L2T](https://www.thinkmate.com/system/asrock-1u4lw-b6502l2t/635744) can be a great lower-cost option as well.

## System Requirements
### VM or physical server
* For VMs, NIC passthrough is required for optimal throughput and latency (XDP vs generic XDP). Using Virtio / bridging is much slower than NIC passthrough. Virtio / bridging should not be used for large amounts of traffic.

### CPU
* 2 or more CPU cores
* A CPU with solid [single-thread performance](https://www.cpubenchmark.net/singleThread.html#server-thread) within your budget. Queuing is very CPU-intensive, and requires high single-thread performance.

Single-thread CPU performance will determine the max throughput of a single HTB (CPU core), and in turn, what max speed plan you can offer customers.

| Customer Max Plan | Passmark Single-Thread |
| ----------------- | ---------------------- |
| 100 Mbps | 1000 |
| 250 Mbps | 1500 |
| 500 Mbps | 2000 |
| 1 Gbps | 2500 |
| 2 Gbps | 3000 |

Below is a table of approximate aggregate throughput capacity, assuming a CPU with a [single thread](https://www.cpubenchmark.net/singleThread.html#server-thread) performance of 2700 or greater:

| Aggregate Throughput | CPU Cores |
| -------------------- | --------- |
| 500 Mbps | 2 |
| 1 Gbps | 4 |
| 5 Gbps | 6 |
| 10 Gbps | 8 |
| 20 Gbps | 16 |
| 50 Gbps* | 32 |

(* Estimated)

So for example, an ISP delivering 1Gbps service plans with 10Gbps aggregate throughput would choose a CPU with a 2500+ single-thread score and 8 cores, such as the Intel Xeon E-2388G @ 3.20GHz.
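As a planning aid, the two tables above can be folded into a tiny lookup helper. This is a sketch only: the thresholds are copied verbatim from the tables, and anything outside their range is not handled.

```python
# Plan Mbps -> minimum Passmark single-thread score (from the plan table above)
PLAN_TO_SINGLE_THREAD = {100: 1000, 250: 1500, 500: 2000, 1000: 2500, 2000: 3000}
# Aggregate Mbps -> CPU cores (from the aggregate throughput table above)
AGGREGATE_TO_CORES = {500: 2, 1000: 4, 5000: 6, 10000: 8, 20000: 16, 50000: 32}

def size_cpu(max_plan_mbps, aggregate_mbps):
    # Pick the smallest table entry that covers the requested plan / aggregate
    single_thread = next(score for plan, score in sorted(PLAN_TO_SINGLE_THREAD.items()) if plan >= max_plan_mbps)
    cores = next(c for agg, c in sorted(AGGREGATE_TO_CORES.items()) if agg >= aggregate_mbps)
    return single_thread, cores

# The worked example above: 1 Gbps plans, 10 Gbps aggregate
print(size_cpu(1000, 10000))  # (2500, 8) -> e.g. an Intel Xeon E-2388G
```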
### Memory
* Minimum RAM = 2 + (0.002 x Subscriber Count) GB
* Recommended RAM:

| Subscribers | RAM |
| ----------- | --- |
| 100 | 4 GB |
| 1,000 | 8 GB |
| 5,000 | 16 GB |
| 10,000* | 18 GB |
| 50,000* | 24 GB |

(* Estimated)
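The minimum-RAM guideline above, expressed as a one-line function (the subscriber counts are just examples):

```python
def minimum_ram_gb(subscriber_count):
    # Minimum RAM = 2 + (0.002 x Subscriber Count) GB
    return 2 + 0.002 * subscriber_count

print(minimum_ram_gb(100))   # 2.2 -> the table above recommends 4 GB
print(minimum_ram_gb(1000))  # 4.0 -> the table above recommends 8 GB
```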
### Network Interface Requirements
* One management network interface, completely separate from the traffic shaping interfaces. Usually this would be the Ethernet interface built into the motherboard.
* A dedicated Network Interface Card for the shaping interfaces:
  * NIC must have 2 or more interfaces for traffic shaping.
  * NIC must have multiple TX/RX transmit queues. [Here's how to check from the command line](https://serverfault.com/questions/772380/how-to-tell-if-nic-has-multiqueue-enabled); a Python version of the same check is sketched after this list.
  * Known supported cards:
    * [NVIDIA Mellanox MCX512A-ACAT](https://www.fs.com/products/119649.html)
    * NVIDIA Mellanox MCX416A-CCAT
    * [Intel X710](https://www.fs.com/products/75600.html)
    * Intel X520
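Here is the Python version of the multi-queue check mentioned above. It is a sketch of the same directory-counting logic that `LibreQoS.py` uses later in this diff; `eth1` is a placeholder interface name:

```python
import os

def tx_queue_count(interface):
    # Each hardware TX queue appears as a tx-N directory under /sys/class/net/<iface>/queues/
    path = f"/sys/class/net/{interface}/queues/"
    return sum(1 for entry in os.listdir(path) if entry.startswith("tx-"))

queues = tx_queue_count("eth1")
print(f"eth1 exposes {queues} TX queues; LibreQoS needs 2 or more per shaping interface")
```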
## Get Started
- [ReadTheDocs](https://libreqos.readthedocs.io/en/develop/docs/SystemRequirements/Compute.html)
conf.py: 2 changed lines
@@ -7,7 +7,7 @@
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information

project = "LibreQoE"
copyright = "2023, LibreQoE"
copyright = "2023, LibreQoE, LLC"
author = "Zach Biles"

# -- General configuration ---------------------------------------------------
@@ -18,8 +18,8 @@ Run ```sudo crontab -e``` and remove any entries pertaining to LibreQoS from v1.
Use the deb package from the [latest v1.4 release](https://github.com/LibreQoE/LibreQoS/releases/).

```shell
sudo echo "deb http://stats.libreqos.io/ubuntu jammy main" > /etc/apt/sources.list.d/libreqos.list
wget -O - -q http://stats.libreqos.io/repo.asc | apt-key add -
sudo echo "deb http://stats.libreqos.io/ubuntu jammy main" | sudo tee -a /etc/apt/sources.list.d/libreqos.list
sudo wget -O - -q http://stats.libreqos.io/repo.asc | sudo apt-key add -
apt-get update
apt-get install libreqos
```
@@ -11,10 +11,11 @@ Single-thread CPU performance will determine the max throughput of a single HTB
| Customer Max Plan | Passmark Single-Thread |
| ----------------- | ---------------------- |
| 100 Mbps | 1000 |
| 250 Mbps | 1500 |
| 500 Mbps | 2000 |
| 1 Gbps | 2500 |
| 250 Mbps | 1250 |
| 500 Mbps | 1500 |
| 1 Gbps | 2000 |
| 2 Gbps | 3000 |
| 4 Gbps | 4000 |

Below is a table of approximate aggregate throughput capacity, assuming a CPU with a [single thread](https://www.cpubenchmark.net/singleThread.html#server-thread) performance of 2700 or greater:

@@ -48,8 +49,12 @@ So for example, an ISP delivering 1Gbps service plans with 10Gbps aggregate thro
### Server Recommendations
It is most cost-effective to buy a used server with specifications matching your unique requirements, as laid out in the System Requirements section below.
For those who do not have the time to do that, here are some off-the-shelf options to consider:
* 1 Gbps | [Supermicro SuperServer E100-9W-L](https://www.thinkmate.com/system/superserver-e100-9w-l)
* 10 Gbps | [Supermicro SuperServer 510T-ML (Choose E-2388G)](https://www.thinkmate.com/system/superserver-510t-ml)
* 20 Gbps | [Dell R450 Config](https://www.dell.com/en-us/shop/servers-storage-and-networking/poweredge-r450-rack-server/spd/poweredge-r450/pe_r450_15127_vi_vp?configurationid=a7663c54-6e4a-4c96-9a21-bc5a69d637ba)

The [AsRock 1U4LW-B6502L2T](https://www.thinkmate.com/system/asrock-1u4lw-b6502l2t/635744) can be a great lower-cost option as well.
| Aggregate | 100Mbps Plans | 1Gbps Plans | 4Gbps Plans |
| ------------- | ------------- | ----------- | ----------- |
| 1 Gbps Total | A | | |
| 10 Gbps Total | | B | C |

* A | [Supermicro SuperServer E100-9W-L](https://www.thinkmate.com/system/superserver-e100-9w-l)
* B | [Supermicro SuperServer 510T-ML](https://www.thinkmate.com/system/superserver-510t-ml) (Select E-2388G)
* C | [Supermicro AS-1015A-MT](https://store.supermicro.com/us_en/as-1015a-mt.html) (Ryzen 9 7700X, 2x16GB DDR5 4800MHz ECC, 1x Supermicro 10-Gigabit XL710 + X557)
@@ -6,5 +6,5 @@
* Known supported cards:
  * [NVIDIA Mellanox MCX512A-ACAT](https://www.fs.com/products/119649.html)
  * NVIDIA Mellanox MCX416A-CCAT
  * [Intel X710](https://www.fs.com/products/75600.html)
  * [Intel X710](https://www.fs.com/products/75600.html) * Note - possible i40e driver issue with XDP Redirect for high throughput 10G+
  * Intel X520
@@ -18,7 +18,7 @@ By specifying `libreqos` at the end, git will ensure the folder name is lowercase.
You need to have a few packages from `apt` installed:

```shell
sudo apt-get install -y python3-pip clang gcc gcc-multilib llvm libelf-dev git nano graphviz curl screen llvm pkg-config linux-tools-common linux-tools-`uname -r` libbpf-dev
sudo apt-get install -y python3-pip clang gcc gcc-multilib llvm libelf-dev git nano graphviz curl screen llvm pkg-config linux-tools-common linux-tools-`uname -r` libbpf-dev libssl-dev
```

Then you need to install some Python dependencies:
@@ -20,6 +20,8 @@ You have the option to run integrationUISP.py automatically on boot and every 30 minutes.

First, set the relevant parameters for Splynx (splynx_api_key, splynx_api_secret, etc.) in ispConfig.py.

The Splynx Integration uses Basic authentication. To use this type of authentication, please make sure you enable [Unsecure access](https://splynx.docs.apiary.io/#introduction/authentication) in your Splynx API key settings. The Splynx API key should also be granted the necessary permissions.

To test the Splynx Integration, use

```shell
@@ -17,3 +17,20 @@ Node manager and scheduler are dependent on the `lqos.service` being in a healthy state.
### RTNETLINK answers: Invalid argument

This tends to show up when the MQ qdisc cannot be added correctly to the NIC interface. This would suggest the NIC has insufficient RX/TX queues. Please make sure you are using the [recommended NICs](../SystemRequirements/Networking.md).

### InfluxDB "Failed to update bandwidth graphs"

The scheduler (scheduler.py) runs the InfluxDB integration within a try/except statement. If it fails to update InfluxDB, it will report "Failed to update bandwidth graphs".
To find the exact cause of the failure, please run ```python3 graphInfluxDB.py```, which will provide more specific errors.

### All customer IPs are listed under Unknown IPs, rather than Shaped Devices in GUI

```
cd /opt/libreqos/src
sudo systemctl stop lqos_scheduler
sudo python3 LibreQoS.py
```

The console output from running LibreQoS.py directly provides more specific errors regarding issues with ShapedDevices.csv and network.json.
Once you have identified the error and fixed ShapedDevices.csv and/or Network.json, please then run

```sudo systemctl start lqos_scheduler```
@@ -100,23 +100,23 @@ Let's attach some access points and point-of-presence sites:

```python
net.addRawNode(NetworkNode(id="AP_A", displayName="AP_A", parentId="Site_1", type=NodeType.ap, download=500, upload=500))
net.addRawNode(NetworkNode(id="Site_3", displayName="Site_3", parentId="Site_1", type=NodeType.site, download=500, upload=500))
net.addRawNode(NetworkNode(id="PoP_5", displayName="PoP_5", parentId="Site_3", type=NodeType.site, download=200, upload=200))
net.addRawNode(NetworkNode(id="AP_9", displayName="AP_9", parentId="PoP_5", type=NodeType.ap, download=120, upload=120))
net.addRawNode(NetworkNode(id="PoP_6", displayName="PoP_6", parentId="PoP_5", type=NodeType.site, download=60, upload=60))
net.addRawNode(NetworkNode(id="AP_11", displayName="AP_11", parentId="PoP_6", type=NodeType.ap, download=30, upload=30))
net.addRawNode(NetworkNode(id="PoP_1", displayName="PoP_1", parentId="Site_2", type=NodeType.site, download=200, upload=200))
net.addRawNode(NetworkNode(id="AP_7", displayName="AP_7", parentId="PoP_1", type=NodeType.ap, download=100, upload=100))
net.addRawNode(NetworkNode(id="Site_5", displayName="Site_5", parentId="Site_3", type=NodeType.site, download=200, upload=200))
net.addRawNode(NetworkNode(id="AP_9", displayName="AP_9", parentId="Site_5", type=NodeType.ap, download=120, upload=120))
net.addRawNode(NetworkNode(id="Site_6", displayName="Site_6", parentId="Site_5", type=NodeType.site, download=60, upload=60))
net.addRawNode(NetworkNode(id="AP_11", displayName="AP_11", parentId="Site_6", type=NodeType.ap, download=30, upload=30))
net.addRawNode(NetworkNode(id="Site_4", displayName="Site_4", parentId="Site_2", type=NodeType.site, download=200, upload=200))
net.addRawNode(NetworkNode(id="AP_7", displayName="AP_7", parentId="Site_4", type=NodeType.ap, download=100, upload=100))
net.addRawNode(NetworkNode(id="AP_1", displayName="AP_1", parentId="Site_2", type=NodeType.ap, download=150, upload=150))
```

When you attach a customer, you can specify a tree entry (e.g. `PoP_5`) as a parent:
When you attach a customer, you can specify a tree entry (e.g. `Site_5`) as a parent:

```python
# Add the customer
customer = NetworkNode(
    id="Unique Customer ID",
    displayName="The Doe Family",
    parentId="PoP_5",
    parentId="Site_5",
    type=NodeType.client,
    download=100,  # Download is in Mbit/second
    upload=20,  # Upload is in Mbit/second

@@ -146,7 +146,7 @@ net.createShapedDevices() # Create the `ShapedDevices.csv` file.

You can also add a call to `net.plotNetworkGraph(False)` (use `True` to also include every customer; this can make for a HUGE file) to create a PDF file (currently named `network.pdf.pdf`) displaying your topology. The example shown here looks like this:

(network topology graph image)

## Longest Prefix Match Tip
You could theoretically throttle all unknown IPs until they are associated with a client. For example, you could limit every unknown IP to 1.5x0.5 with a single entry in ShapedDevices.csv, until you associate them with an account. IPs need to be non-exact matches. So you can't have two 192.168.1.1 entries, but you can have a 192.168.1.0/24 subnet and a 192.168.1.2/32 - they aren't duplicates, and the LPM search is smart enough to pick the most exact match.
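To see the longest-prefix behaviour described above in action, here is a small sketch using Python's standard `ipaddress` module (the prefixes are the ones from the example):

```python
import ipaddress

# Overlapping, but not duplicate, entries as in the tip above
networks = [ipaddress.ip_network("192.168.1.0/24"), ipaddress.ip_network("192.168.1.2/32")]

def longest_prefix_match(ip):
    candidates = [net for net in networks if ipaddress.ip_address(ip) in net]
    return max(candidates, key=lambda net: net.prefixlen, default=None)

print(longest_prefix_match("192.168.1.2"))   # 192.168.1.2/32 - the most specific entry wins
print(longest_prefix_match("192.168.1.50"))  # 192.168.1.0/24
```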
@@ -90,34 +90,34 @@ def tearDown(interfaceA, interfaceB):
	clear_ip_mappings() # Use the bus
	clearPriorSettings(interfaceA, interfaceB)

def findQueuesAvailable():
def findQueuesAvailable(interfaceName):
	# Find queues and CPU cores available. Use min between those two as queuesAvailable
	if enableActualShellCommands:
		if queuesAvailableOverride == 0:
			queuesAvailable = 0
			path = '/sys/class/net/' + interfaceA + '/queues/'
			path = '/sys/class/net/' + interfaceName + '/queues/'
			directory_contents = os.listdir(path)
			for item in directory_contents:
				if "tx-" in str(item):
					queuesAvailable += 1
			print("NIC queues:\t\t\t" + str(queuesAvailable))
			print(f"Interface {interfaceName} NIC queues:\t\t\t" + str(queuesAvailable))
		else:
			queuesAvailable = queuesAvailableOverride
			print("NIC queues (Override):\t\t\t" + str(queuesAvailable))
			print(f"Interface {interfaceName} NIC queues (Override):\t\t\t" + str(queuesAvailable))
		cpuCount = multiprocessing.cpu_count()
		print("CPU cores:\t\t\t" + str(cpuCount))
		if queuesAvailable < 2:
			raise SystemError('Only 1 NIC rx/tx queue available. You will need to use a NIC with 2 or more rx/tx queues available.')
			raise SystemError(f'Only 1 NIC rx/tx queue available for interface {interfaceName}. You will need to use a NIC with 2 or more rx/tx queues available.')
		if cpuCount < 2:
			raise SystemError('Only 1 CPU core available. You will need to use a CPU with 2 or more CPU cores.')
		queuesAvailable = min(queuesAvailable,cpuCount)
		print("queuesAvailable set to:\t" + str(queuesAvailable))
		print(f"queuesAvailable for interface {interfaceName} set to:\t" + str(queuesAvailable))
	else:
		print("As enableActualShellCommands is False, CPU core / queue count has been set to 16")
		logging.info("NIC queues:\t\t\t" + str(16))
		logging.info(f"Interface {interfaceName} NIC queues:\t\t\t" + str(16))
		cpuCount = multiprocessing.cpu_count()
		logging.info("CPU cores:\t\t\t" + str(16))
		logging.info("queuesAvailable set to:\t" + str(16))
		logging.info(f"queuesAvailable for interface {interfaceName} set to:\t" + str(16))
		queuesAvailable = 16
	return queuesAvailable
@@ -137,12 +137,28 @@ def validateNetworkAndDevices():
	devicesValidatedOrNot = False
	with open('network.json') as file:
		try:
			temporaryVariable = json.load(file) # put JSON-data to a variable
			data = json.load(file) # put JSON-data to a variable
			if data != {}:
				# Traverse
				observedNodes = {} # Will not be used later
				def traverseToVerifyValidity(data):
					for elem in data:
						if isinstance(elem, str):
							if (isinstance(data[elem], dict)) and (elem != 'children'):
								if elem not in observedNodes:
									observedNodes[elem] = {'downloadBandwidthMbps': data[elem]['downloadBandwidthMbps'], 'uploadBandwidthMbps': data[elem]['uploadBandwidthMbps']}
									if 'children' in data[elem]:
										traverseToVerifyValidity(data[elem]['children'])
								else:
									warnings.warn("Non-unique Node name in network.json: " + elem, stacklevel=2)
									networkValidatedOrNot = False
				traverseToVerifyValidity(data)
				if len(observedNodes) < 1:
					warnings.warn("network.json had 0 valid nodes. Only {} is accepted for that scenario.", stacklevel=2)
					networkValidatedOrNot = False
		except json.decoder.JSONDecodeError:
			warnings.warn("network.json is an invalid JSON file", stacklevel=2) # in case json is invalid
			networkValidatedOrNot = False
	if networkValidatedOrNot == True:
		print("network.json passed validation")
			networkValidatedOrNot = False
	rowNum = 2
	with open('ShapedDevices.csv') as csv_file:
		csv_reader = csv.reader(csv_file, delimiter=',')
@@ -173,7 +189,7 @@ def validateNetworkAndDevices():
			for ipEntry in ipv4_list:
				if ipEntry in seenTheseIPsAlready:
					warnings.warn("Provided IPv4 '" + ipEntry + "' in ShapedDevices.csv at row " + str(rowNum) + " is duplicate.", stacklevel=2)
					devicesValidatedOrNot = False
					#devicesValidatedOrNot = False
					seenTheseIPsAlready.append(ipEntry)
				else:
					if (type(ipaddress.ip_network(ipEntry)) is ipaddress.IPv4Network) or (type(ipaddress.ip_address(ipEntry)) is ipaddress.IPv4Address):
@@ -255,8 +271,11 @@ def validateNetworkAndDevices():
		print("ShapedDevices.csv passed validation")
	else:
		print("ShapedDevices.csv failed validation")

	if (devicesValidatedOrNot == True) and (devicesValidatedOrNot == True):
	if networkValidatedOrNot == True:
		print("network.json passed validation")
	else:
		print("network.json failed validation")
	if (devicesValidatedOrNot == True) and (networkValidatedOrNot == True):
		return True
	else:
		return False
@@ -454,7 +473,10 @@ def refreshShapers():

	# Pull rx/tx queues / CPU cores available
	queuesAvailable = findQueuesAvailable()
	# Handle the case where the number of queues differs between the two interfaces
	InterfaceAQueuesAvailable = findQueuesAvailable(interfaceA)
	InterfaceBQueuesAvailable = findQueuesAvailable(interfaceB)
	queuesAvailable = min(InterfaceAQueuesAvailable, InterfaceBQueuesAvailable)
	stickOffset = 0
	if OnAStick:
		print("On-a-stick override dividing queues")
@@ -740,6 +762,26 @@ def refreshShapers():
	# Parse network structure. For each tier, generate commands to create corresponding HTB and leaf classes. Prepare commands for execution later
	# Define lists for hash filters
	def traverseNetwork(data):

		# Cake needs help handling rates lower than 5 Mbps
		def sqmFixupRate(rate:int, sqm:str) -> str:
			# If we aren't using cake, just return the sqm string
			if not sqm.startswith("cake") or "rtt" in sqm:
				return sqm
			# If we are using cake, we need to fix up the rate
			# Based on: 1 MTU is 1500 bytes, or 12,000 bits.
			# At 1 Mbps (1,000 bits per ms), transmitting an MTU takes 12ms. Add 3ms for overhead, and we get 15ms.
			# So 15ms divided by 5 (for 1%) multiplied by 100 yields 300ms.
			# The same formula gives 180ms at 2Mbps,
			# 140ms at 3Mbps,
			# 120ms at 4Mbps.
			match rate:
				case 1: return sqm + " rtt 300"
				case 2: return sqm + " rtt 180"
				case 3: return sqm + " rtt 140"
				case 4: return sqm + " rtt 120"
				case _: return sqm

		for node in data:
			command = 'class add dev ' + interfaceA + ' parent ' + data[node]['parentClassID'] + ' classid ' + data[node]['classMinor'] + ' htb rate '+ str(data[node]['downloadBandwidthMbpsMin']) + 'mbit ceil '+ str(data[node]['downloadBandwidthMbps']) + 'mbit prio 3'
			linuxTCcommands.append(command)
@@ -760,14 +802,18 @@ def refreshShapers():
			command = 'class add dev ' + interfaceA + ' parent ' + data[node]['classid'] + ' classid ' + circuit['classMinor'] + ' htb rate '+ str(circuit['minDownload']) + 'mbit ceil '+ str(circuit['maxDownload']) + 'mbit prio 3' + tcComment
			linuxTCcommands.append(command)
			# Only add CAKE / fq_codel qdisc if monitorOnlyMode is Off
			if monitorOnlyMode == False:
				command = 'qdisc add dev ' + interfaceA + ' parent ' + circuit['classMajor'] + ':' + circuit['classMinor'] + ' ' + sqm
			if monitorOnlyMode == False:
				# SQM fixup for lower rates
				useSqm = sqmFixupRate(circuit['maxDownload'], sqm)
				command = 'qdisc add dev ' + interfaceA + ' parent ' + circuit['classMajor'] + ':' + circuit['classMinor'] + ' ' + useSqm
				linuxTCcommands.append(command)
			command = 'class add dev ' + interfaceB + ' parent ' + data[node]['up_classid'] + ' classid ' + circuit['classMinor'] + ' htb rate '+ str(circuit['minUpload']) + 'mbit ceil '+ str(circuit['maxUpload']) + 'mbit prio 3'
			linuxTCcommands.append(command)
			# Only add CAKE / fq_codel qdisc if monitorOnlyMode is Off
			if monitorOnlyMode == False:
				command = 'qdisc add dev ' + interfaceB + ' parent ' + circuit['up_classMajor'] + ':' + circuit['classMinor'] + ' ' + sqm
			if monitorOnlyMode == False:
				# SQM fixup for lower rates
				useSqm = sqmFixupRate(circuit['maxUpload'], sqm)
				command = 'qdisc add dev ' + interfaceB + ' parent ' + circuit['up_classMajor'] + ':' + circuit['classMinor'] + ' ' + useSqm
				linuxTCcommands.append(command)
			pass
			for device in circuit['devices']:
src/VERSION_STRING (new file): 1 line

@@ -0,0 +1 @@
1.4-rc10-devel
@@ -60,6 +60,10 @@ for prog in $PROGS
do
    pushd $prog > /dev/null
    cargo build $BUILD_FLAGS
    if [ $? -ne 0 ]; then
        echo "Cargo build failed. Exiting with code 1."
        exit 1
    fi
    popd > /dev/null
done
@@ -5,7 +5,7 @@ from typing import List, Any
from ispConfig import allowedSubnets, ignoreSubnets, generatedPNUploadMbps, generatedPNDownloadMbps, circuitNameUseAddress, upstreamBandwidthCapacityDownloadMbps, upstreamBandwidthCapacityUploadMbps
import ipaddress
import enum
import os

def isInAllowedSubnets(inputIP):
	# Check whether an IP address occurs inside the allowedSubnets list
@@ -356,6 +356,10 @@ class NetworkGraph:
	def createShapedDevices(self):
		import csv
		from ispConfig import bandwidthOverheadFactor
		try:
			from ispConfig import committedBandwidthMultiplier
		except:
			committedBandwidthMultiplier = 0.98
		# Builds ShapedDevices.csv from the network tree.
		circuits = []
		for (i, node) in enumerate(self.nodes):
@@ -366,7 +370,10 @@ class NetworkGraph:
				if circuitNameUseAddress:
					displayNameToUse = node.address
				else:
					displayNameToUse = node.customerName
					if node.type == NodeType.client:
						displayNameToUse = node.displayName
					else:
						displayNameToUse = node.customerName + " (" + nodeTypeToString(node.type) + ")"
				circuit = {
					"id": node.id,
					"name": displayNameToUse,
@@ -413,13 +420,22 @@ class NetworkGraph:
					device["mac"],
					device["ipv4"],
					device["ipv6"],
					int(float(circuit["download"]) * 0.98),
					int(float(circuit["upload"]) * 0.98),
					int(float(circuit["download"]) * committedBandwidthMultiplier),
					int(float(circuit["upload"]) * committedBandwidthMultiplier),
					int(float(circuit["download"]) * bandwidthOverheadFactor),
					int(float(circuit["upload"]) * bandwidthOverheadFactor),
					""
				]
				wr.writerow(row)

		# If we have an "appendToShapedDevices.csv" file, it gets appended to the end of the file.
		# This is useful for adding devices that are not in the network tree, such as a
		# "default" device that gets all the traffic that doesn't match any other device.
		if os.path.isfile('appendToShapedDevices.csv'):
			with open('appendToShapedDevices.csv', 'r') as f:
				reader = csv.reader(f)
				for row in reader:
					wr.writerow(row)

	def plotNetworkGraph(self, showClients=False):
		# Requires `pip install graphviz` to function.
@@ -10,6 +10,10 @@ try:
except:
	from ispConfig import uispSite, uispStrategy
	overwriteNetworkJSONalways = False
try:
	from ispConfig import uispSuspendedStrategy
except:
	uispSuspendedStrategy = "none"
try:
	from ispConfig import airMax_capacity
except:
@@ -18,6 +22,10 @@ try:
	from ispConfig import ltu_capacity
except:
	ltu_capacity = 0.90
try:
	from ispConfig import usePtMPasParent
except:
	usePtMPasParent = False

def uispRequest(target):
	# Sends an HTTP request to UISP and returns the
@@ -337,8 +345,9 @@ def findNodesBranchedOffPtMP(siteList, dataLinks, sites, rootSite, foundAirFiber
						'upload': upload,
						'parent': apID
					}
					site['parent'] = apID
					print('Site ' + name + ' will use PtMP AP as parent.')
					if usePtMPasParent:
						site['parent'] = apID
						print('Site ' + name + ' will use PtMP AP as parent.')
	return siteList, nodeOffPtMP

def handleMultipleInternetNodes(sites, dataLinks, uispSite):
@@ -355,7 +364,7 @@ def handleMultipleInternetNodes(sites, dataLinks, uispSite):
	uispSite = 'Internet'
	for link in dataLinks:
		if link['canDelete'] == False:
			if link['from']['device']['identification']['id'] == link['to']['device']['identification']['id']:
			if link['from']['device'] is not None and link['to']['device'] is not None and link['from']['device']['identification']['id'] == link['to']['device']['identification']['id']:
				link['from']['site']['identification']['id'] = '001'
				link['from']['site']['identification']['name'] = 'Internet'
				# Found internet link
@@ -429,6 +438,7 @@ def buildFullGraph():
		match type:
			case "site":
				nodeType = NodeType.site
				customerName = name
				if name in siteBandwidth:
					# Use the CSV bandwidth values
					download = siteBandwidth[name]["download"]
@@ -458,6 +468,18 @@ def buildFullGraph():
				if (site['qos']['downloadSpeed']) and (site['qos']['uploadSpeed']):
					download = int(round(site['qos']['downloadSpeed']/1000000))
					upload = int(round(site['qos']['uploadSpeed']/1000000))
				if site['identification'] is not None and site['identification']['suspended'] is not None and site['identification']['suspended'] == True:
					if uispSuspendedStrategy == "ignore":
						print("WARNING: Site " + name + " is suspended")
						continue
					if uispSuspendedStrategy == "slow":
						print("WARNING: Site " + name + " is suspended")
						download = 1
						upload = 1

				if site['identification']['status'] == "disconnected":
					print("WARNING: Site " + name + " is disconnected")
					continue

		node = NetworkNode(id=id, displayName=name, type=nodeType,
						parentId=parent, download=download, upload=upload, address=address, customerName=customerName)
@@ -17,6 +17,7 @@ upstreamBandwidthCapacityUploadMbps = 1000
# Devices in ShapedDevices.csv without a defined ParentNode (such as if you have a flat {} network)
# will be placed under one of these generated parent nodes, evenly spread out across CPU cores.
# This defines the bandwidth limit for each of those generated parent nodes.
# If you are not sure what values to use, simply use the same values as upstreamBandwidthCapacityDownloadMbps and upstreamBandwidthCapacityUploadMbps
generatedPNDownloadMbps = 1000
generatedPNUploadMbps = 1000
@@ -97,6 +98,11 @@ uispSite = ''
# or site options.
# * "full" - build a complete network map
uispStrategy = "full"
# Handling of UISP suspensions:
# * "none" - do not handle suspensions
# * "ignore" - do not add suspended customers to the network map
# * "slow" - limit suspended customers to 1mbps
uispSuspendedStrategy = "none"
# Assumed capacity of AirMax and LTU radios vs reported capacity by UISP. For example, 65% would be 0.65.
# For AirMax, this applies to flexible frame only. AirMax fixed frame will have capacity based on ratio.
airMax_capacity = 0.65
@@ -109,6 +115,8 @@ findIPv6usingMikrotik = False
# If you want to provide a safe cushion for speed test results to prevent customer complaints, you can set this to
# 1.15 (15% above plan rate). If not, you can leave it as 1.0
bandwidthOverheadFactor = 1.0
# Number to multiply the maximum/ceiling bandwidth by to determine the minimum bandwidth.
committedBandwidthMultiplier = 0.98
# For edge cases, set the respective ParentNode for these CPEs
exceptionCPEs = {}
# exceptionCPEs = {
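For reference, this is how the two multipliers above combine with a plan rate when `createShapedDevices()` (shown earlier in this diff) writes a ShapedDevices.csv row. The plan rates here are invented for illustration:

```python
# Illustrative plan rates in Mbps; the multipliers mirror the ispConfig.py defaults above
plan_download_mbps = 100
plan_upload_mbps = 20
bandwidthOverheadFactor = 1.0          # ceiling (max) relative to the plan rate
committedBandwidthMultiplier = 0.98    # committed (min) rate relative to the plan rate

# Mirrors the row computation in integrationCommon.createShapedDevices()
min_download = int(float(plan_download_mbps) * committedBandwidthMultiplier)  # 98
max_download = int(float(plan_download_mbps) * bandwidthOverheadFactor)       # 100
min_upload = int(float(plan_upload_mbps) * committedBandwidthMultiplier)      # 19
max_upload = int(float(plan_upload_mbps) * bandwidthOverheadFactor)           # 20
```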
src/rust/Cargo.lock (generated): 1531 changed lines (diff suppressed because it is too large)
@@ -27,4 +27,8 @@ members = [
    "lqos_anonymous_stats_server", # The server for gathering anonymous usage data.
    "lqos_heimdall", # Library for managing Heimdall flow watching
    "lqos_map_perf", # A CLI tool for testing eBPF map performance
    "lqstats", # A CLI utility for retrieving long-term statistics
    "lts_client", # Shared data and client-side code for long-term stats
    "lqos_map_perf", # A CLI tool for testing eBPF map performance
    "uisp", # REST support for the UISP API
]
@@ -12,5 +12,5 @@ log = "0"
lqos_bus = { path = "../lqos_bus" }
serde = { version = "1.0", features = ["derive"] }
serde_cbor = "0" # For RFC8949/7409 format C binary objects
sqlite = "0"
sqlite = "0.30.4"
axum = "0.6"
@@ -14,7 +14,8 @@ bincode = "1"
thiserror = "1"
lqos_config = { path = "../lqos_config" }
lqos_utils = { path = "../lqos_utils" }
tokio = { version = "1", features = [ "rt", "macros", "net", "io-util", "time" ] }
lts_client = { path = "../lts_client" }
tokio = { version = "1", features = [ "full" ] }
log = "0"
nix = "0"
serde_cbor = "0" # For RFC8949/7409 format C binary objects
@@ -1,12 +1,12 @@
use super::PREALLOCATE_CLIENT_BUFFER_BYTES;
use crate::{
  bus::BusClientError, decode_response, encode_request, BusRequest,
  BusResponse, BusSession, BUS_SOCKET_PATH,
    bus::BusClientError, decode_response, encode_request, BusRequest, BusResponse, BusSession,
    BUS_SOCKET_PATH,
};
use log::error;
use tokio::{
  io::{AsyncReadExt, AsyncWriteExt},
  net::UnixStream,
    io::{AsyncReadExt, AsyncWriteExt},
    net::UnixStream,
};

/// Convenient wrapper for accessing the bus
@@ -16,42 +16,43 @@ use tokio::{
/// * `requests` a vector of `BusRequest` requests to make.
///
/// **Returns** Either an error, or a vector of `BusResponse` replies
pub async fn bus_request(
  requests: Vec<BusRequest>,
) -> Result<Vec<BusResponse>, BusClientError> {
  let stream = UnixStream::connect(BUS_SOCKET_PATH).await;
  if let Err(e) = &stream {
    if e.kind() == std::io::ErrorKind::NotFound {
      error!("Unable to access {BUS_SOCKET_PATH}. Check that lqosd is running and you have appropriate permissions.");
      return Err(BusClientError::SocketNotFound);
pub async fn bus_request(requests: Vec<BusRequest>) -> Result<Vec<BusResponse>, BusClientError> {
    let stream = UnixStream::connect(BUS_SOCKET_PATH).await;
    if let Err(e) = &stream {
        if e.kind() == std::io::ErrorKind::NotFound {
            error!("Unable to access {BUS_SOCKET_PATH}. Check that lqosd is running and you have appropriate permissions.");
            return Err(BusClientError::SocketNotFound);
        }
    }
  }
  let mut stream = stream.unwrap(); // This unwrap is safe, we checked that it exists previously
  let test = BusSession { persist: false, requests };
  let msg = encode_request(&test);
  if msg.is_err() {
    error!("Unable to encode request {:?}", test);
    return Err(BusClientError::EncodingError);
  }
  let msg = msg.unwrap();
  let ret = stream.write(&msg).await;
  if ret.is_err() {
    error!("Unable to write to {BUS_SOCKET_PATH} stream.");
    error!("{:?}", ret);
    return Err(BusClientError::StreamWriteError);
  }
  let mut buf = Vec::with_capacity(PREALLOCATE_CLIENT_BUFFER_BYTES);
  let ret = stream.read_to_end(&mut buf).await;
  if ret.is_err() {
    error!("Unable to read from {BUS_SOCKET_PATH} stream.");
    error!("{:?}", ret);
    return Err(BusClientError::StreamReadError);
  }
  let reply = decode_response(&buf);
  if reply.is_err() {
    error!("Unable to decode response from socket.");
    return Err(BusClientError::DecodingError);
  }
  let reply = reply.unwrap();
  Ok(reply.responses)
    let mut stream = stream.unwrap(); // This unwrap is safe, we checked that it exists previously
    let test = BusSession {
        persist: false,
        requests,
    };
    let msg = encode_request(&test);
    if msg.is_err() {
        error!("Unable to encode request {:?}", test);
        return Err(BusClientError::EncodingError);
    }
    let msg = msg.unwrap();
    let ret = stream.write(&msg).await;
    if ret.is_err() {
        error!("Unable to write to {BUS_SOCKET_PATH} stream.");
        error!("{:?}", ret);
        return Err(BusClientError::StreamWriteError);
    }
    let mut buf = Vec::with_capacity(PREALLOCATE_CLIENT_BUFFER_BYTES);
    let ret = stream.read_to_end(&mut buf).await;
    if ret.is_err() {
        error!("Unable to read from {BUS_SOCKET_PATH} stream.");
        error!("{:?}", ret);
        return Err(BusClientError::StreamReadError);
    }
    let reply = decode_response(&buf);
    if reply.is_err() {
        error!("Unable to decode response from socket.");
        return Err(BusClientError::DecodingError);
    }
    let reply = reply.unwrap();
    Ok(reply.responses)
}
@@ -10,7 +10,7 @@ pub use client::bus_request;
use log::error;
pub use persistent_client::BusClient;
pub use reply::BusReply;
pub use request::BusRequest;
pub use request::{BusRequest, StatsRequest};
pub use response::BusResponse;
pub use session::BusSession;
use thiserror::Error;
@@ -145,8 +145,22 @@ pub enum BusRequest {
  /// Give me a libpcap format packet dump (shortened) of the last 10 seconds
  GetPcapDump(usize),

  /// Request data from the long-term stats system
  GetLongTermStats(StatsRequest),

  /// If running on Equinix (the `equinix_test` feature is enabled),
  /// display a "run bandwidth test" link.
  #[cfg(feature = "equinix_tests")]
  RequestLqosEquinixTest,
}

/// Specific requests from the long-term stats system
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
pub enum StatsRequest {
  /// Retrieve the current totals for all hosts
  CurrentTotals,
  /// Retrieve the values for all hosts
  AllHosts,
  /// Get the network tree
  Tree,
}
@@ -1,7 +1,10 @@
use crate::{IpMapping, IpStats, XdpPpingResult, FlowTransport, ip_stats::PacketHeader};
use super::QueueStoreTransit;
use crate::{
    ip_stats::PacketHeader, FlowTransport, IpMapping, IpStats, XdpPpingResult,
};
use lts_client::transport_data::{StatsTotals, StatsHost, StatsTreeNode};
use serde::{Deserialize, Serialize};
use std::net::IpAddr;
use super::QueueStoreTransit;

/// A `BusResponse` object represents a single
/// reply generated from a `BusRequest`, and batched
@@ -77,7 +80,7 @@ pub enum BusResponse {
  NodeNames(Vec<(usize, String)>),

  /// Statistics from lqosd
  LqosdStats{
  LqosdStats {
    /// Number of bus requests handled
    bus_requests: u64,
    /// Us to poll hosts
@@ -92,11 +95,11 @@ pub enum BusResponse {
  FlowData(Vec<(FlowTransport, Option<FlowTransport>)>),

  /// The index of the new packet collection session
  PacketCollectionSession{
  PacketCollectionSession {
    /// The identifier of the capture session
      session_id: usize,
    session_id: usize,
    /// Number of seconds for which data will be captured
      countdown: usize
    countdown: usize,
  },

  /// Packet header dump
@@ -104,4 +107,13 @@ pub enum BusResponse {

  /// Pcap format dump
  PcapDump(Option<String>),

  /// Long-term stats top-level totals
  LongTermTotals(StatsTotals),

  /// Long-term stats host totals
  LongTermHosts(Vec<StatsHost>),

  /// Long-term stats tree
  LongTermTree(Vec<StatsTreeNode>),
}
@@ -21,9 +21,19 @@ pub use bus::{
  bus_request, decode_request, decode_response, encode_request,
  encode_response, BusClient, BusReply, BusRequest, BusResponse, BusSession,
  CakeDiffTinTransit, CakeDiffTransit, CakeTransit, QueueStoreTransit,
  UnixSocketServer, BUS_SOCKET_PATH,
  UnixSocketServer, BUS_SOCKET_PATH, StatsRequest
};
pub use tc_handle::TcHandle;

/// Anonymous Usage Statistics Data Types
pub mod anonymous;

/// Re-export bincode
pub mod bincode {
  pub use bincode::*;
}

/// Re-export CBOR
pub mod cbor {
  pub use serde_cbor::*;
}
@@ -141,7 +141,7 @@ impl WebUsers {
    role: UserRole,
  ) -> Result<String, AuthenticationError> {
    let token; // Assigned in a branch
    if let Some(mut user) =
    if let Some(user) =
      self.users.iter_mut().find(|u| u.username == username)
    {
      user.password_hash = Self::hash_password(password);
@@ -18,10 +18,12 @@ pub struct EtcLqos {

  /// If present, provides a unique ID for the node. Used for
  /// anonymous stats (to identify nodes without providing an actual
  /// identity), and will be used for long-term data retention to
  /// disambiguate cluster or multi-head-end nodes.
  /// identity), and long-term stats.
  pub node_id: Option<String>,

  /// If present, provide a name for the node.
  pub node_name: Option<String>,

  /// If present, defines how the Bifrost XDP bridge operates.
  pub bridge: Option<BridgeConfig>,

@@ -36,6 +38,9 @@ pub struct EtcLqos {
  /// run. Short times are good, there's a real performance penalty to
  /// capturing high-throughput streams. Defaults to 10 seconds.
  pub packet_capture_time: Option<usize>,

  /// Long-term statistics retention settings.
  pub long_term_stats: Option<LongTermStats>,
}

/// Represents a set of `sysctl` and `ethtool` tweaks that may be
@@ -129,6 +134,27 @@ pub struct UsageStats {
  pub anonymous_server: String,
}

/// Long Term Data Retention
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct LongTermStats {
  /// Should we store long-term stats at all?
  pub gather_stats: bool,

  /// How frequently should stats be accumulated into a long-term
  /// min/max/avg format per tick?
  pub collation_period_seconds: u32,

  /// The license key for submitting stats to a LibreQoS hosted
  /// statistics server
  pub license_key: Option<String>,

  /// UISP reporting period (in seconds). UISP queries can be slow,
  /// so hitting it every second or 10 seconds is going to cause problems
  /// for some people. A good default may be 5 minutes. Not specifying this
  /// disables UISP integration.
  pub uisp_reporting_interval_seconds: Option<u64>,
}

impl EtcLqos {
  /// Loads `/etc/lqos.conf`.
  pub fn load() -> Result<Self, EtcLqosError> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Run this if you've received the OK from the licensing server, and been
|
||||
/// sent a license key. This appends a [long_term_stats] section to your
|
||||
/// config file - ONLY if one doesn't already exist.
|
||||
pub fn enable_long_term_stats(license_key: String) {
|
||||
if let Ok(raw) = std::fs::read_to_string("/etc/lqos.conf") {
|
||||
let document = raw.parse::<Document>();
|
||||
match document {
|
||||
Err(e) => {
|
||||
error!("Unable to parse TOML from /etc/lqos.conf");
|
||||
error!("Full error: {:?}", e);
|
||||
return;
|
||||
}
|
||||
Ok(mut config_doc) => {
|
||||
let cfg = toml_edit::de::from_document::<EtcLqos>(config_doc.clone());
|
||||
match cfg {
|
||||
Ok(cfg) => {
|
||||
// Now we enable LTS if its not present
|
||||
if let Ok(isp_config) = crate::LibreQoSConfig::load() {
|
||||
if cfg.long_term_stats.is_none() {
|
||||
|
||||
let mut new_section = toml_edit::table();
|
||||
new_section["gather_stats"] = value(true);
|
||||
new_section["collation_period_seconds"] = value(60);
|
||||
new_section["license_key"] = value(license_key);
|
||||
if isp_config.automatic_import_uisp {
|
||||
new_section["uisp_reporting_interval_seconds"] = value(300);
|
||||
}
|
||||
config_doc["long_term_stats"] = new_section;
|
||||
|
||||
let new_cfg = config_doc.to_string();
|
||||
if let Err(e) = fs::write(Path::new("/etc/lqos.conf"), new_cfg) {
|
||||
log::error!("Unable to write to /etc/lqos.conf");
|
||||
log::error!("{e:?}");
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Unable to parse TOML from /etc/lqos.conf");
|
||||
error!("Full error: {:?}", e);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn check_config(cfg_doc: &mut Document, cfg: &mut EtcLqos) {
|
||||
use sha2::digest::Update;
|
||||
use sha2::Digest;
|
||||
|
||||
@@ -14,7 +14,7 @@ mod program_control;
mod shaped_devices;

pub use authentication::{UserRole, WebUsers};
pub use etc::{BridgeConfig, BridgeInterface, BridgeVlan, EtcLqos, Tunables};
pub use etc::{BridgeConfig, BridgeInterface, BridgeVlan, EtcLqos, Tunables, enable_long_term_stats};
pub use libre_qos_config::LibreQoSConfig;
pub use network_json::{NetworkJson, NetworkJsonNode, NetworkJsonTransport};
pub use program_control::load_libreqos;
@@ -2,17 +2,19 @@
//! reads, writes and maps values from the Python file.

use crate::etc;
use ip_network::IpNetwork;
use log::error;
use serde::{Deserialize, Serialize};
use std::{
  fs::{self, read_to_string, remove_file, OpenOptions},
  io::Write,
  net::IpAddr,
  path::{Path, PathBuf},
};
use thiserror::Error;

/// Represents the contents of an `ispConfig.py` file.
#[derive(Serialize, Deserialize, Debug)]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LibreQoSConfig {
  /// Interface facing the Internet
  pub internet_interface: String,

@@ -58,6 +60,39 @@ pub struct LibreQoSConfig {

  /// WARNING: generally don't touch this.
  pub override_queue_count: u32,

  /// Is UISP integration enabled?
  pub automatic_import_uisp: bool,

  /// UISP Authentication Token
  pub uisp_auth_token: String,

  /// UISP Base URL (e.g. billing.myisp.com)
  pub uisp_base_url: String,

  /// Root site for UISP tree generation
  pub uisp_root_site: String,

  /// Circuit names use address?
  pub circuit_name_use_address: bool,

  /// UISP Strategy
  pub uisp_strategy: String,

  /// UISP Suspension Strategy
  pub uisp_suspended_strategy: String,

  /// Bandwidth Overhead Factor
  pub bandwidth_overhead_factor: f32,

  /// Subnets allowed to be included in device lists
  pub allowed_subnets: String,

  /// Subnets explicitly ignored from device lists
  pub ignored_subnets: String,

  /// Overwrite network.json even if it exists
  pub overwrite_network_json_always: bool,
}

impl LibreQoSConfig {
@@ -107,6 +142,17 @@ impl LibreQoSConfig {
      enable_shell_commands: true,
      run_as_sudo: false,
      override_queue_count: 0,
      automatic_import_uisp: false,
      uisp_auth_token: "".to_string(),
      uisp_base_url: "".to_string(),
      uisp_root_site: "".to_string(),
      circuit_name_use_address: false,
      uisp_strategy: "".to_string(),
      uisp_suspended_strategy: "".to_string(),
      bandwidth_overhead_factor: 1.0,
      allowed_subnets: "".to_string(),
      ignored_subnets: "".to_string(),
      overwrite_network_json_always: false,
    };
    result.parse_isp_config(path)?;
    Ok(result)
@@ -228,6 +274,49 @@ impl LibreQoSConfig {
        self.override_queue_count =
          split_at_equals(line).parse().unwrap_or(0);
      }
      if line.starts_with("automaticImportUISP") {
        let mode = split_at_equals(line);
        if mode == "True" {
          self.automatic_import_uisp = true;
        }
      }
      if line.starts_with("uispAuthToken") {
        self.uisp_auth_token = split_at_equals(line);
      }
      if line.starts_with("UISPbaseURL") {
        self.uisp_base_url = split_at_equals(line);
      }
      if line.starts_with("uispSite") {
        self.uisp_root_site = split_at_equals(line);
      }
      if line.starts_with("circuitNameUseAddress") {
        let mode = split_at_equals(line);
        if mode == "True" {
          self.circuit_name_use_address = true;
        }
      }
      if line.starts_with("uispStrategy") {
        self.uisp_strategy = split_at_equals(line);
      }
      if line.starts_with("uispSuspendedStrategy") {
        self.uisp_suspended_strategy = split_at_equals(line);
      }
      if line.starts_with("bandwidthOverheadFactor") {
        self.bandwidth_overhead_factor =
          split_at_equals(line).parse().unwrap_or(1.0);
      }
      if line.starts_with("allowedSubnets") {
        self.allowed_subnets = split_at_equals(line);
      }
      if line.starts_with("ignoreSubnets") {
        self.ignored_subnets = split_at_equals(line);
      }
      if line.starts_with("overwriteNetworkJSONalways") {
        let mode = split_at_equals(line);
        if mode == "True" {
          self.overwrite_network_json_always = true;
        }
      }
    }
  }
@@ -361,6 +450,20 @@ impl LibreQoSConfig {
    }
    Ok(())
  }

  /// Convert the Allowed Subnets list into a Trie for fast search
  pub fn allowed_subnets_trie(&self) -> ip_network_table::IpNetworkTable<usize> {
    let ip_list = ip_list_to_ips(&self.allowed_subnets).unwrap();
    //println!("{ip_list:#?}");
    ip_list_to_trie(&ip_list)
  }

  /// Convert the Ignored Subnets list into a Trie for fast search
  pub fn ignored_subnets_trie(&self) -> ip_network_table::IpNetworkTable<usize> {
    let ip_list = ip_list_to_ips(&self.ignored_subnets).unwrap();
    //println!("{ip_list:#?}");
    ip_list_to_trie(&ip_list)
  }
}

fn split_at_equals(line: &str) -> String {
@@ -387,4 +490,52 @@ pub enum LibreQoSConfigError {
  CannotOpenForWrite,
  #[error("Unable to write to ispConfig.py")]
  CannotWrite,
  #[error("Unable to read IP")]
  CannotReadIP,
}

fn ip_list_to_ips(
  source: &str,
) -> Result<Vec<(IpAddr, u8)>, LibreQoSConfigError> {
  // Remove any square brackets, spaces
  let source = source.replace(['[', ']', ' '], "");

  // Split at commas
  Ok(
    source
      .split(',')
      .map(|raw| {
        let split: Vec<&str> = raw.split('/').collect();
        let cidr = split[1].parse::<u8>().unwrap();
        let addr = split[0].parse::<IpAddr>().unwrap();
        (addr, cidr)
      })
      .collect(),
  )
}

fn ip_list_to_trie(
  source: &[(IpAddr, u8)],
) -> ip_network_table::IpNetworkTable<usize> {
  let mut table = ip_network_table::IpNetworkTable::new();
  source
    .iter()
    .map(|(ip, subnet)| {
      (
        match ip {
          IpAddr::V4(ip) => ip.to_ipv6_mapped(),
          IpAddr::V6(ip) => *ip,
        },
        match ip {
          IpAddr::V4(..) => *subnet + 96,
          IpAddr::V6(..) => *subnet,
        },
      )
    })
    .map(|(ip, cidr)| IpNetwork::new(ip, cidr).unwrap())
    .enumerate()
    .for_each(|(id, net)| {
      table.insert(net, id);
    });
  table
}
@@ -32,6 +32,9 @@ pub struct NetworkJsonNode {

  /// The immediate parent node
  pub immediate_parent: Option<usize>,

  /// The node type
  pub node_type: Option<String>,
}

impl NetworkJsonNode {
@@ -48,7 +51,7 @@ impl NetworkJsonNode {
      rtts: self.rtts.iter().map(|n| *n as f32 / 100.0).collect(),
      parents: self.parents.clone(),
      immediate_parent: self.immediate_parent,
      node_type: None,
      node_type: self.node_type.clone(),
    }
  }
}
@@ -125,6 +128,7 @@ impl NetworkJson {
      parents: Vec::new(),
      immediate_parent: None,
      rtts: DashSet::new(),
      node_type: None,
    }];
    if !Self::exists() {
      return Err(NetworkJsonError::FileNotFound);
@@ -272,6 +276,7 @@ fn recurse_node(
    name: name.to_string(),
    immediate_parent: Some(immediate_parent),
    rtts: DashSet::new(),
    node_type: json.get("type").map(|v| v.as_str().unwrap().to_string()),
  };

  if node.name != "children" {
@@ -21,6 +21,7 @@ nix = "0"
|
||||
once_cell = "1"
|
||||
dns-lookup = "1"
|
||||
dashmap = "5"
|
||||
reqwest = { version = "0.11.20", features = ["json"] }
|
||||
|
||||
# Support JemAlloc on supported platforms
|
||||
[target.'cfg(any(target_arch = "x86", target_arch = "x86_64"))'.dependencies]
|
||||
|
||||
7  src/rust/lqos_node_manager/build.rs  (new file)
@@ -0,0 +1,7 @@
use std::process::Command;
fn main() {
    // Adds a git commit hash to the program
    let output = Command::new("git").args(["rev-parse", "HEAD"]).output().unwrap();
    let git_hash = String::from_utf8(output.stdout).unwrap();
    println!("cargo:rustc-env=GIT_HASH={}", git_hash);
}
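The `cargo:rustc-env` line makes the captured hash available to the node manager at compile time; later in this commit, `toasts.rs` reads it back with `env!("GIT_HASH")`. A small illustrative sketch of that consumption (the `trim()` is an added suggestion, since `git rev-parse HEAD` output ends with a newline; the commit itself does not trim it):

    /// Illustration only: reading the value exported by the build.rs above.
    pub fn current_git_hash() -> String {
        // GIT_HASH is baked in at compile time via `cargo:rustc-env`.
        env!("GIT_HASH").trim().to_string()
    }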
|
||||
@@ -11,6 +11,7 @@ mod auth_guard;
|
||||
mod config_control;
|
||||
mod network_tree;
|
||||
mod queue_info;
|
||||
mod toasts;
|
||||
|
||||
// Use JemAllocator only on supported platforms
|
||||
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
|
||||
@@ -105,6 +106,9 @@ fn rocket() -> _ {
|
||||
static_pages::fontawesome_solid,
|
||||
static_pages::fontawesome_webfont,
|
||||
static_pages::fontawesome_woff,
|
||||
// Front page toast checks
|
||||
toasts::version_check,
|
||||
toasts::stats_check,
|
||||
],
|
||||
);
|
||||
|
||||
|
||||
113
src/rust/lqos_node_manager/src/toasts.rs
Normal file
@@ -0,0 +1,113 @@
|
||||
use lqos_config::EtcLqos;
|
||||
use lqos_utils::unix_time::unix_now;
|
||||
use rocket::serde::json::Json;
|
||||
use rocket::serde::{Deserialize, Serialize};
|
||||
|
||||
static LAST_VERSION_CHECK: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0);
|
||||
const ONE_HOUR_SECONDS: u64 = 60 * 60;
|
||||
const VERSION_STRING: &str = include_str!("../../../VERSION_STRING");
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(crate = "rocket::serde")]
|
||||
struct VersionCheckRequest {
|
||||
current_git_hash: String,
|
||||
version_string: String,
|
||||
node_id: String,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug)]
|
||||
#[serde(crate = "rocket::serde")]
|
||||
pub struct VersionCheckResponse {
|
||||
update_available: bool,
|
||||
}
|
||||
|
||||
async fn send_version_check() -> anyhow::Result<VersionCheckResponse> {
|
||||
if let Ok(cfg) = EtcLqos::load() {
|
||||
let current_hash = env!("GIT_HASH");
|
||||
let request = VersionCheckRequest {
|
||||
current_git_hash: current_hash.to_string(),
|
||||
version_string: VERSION_STRING.to_string(),
|
||||
node_id: cfg.node_id.unwrap_or("(not configured)".to_string()),
|
||||
};
|
||||
let response = reqwest::Client::new()
|
||||
.post("https://stats.libreqos.io/api/version_check")
|
||||
.json(&request)
|
||||
.send()
|
||||
.await?
|
||||
.json()
|
||||
.await?;
|
||||
|
||||
Ok(response)
|
||||
} else {
|
||||
anyhow::bail!("No config");
|
||||
}
|
||||
}
|
||||
|
||||
#[get("/api/version_check")]
|
||||
pub async fn version_check() -> Json<String> {
|
||||
let last_check = LAST_VERSION_CHECK.load(std::sync::atomic::Ordering::Relaxed);
|
||||
if let Ok(now) = unix_now() {
|
||||
if now > last_check + ONE_HOUR_SECONDS {
|
||||
let res = send_version_check().await;
|
||||
if let Ok(response) = &res {
|
||||
LAST_VERSION_CHECK.store(now, std::sync::atomic::Ordering::Relaxed);
|
||||
|
||||
if response.update_available {
|
||||
return Json(String::from("Update available"));
|
||||
}
|
||||
} else {
|
||||
error!("Unable to send version check");
|
||||
error!("{res:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
Json(String::from("All Good"))
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(crate = "rocket::serde")]
|
||||
pub enum StatsCheckResponse {
|
||||
DoNothing,
|
||||
NotSetup,
|
||||
Disabled,
|
||||
GoodToGo,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(crate = "rocket::serde")]
|
||||
pub struct StatsCheckAction {
|
||||
action: StatsCheckResponse,
|
||||
node_id: String,
|
||||
}
|
||||
|
||||
#[get("/api/stats_check")]
|
||||
pub async fn stats_check() -> Json<StatsCheckAction> {
|
||||
let mut response = StatsCheckAction {
|
||||
action: StatsCheckResponse::DoNothing,
|
||||
node_id: String::new(),
|
||||
};
|
||||
|
||||
if let Ok(cfg) = EtcLqos::load() {
|
||||
if let Some(lts) = &cfg.long_term_stats {
|
||||
if !lts.gather_stats {
|
||||
response = StatsCheckAction {
|
||||
action: StatsCheckResponse::Disabled,
|
||||
node_id: cfg.node_id.unwrap_or("(not configured)".to_string()),
|
||||
};
|
||||
} else {
|
||||
// Stats are enabled
|
||||
response = StatsCheckAction {
|
||||
action: StatsCheckResponse::GoodToGo,
|
||||
node_id: cfg.node_id.unwrap_or("(not configured)".to_string()),
|
||||
};
|
||||
}
|
||||
} else {
|
||||
response = StatsCheckAction {
|
||||
action: StatsCheckResponse::NotSetup,
|
||||
node_id: cfg.node_id.unwrap_or("(not configured)".to_string()),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
Json(response)
|
||||
}
|
||||
@@ -58,11 +58,11 @@ pub async fn unknown_devices_range(
|
||||
|
||||
#[get("/api/unknown_devices_csv")]
|
||||
pub async fn unknown_devices_csv(_auth: AuthGuard) -> NoCache<String> {
|
||||
let mut result = String::new();
|
||||
let mut result = "IP Address,Download,Upload\n".to_string();
|
||||
let reader = unknown_devices().await;
|
||||
|
||||
for unknown in reader.iter() {
|
||||
result += &format!("{}\n", unknown.ip_address);
|
||||
result += &format!("{},{},{}\n", unknown.ip_address, unknown.bits_per_second.0, unknown.bits_per_second.1);
|
||||
}
|
||||
NoCache::new(result)
|
||||
}
|
||||
|
||||
@@ -46,10 +46,7 @@
|
||||
|
||||
<ul class="navbar-nav ms-auto">
|
||||
<li class="nav-item" id="currentLogin"></li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="#" id="startTest"><i class="fa fa-flag-checkered"></i> Run Bandwidth
|
||||
Test</a>
|
||||
</li>
|
||||
<li class="nav-item" id="statsLink"></li>
|
||||
<li class="nav-item ms-auto">
|
||||
<a class="nav-link" href="/config"><i class="fa fa-gear"></i> Configuration</a>
|
||||
</li>
|
||||
|
||||
@@ -37,9 +37,7 @@
|
||||
|
||||
<ul class="navbar-nav ms-auto">
|
||||
<li class="nav-item" id="currentLogin"></li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="#" id="startTest"><i class="fa fa-flag-checkered"></i> Run Bandwidth Test</a>
|
||||
</li>
|
||||
<li class="nav-item" id="statsLink"></li>
|
||||
<li class="nav-item ms-auto">
|
||||
<a class="nav-link" href="/config"><i class="fa fa-gear"></i> Configuration</a>
|
||||
</li>
|
||||
|
||||
@@ -160,8 +160,28 @@ function updateHostCounts() {
|
||||
}
|
||||
$("#currentLogin").html(html);
|
||||
});
|
||||
$("#startTest").on('click', () => {
|
||||
/*$("#startTest").on('click', () => {
|
||||
$.get("/api/run_btest", () => { });
|
||||
});*/
|
||||
// LTS Check
|
||||
$.get("/api/stats_check", (data) => {
|
||||
console.log(data);
|
||||
let template = "<a class='nav-link' href='$URL$'><i class='fa fa-dashboard'></i> $TEXT$</a>";
|
||||
switch (data.action) {
|
||||
case "Disabled": {
|
||||
template = template.replace("$URL$", "#")
|
||||
.replace("$TEXT$", "<span style='color: red'>Stats Disabled</span>");
|
||||
} break;
|
||||
case "NotSetup": {
|
||||
template = template.replace("$URL$", "https://stats.libreqos.io/trial1/" + encodeURI(data.node_id))
|
||||
.replace("$TEXT$", "<span class='badge badge-pill badge-success green-badge'>Statistics Free Trial</span>");
|
||||
} break;
|
||||
default: {
|
||||
template = template.replace("$URL$", "https://stats.libreqos.io/")
|
||||
.replace("$TEXT$", "Statistics");
|
||||
}
|
||||
}
|
||||
$("#statsLink").html(template);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -44,10 +44,7 @@
|
||||
|
||||
<ul class="navbar-nav ms-auto">
|
||||
<li class="nav-item" id="currentLogin"></li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="#" id="startTest"><i class="fa fa-flag-checkered"></i> Run Bandwidth
|
||||
Test</a>
|
||||
</li>
|
||||
<li class="nav-item" id="statsLink"></li>
|
||||
<li class="nav-item ms-auto">
|
||||
<a class="nav-link" href="/config"><i class="fa fa-gear"></i> Configuration</a>
|
||||
</li>
|
||||
@@ -61,6 +58,8 @@
|
||||
|
||||
<div id="container" class="pad4">
|
||||
|
||||
<div id="toasts"></div>
|
||||
|
||||
<!-- Dashboard Row 1 -->
|
||||
<div class="row mbot8">
|
||||
<!-- THROUGHPUT -->
|
||||
@@ -351,6 +350,17 @@
|
||||
updateHostCounts();
|
||||
updateSiteFunnel();
|
||||
OneSecondCadence();
|
||||
|
||||
// Version Check
|
||||
$.get("/api/version_check", (data) => {
|
||||
if (data != "All Good") {
|
||||
let html = "<div class='alert alert-info alert-dismissible fade show' role='alert'>";
|
||||
html += "<strong>LibreQoS Update Available!</strong>";
|
||||
html += "<button type='button' class='btn-close' data-bs-dismiss='alert' aria-label='Close'></button>";
|
||||
html += "</div>";
|
||||
$("#toasts").append(html);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
$(document).ready(start);
|
||||
|
||||
@@ -37,9 +37,7 @@
|
||||
|
||||
<ul class="navbar-nav ms-auto">
|
||||
<li class="nav-item" id="currentLogin"></li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="#" id="startTest"><i class="fa fa-flag-checkered"></i> Run Bandwidth Test</a>
|
||||
</li>
|
||||
<li class="nav-item" id="statsLink"></li>
|
||||
<li class="nav-item ms-auto">
|
||||
<a class="nav-link" href="/config"><i class="fa fa-gear"></i> Configuration</a>
|
||||
</li>
|
||||
|
||||
@@ -44,10 +44,7 @@
|
||||
|
||||
<ul class="navbar-nav ms-auto">
|
||||
<li class="nav-item" id="currentLogin"></li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="#" id="startTest"><i class="fa fa-flag-checkered"></i> Run Bandwidth
|
||||
Test</a>
|
||||
</li>
|
||||
<li class="nav-item" id="statsLink"></li>
|
||||
<li class="nav-item ms-auto">
|
||||
<a class="nav-link" href="/config"><i class="fa fa-gear"></i> Configuration</a>
|
||||
</li>
|
||||
|
||||
@@ -12,7 +12,7 @@ crate-type = ["cdylib"]
|
||||
pyo3 = "0"
|
||||
lqos_bus = { path = "../lqos_bus" }
|
||||
lqos_utils = { path = "../lqos_utils" }
|
||||
tokio = { version = "1", features = [ "rt", "macros", "net", "io-util", "time" ] }
|
||||
tokio = { version = "1", features = [ "full" ] }
|
||||
anyhow = "1"
|
||||
sysinfo = "0"
|
||||
nix = "0"
|
||||
|
||||
@@ -220,7 +220,7 @@ impl QueueNode {
|
||||
log::warn!("{:?}", value);
|
||||
}
|
||||
}
|
||||
"idForCircuitsWithoutParentNodes" => {
|
||||
"idForCircuitsWithoutParentNodes" | "type" => {
|
||||
// Ignore
|
||||
}
|
||||
_ => log::error!("I don't know how to parse key: [{key}]"),
|
||||
|
||||
@@ -359,7 +359,7 @@ int throughput_reader(struct bpf_iter__bpf_map_elem *ctx)
|
||||
|
||||
bpf_seq_write(seq, ip, sizeof(struct in6_addr));
|
||||
for (__u32 i=0; i<NUM_CPUS; i++) {
|
||||
struct host_counter * content = counter+(i*48);
|
||||
struct host_counter * content = counter+(i*sizeof(struct host_counter));
|
||||
bpf_seq_write(seq, content, sizeof(struct host_counter));
|
||||
}
|
||||
|
||||
|
||||
@@ -102,7 +102,8 @@ pub(crate) fn xps_setup_default_disable(interface: &str) -> Result<()> {
|
||||
fn sorted_txq_xps_cpus(interface: &str) -> Result<Vec<String>> {
|
||||
let mut result = Vec::new();
|
||||
let paths =
|
||||
std::fs::read_dir(&format!("/sys/class/net/{interface}/queues/"))?;
|
||||
std::fs::read_dir(&format!("/sys/class/net/{interface}/queues/"))
|
||||
.map_err(|_| anyhow::anyhow!("/sys/class/net/{interface}/queues/ does not exist. Does this card only support one queue (not supported)?"))?;
|
||||
for path in paths {
|
||||
if let Ok(path) = &path {
|
||||
if path.path().is_dir() {
|
||||
|
||||
@@ -6,7 +6,7 @@ license = "GPL-2.0-only"
|
||||
|
||||
[dependencies]
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
nix = "0"
|
||||
nix = { version = "0", features = ["time"] }
|
||||
log = "0"
|
||||
notify = { version = "5.0.0", default-features = false } # Not using crossbeam because of Tokio
|
||||
thiserror = "1"
|
||||
|
||||
@@ -15,6 +15,7 @@ lqos_sys = { path = "../lqos_sys" }
|
||||
lqos_queue_tracker = { path = "../lqos_queue_tracker" }
|
||||
lqos_utils = { path = "../lqos_utils" }
|
||||
lqos_heimdall = { path = "../lqos_heimdall" }
|
||||
lts_client = { path = "../lts_client" }
|
||||
tokio = { version = "1", features = [ "full", "parking_lot" ] }
|
||||
once_cell = "1.17.1"
|
||||
lqos_bus = { path = "../lqos_bus" }
|
||||
@@ -26,6 +27,8 @@ log = "0"
|
||||
nix = "0"
|
||||
sysinfo = "0"
|
||||
dashmap = "5"
|
||||
num-traits = "0.2"
|
||||
thiserror = "1"
|
||||
|
||||
# Support JemAlloc on supported platforms
|
||||
[target.'cfg(any(target_arch = "x86", target_arch = "x86_64"))'.dependencies]
|
||||
|
||||
@@ -44,7 +44,7 @@ pub(crate) fn get_nic_info() -> anyhow::Result<Vec<Nic>> {
|
||||
current_nic = Some(Nic::default());
|
||||
}
|
||||
|
||||
if let Some(mut nic) = current_nic.as_mut() {
|
||||
if let Some(nic) = current_nic.as_mut() {
|
||||
if let Some(d) = trimmed.strip_prefix("description: ") {
|
||||
nic.description = d.to_string();
|
||||
}
|
||||
|
||||
50
src/rust/lqosd/src/long_term_stats/mod.rs
Normal file
@@ -0,0 +1,50 @@
|
||||
//! Most of this functionality is now in the `lts_stats` crate.
|
||||
use crate::shaped_devices_tracker::NETWORK_JSON;
|
||||
use lqos_bus::BusResponse;
|
||||
use lts_client::{
|
||||
collector::NetworkTreeEntry, submission_queue::get_current_stats
|
||||
};
|
||||
|
||||
pub(crate) fn get_network_tree() -> Vec<(usize, NetworkTreeEntry)> {
|
||||
if let Ok(reader) = NETWORK_JSON.read() {
|
||||
let result = reader
|
||||
.nodes
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(idx, n)| (idx, n.into()))
|
||||
.collect::<Vec<(usize, NetworkTreeEntry)>>();
|
||||
//println!("{result:#?}");
|
||||
return result;
|
||||
}
|
||||
Vec::new()
|
||||
}
|
||||
|
||||
pub fn get_stats_totals() -> BusResponse {
|
||||
let current = get_current_stats();
|
||||
if let Some(c) = current {
|
||||
if let Some(totals) = &c.totals {
|
||||
return BusResponse::LongTermTotals(totals.clone());
|
||||
}
|
||||
}
|
||||
BusResponse::Fail("No Data".to_string())
|
||||
}
|
||||
|
||||
pub fn get_stats_host() -> BusResponse {
|
||||
let current = get_current_stats();
|
||||
if let Some(c) = current {
|
||||
if let Some(hosts) = c.hosts {
|
||||
return BusResponse::LongTermHosts(hosts);
|
||||
}
|
||||
}
|
||||
BusResponse::Fail("No Data".to_string())
|
||||
}
|
||||
|
||||
pub fn get_stats_tree() -> BusResponse {
|
||||
let current = get_current_stats();
|
||||
if let Some(c) = current {
|
||||
if let Some(tree) = c.tree {
|
||||
return BusResponse::LongTermTree(tree);
|
||||
}
|
||||
}
|
||||
BusResponse::Fail("No Data".to_string())
|
||||
}
|
||||
@@ -8,15 +8,15 @@ mod throughput_tracker;
|
||||
mod anonymous_usage;
|
||||
mod tuning;
|
||||
mod validation;
|
||||
mod long_term_stats;
|
||||
use std::net::IpAddr;
|
||||
|
||||
use crate::{
|
||||
file_lock::FileLock,
|
||||
ip_mapping::{clear_ip_flows, del_ip_flow, list_mapped_ips, map_ip_to_flow},
|
||||
};
|
||||
use anyhow::Result;
|
||||
use log::{info, warn};
|
||||
use lqos_bus::{BusRequest, BusResponse, UnixSocketServer};
|
||||
use lqos_bus::{BusRequest, BusResponse, UnixSocketServer, StatsRequest};
|
||||
use lqos_config::LibreQoSConfig;
|
||||
use lqos_heimdall::{n_second_packet_dump, perf_interface::heimdall_handle_events, start_heimdall};
|
||||
use lqos_queue_tracker::{
|
||||
@@ -24,6 +24,7 @@ use lqos_queue_tracker::{
|
||||
spawn_queue_structure_monitor,
|
||||
};
|
||||
use lqos_sys::LibreQoSKernels;
|
||||
use lts_client::collector::start_long_term_stats;
|
||||
use signal_hook::{
|
||||
consts::{SIGHUP, SIGINT, SIGTERM},
|
||||
iterator::Signals,
|
||||
@@ -72,14 +73,15 @@ async fn main() -> Result<()> {
|
||||
};
|
||||
|
||||
// Spawn tracking sub-systems
|
||||
let long_term_stats_tx = start_long_term_stats().await;
|
||||
join!(
|
||||
start_heimdall(),
|
||||
spawn_queue_structure_monitor(),
|
||||
shaped_devices_tracker::shaped_devices_watcher(),
|
||||
shaped_devices_tracker::network_json_watcher(),
|
||||
anonymous_usage::start_anonymous_usage(),
|
||||
throughput_tracker::spawn_throughput_monitor(long_term_stats_tx.clone()),
|
||||
);
|
||||
throughput_tracker::spawn_throughput_monitor();
|
||||
spawn_queue_monitor();
|
||||
|
||||
// Handle signals
|
||||
@@ -95,6 +97,9 @@ async fn main() -> Result<()> {
|
||||
warn!("This should never happen - terminating on unknown signal")
|
||||
}
|
||||
}
|
||||
let _ = tokio::runtime::Runtime::new()
|
||||
.unwrap()
|
||||
.block_on(long_term_stats_tx.send(lts_client::collector::StatsUpdateMessage::Quit));
|
||||
std::mem::drop(kernels);
|
||||
UnixSocketServer::signal_cleanup();
|
||||
std::mem::drop(file_lock);
|
||||
@@ -214,6 +219,15 @@ fn handle_bus_requests(
|
||||
BusResponse::Fail("Invalid IP".to_string())
|
||||
}
|
||||
}
|
||||
BusRequest::GetLongTermStats(StatsRequest::CurrentTotals) => {
|
||||
long_term_stats::get_stats_totals()
|
||||
}
|
||||
BusRequest::GetLongTermStats(StatsRequest::AllHosts) => {
|
||||
long_term_stats::get_stats_host()
|
||||
}
|
||||
BusRequest::GetLongTermStats(StatsRequest::Tree) => {
|
||||
long_term_stats::get_stats_tree()
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,129 +4,130 @@ use lqos_bus::BusResponse;
|
||||
use lqos_config::{ConfigShapedDevices, NetworkJsonTransport};
|
||||
use lqos_utils::file_watcher::FileWatcher;
|
||||
use once_cell::sync::Lazy;
|
||||
use std::sync::RwLock;
|
||||
use std::sync::{RwLock, atomic::AtomicBool};
|
||||
use tokio::task::spawn_blocking;
|
||||
mod netjson;
|
||||
pub use netjson::*;
|
||||
|
||||
pub static SHAPED_DEVICES: Lazy<RwLock<ConfigShapedDevices>> =
|
||||
Lazy::new(|| RwLock::new(ConfigShapedDevices::default()));
|
||||
Lazy::new(|| RwLock::new(ConfigShapedDevices::default()));
|
||||
pub static STATS_NEEDS_NEW_SHAPED_DEVICES: AtomicBool = AtomicBool::new(false);
|
||||
|
||||
fn load_shaped_devices() {
|
||||
info!("ShapedDevices.csv has changed. Attempting to load it.");
|
||||
let shaped_devices = ConfigShapedDevices::load();
|
||||
if let Ok(new_file) = shaped_devices {
|
||||
info!("ShapedDevices.csv loaded");
|
||||
*SHAPED_DEVICES.write().unwrap() = new_file;
|
||||
crate::throughput_tracker::THROUGHPUT_TRACKER
|
||||
.refresh_circuit_ids();
|
||||
} else {
|
||||
warn!("ShapedDevices.csv failed to load, see previous error messages. Reverting to empty set.");
|
||||
*SHAPED_DEVICES.write().unwrap() = ConfigShapedDevices::default();
|
||||
}
|
||||
info!("ShapedDevices.csv has changed. Attempting to load it.");
|
||||
let shaped_devices = ConfigShapedDevices::load();
|
||||
if let Ok(new_file) = shaped_devices {
|
||||
info!("ShapedDevices.csv loaded");
|
||||
*SHAPED_DEVICES.write().unwrap() = new_file;
|
||||
crate::throughput_tracker::THROUGHPUT_TRACKER.refresh_circuit_ids();
|
||||
STATS_NEEDS_NEW_SHAPED_DEVICES.store(true, std::sync::atomic::Ordering::Relaxed);
|
||||
} else {
|
||||
warn!("ShapedDevices.csv failed to load, see previous error messages. Reverting to empty set.");
|
||||
*SHAPED_DEVICES.write().unwrap() = ConfigShapedDevices::default();
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn shaped_devices_watcher() {
|
||||
spawn_blocking(|| {
|
||||
info!("Watching for ShapedDevices.csv changes");
|
||||
let _ = watch_for_shaped_devices_changing();
|
||||
});
|
||||
spawn_blocking(|| {
|
||||
info!("Watching for ShapedDevices.csv changes");
|
||||
let _ = watch_for_shaped_devices_changing();
|
||||
});
|
||||
}
|
||||
|
||||
/// Fires up a Linux file system watcher that notifies
|
||||
/// when `ShapedDevices.csv` changes, and triggers a reload.
|
||||
fn watch_for_shaped_devices_changing() -> Result<()> {
|
||||
let watch_path = ConfigShapedDevices::path();
|
||||
if watch_path.is_err() {
|
||||
error!("Unable to generate path for ShapedDevices.csv");
|
||||
return Err(anyhow::Error::msg(
|
||||
"Unable to create path for ShapedDevices.csv",
|
||||
));
|
||||
}
|
||||
let watch_path = watch_path.unwrap();
|
||||
let watch_path = ConfigShapedDevices::path();
|
||||
if watch_path.is_err() {
|
||||
error!("Unable to generate path for ShapedDevices.csv");
|
||||
return Err(anyhow::Error::msg(
|
||||
"Unable to create path for ShapedDevices.csv",
|
||||
));
|
||||
}
|
||||
let watch_path = watch_path.unwrap();
|
||||
|
||||
let mut watcher = FileWatcher::new("ShapedDevices.csv", watch_path);
|
||||
watcher.set_file_exists_callback(load_shaped_devices);
|
||||
watcher.set_file_created_callback(load_shaped_devices);
|
||||
watcher.set_file_changed_callback(load_shaped_devices);
|
||||
loop {
|
||||
let result = watcher.watch();
|
||||
info!("ShapedDevices watcher returned: {result:?}");
|
||||
}
|
||||
let mut watcher = FileWatcher::new("ShapedDevices.csv", watch_path);
|
||||
watcher.set_file_exists_callback(load_shaped_devices);
|
||||
watcher.set_file_created_callback(load_shaped_devices);
|
||||
watcher.set_file_changed_callback(load_shaped_devices);
|
||||
loop {
|
||||
let result = watcher.watch();
|
||||
info!("ShapedDevices watcher returned: {result:?}");
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_one_network_map_layer(parent_idx: usize) -> BusResponse {
|
||||
let net_json = NETWORK_JSON.read().unwrap();
|
||||
if let Some(parent) = net_json.get_cloned_entry_by_index(parent_idx) {
|
||||
let mut nodes = vec![(parent_idx, parent)];
|
||||
nodes.extend_from_slice(&net_json.get_cloned_children(parent_idx));
|
||||
BusResponse::NetworkMap(nodes)
|
||||
} else {
|
||||
BusResponse::Fail("No such node".to_string())
|
||||
}
|
||||
let net_json = NETWORK_JSON.read().unwrap();
|
||||
if let Some(parent) = net_json.get_cloned_entry_by_index(parent_idx) {
|
||||
let mut nodes = vec![(parent_idx, parent)];
|
||||
nodes.extend_from_slice(&net_json.get_cloned_children(parent_idx));
|
||||
BusResponse::NetworkMap(nodes)
|
||||
} else {
|
||||
BusResponse::Fail("No such node".to_string())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_top_n_root_queues(n_queues: usize) -> BusResponse {
|
||||
let net_json = NETWORK_JSON.read().unwrap();
|
||||
if let Some(parent) = net_json.get_cloned_entry_by_index(0) {
|
||||
let mut nodes = vec![(0, parent)];
|
||||
nodes.extend_from_slice(&net_json.get_cloned_children(0));
|
||||
// Remove the top-level entry for root
|
||||
nodes.remove(0);
|
||||
// Sort by total bandwidth (up + down) descending
|
||||
nodes.sort_by(|a, b| {
|
||||
let total_a = a.1.current_throughput.0 + a.1.current_throughput.1;
|
||||
let total_b = b.1.current_throughput.0 + b.1.current_throughput.1;
|
||||
total_b.cmp(&total_a)
|
||||
});
|
||||
// Summarize everything after n_queues
|
||||
if nodes.len() > n_queues {
|
||||
let mut other_bw = (0, 0);
|
||||
nodes.drain(n_queues..).for_each(|n| {
|
||||
other_bw.0 += n.1.current_throughput.0;
|
||||
other_bw.1 += n.1.current_throughput.1;
|
||||
});
|
||||
let net_json = NETWORK_JSON.read().unwrap();
|
||||
if let Some(parent) = net_json.get_cloned_entry_by_index(0) {
|
||||
let mut nodes = vec![(0, parent)];
|
||||
nodes.extend_from_slice(&net_json.get_cloned_children(0));
|
||||
// Remove the top-level entry for root
|
||||
nodes.remove(0);
|
||||
// Sort by total bandwidth (up + down) descending
|
||||
nodes.sort_by(|a, b| {
|
||||
let total_a = a.1.current_throughput.0 + a.1.current_throughput.1;
|
||||
let total_b = b.1.current_throughput.0 + b.1.current_throughput.1;
|
||||
total_b.cmp(&total_a)
|
||||
});
|
||||
// Summarize everything after n_queues
|
||||
if nodes.len() > n_queues {
|
||||
let mut other_bw = (0, 0);
|
||||
nodes.drain(n_queues..).for_each(|n| {
|
||||
other_bw.0 += n.1.current_throughput.0;
|
||||
other_bw.1 += n.1.current_throughput.1;
|
||||
});
|
||||
|
||||
nodes.push((
|
||||
0,
|
||||
NetworkJsonTransport {
|
||||
name: "Others".into(),
|
||||
max_throughput: (0, 0),
|
||||
current_throughput: other_bw,
|
||||
rtts: Vec::new(),
|
||||
parents: Vec::new(),
|
||||
immediate_parent: None,
|
||||
node_type: None,
|
||||
},
|
||||
));
|
||||
nodes.push((
|
||||
0,
|
||||
NetworkJsonTransport {
|
||||
name: "Others".into(),
|
||||
max_throughput: (0, 0),
|
||||
current_throughput: other_bw,
|
||||
rtts: Vec::new(),
|
||||
parents: Vec::new(),
|
||||
immediate_parent: None,
|
||||
node_type: None,
|
||||
},
|
||||
));
|
||||
}
|
||||
BusResponse::NetworkMap(nodes)
|
||||
} else {
|
||||
BusResponse::Fail("No such node".to_string())
|
||||
}
|
||||
BusResponse::NetworkMap(nodes)
|
||||
} else {
|
||||
BusResponse::Fail("No such node".to_string())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn map_node_names(nodes: &[usize]) -> BusResponse {
|
||||
let mut result = Vec::new();
|
||||
let reader = NETWORK_JSON.read().unwrap();
|
||||
nodes.iter().for_each(|id| {
|
||||
if let Some(node) = reader.nodes.get(*id) {
|
||||
result.push((*id, node.name.clone()));
|
||||
}
|
||||
});
|
||||
BusResponse::NodeNames(result)
|
||||
let mut result = Vec::new();
|
||||
let reader = NETWORK_JSON.read().unwrap();
|
||||
nodes.iter().for_each(|id| {
|
||||
if let Some(node) = reader.nodes.get(*id) {
|
||||
result.push((*id, node.name.clone()));
|
||||
}
|
||||
});
|
||||
BusResponse::NodeNames(result)
|
||||
}
|
||||
|
||||
pub fn get_funnel(circuit_id: &str) -> BusResponse {
|
||||
let reader = NETWORK_JSON.read().unwrap();
|
||||
if let Some(index) = reader.get_index_for_name(circuit_id) {
|
||||
// Reverse the scanning order and skip the last entry (the parent)
|
||||
let mut result = Vec::new();
|
||||
for idx in reader.nodes[index].parents.iter().rev().skip(1) {
|
||||
result.push((*idx, reader.nodes[*idx].clone_to_transit()));
|
||||
let reader = NETWORK_JSON.read().unwrap();
|
||||
if let Some(index) = reader.get_index_for_name(circuit_id) {
|
||||
// Reverse the scanning order and skip the last entry (the parent)
|
||||
let mut result = Vec::new();
|
||||
for idx in reader.nodes[index].parents.iter().rev().skip(1) {
|
||||
result.push((*idx, reader.nodes[*idx].clone_to_transit()));
|
||||
}
|
||||
return BusResponse::NetworkMap(result);
|
||||
}
|
||||
return BusResponse::NetworkMap(result);
|
||||
}
|
||||
|
||||
BusResponse::Fail("Unknown Node".into())
|
||||
BusResponse::Fail("Unknown Node".into())
|
||||
}
|
||||
|
||||
@@ -1,344 +1,445 @@
|
||||
mod heimdall_data;
|
||||
mod throughput_entry;
|
||||
mod tracking_data;
|
||||
mod heimdall_data;
|
||||
pub use heimdall_data::get_flow_stats;
|
||||
use crate::{
|
||||
shaped_devices_tracker::NETWORK_JSON,
|
||||
throughput_tracker::tracking_data::ThroughputTracker, stats::TIME_TO_POLL_HOSTS,
|
||||
shaped_devices_tracker::{NETWORK_JSON, STATS_NEEDS_NEW_SHAPED_DEVICES, SHAPED_DEVICES}, stats::TIME_TO_POLL_HOSTS,
|
||||
throughput_tracker::tracking_data::ThroughputTracker, long_term_stats::get_network_tree,
|
||||
};
|
||||
pub use heimdall_data::get_flow_stats;
|
||||
use log::{info, warn};
|
||||
use lqos_bus::{BusResponse, IpStats, TcHandle, XdpPpingResult};
|
||||
use lqos_utils::{fdtimer::periodic, unix_time::time_since_boot, XdpIpAddress};
|
||||
use lqos_utils::{unix_time::time_since_boot, XdpIpAddress};
|
||||
use lts_client::collector::{StatsUpdateMessage, ThroughputSummary, HostSummary};
|
||||
use once_cell::sync::Lazy;
|
||||
use std::time::Duration;
|
||||
use tokio::{
|
||||
sync::mpsc::Sender,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
const RETIRE_AFTER_SECONDS: u64 = 30;
|
||||
|
||||
pub static THROUGHPUT_TRACKER: Lazy<ThroughputTracker> =
|
||||
Lazy::new(ThroughputTracker::new);
|
||||
pub static THROUGHPUT_TRACKER: Lazy<ThroughputTracker> = Lazy::new(ThroughputTracker::new);
|
||||
|
||||
pub fn spawn_throughput_monitor() {
|
||||
info!("Starting the bandwidth monitor thread.");
|
||||
let interval_ms = 1000; // 1 second
|
||||
info!("Bandwidth check period set to {interval_ms} ms.");
|
||||
/// Create the throughput monitor thread, and begin polling for
|
||||
/// throughput data every second.
|
||||
///
|
||||
/// ## Arguments
|
||||
///
|
||||
/// * `long_term_stats_tx` - an optional MPSC sender to notify the
|
||||
/// collection thread that there is fresh data.
|
||||
pub async fn spawn_throughput_monitor(long_term_stats_tx: Sender<StatsUpdateMessage>) {
|
||||
info!("Starting the bandwidth monitor thread.");
|
||||
let interval_ms = 1000; // 1 second
|
||||
info!("Bandwidth check period set to {interval_ms} ms.");
|
||||
tokio::spawn(throughput_task(interval_ms, long_term_stats_tx));
|
||||
}
|
||||
|
||||
std::thread::spawn(move || {
|
||||
periodic(interval_ms, "Throughput Monitor", &mut || {
|
||||
let start = std::time::Instant::now();
|
||||
{
|
||||
let net_json = NETWORK_JSON.read().unwrap();
|
||||
net_json.zero_throughput_and_rtt();
|
||||
} // Scope to end the lock
|
||||
THROUGHPUT_TRACKER.copy_previous_and_reset_rtt();
|
||||
THROUGHPUT_TRACKER.apply_new_throughput_counters();
|
||||
THROUGHPUT_TRACKER.apply_rtt_data();
|
||||
THROUGHPUT_TRACKER.update_totals();
|
||||
THROUGHPUT_TRACKER.next_cycle();
|
||||
let duration_ms = start.elapsed().as_micros();
|
||||
TIME_TO_POLL_HOSTS.store(duration_ms as u64, std::sync::atomic::Ordering::Relaxed);
|
||||
});
|
||||
});
|
||||
async fn throughput_task(interval_ms: u64, long_term_stats_tx: Sender<StatsUpdateMessage>) {
|
||||
loop {
|
||||
let start = Instant::now();
|
||||
|
||||
// Perform the stats collection in a blocking thread, ensuring that
|
||||
// the tokio runtime is not blocked.
|
||||
if let Err(e) = tokio::task::spawn_blocking(move || {
|
||||
|
||||
{
|
||||
let net_json = NETWORK_JSON.read().unwrap();
|
||||
net_json.zero_throughput_and_rtt();
|
||||
} // Scope to end the lock
|
||||
THROUGHPUT_TRACKER.copy_previous_and_reset_rtt();
|
||||
THROUGHPUT_TRACKER.apply_new_throughput_counters();
|
||||
THROUGHPUT_TRACKER.apply_rtt_data();
|
||||
THROUGHPUT_TRACKER.update_totals();
|
||||
THROUGHPUT_TRACKER.next_cycle();
|
||||
let duration_ms = start.elapsed().as_micros();
|
||||
TIME_TO_POLL_HOSTS.store(duration_ms as u64, std::sync::atomic::Ordering::Relaxed);
|
||||
|
||||
}).await {
|
||||
log::error!("Error polling network. {e:?}");
|
||||
}
|
||||
tokio::spawn(submit_throughput_stats(long_term_stats_tx.clone()));
|
||||
|
||||
let elapsed = start.elapsed();
|
||||
if elapsed.as_secs_f32() < 1.0 {
|
||||
let sleep_duration = Duration::from_millis(interval_ms) - start.elapsed();
|
||||
tokio::time::sleep(sleep_duration).await;
|
||||
} else {
|
||||
log::error!("Throughput monitor thread is running behind. It took {elapsed} to poll the network.", elapsed=elapsed.as_secs_f32());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn submit_throughput_stats(long_term_stats_tx: Sender<StatsUpdateMessage>) {
|
||||
// If ShapedDevices has changed, notify the stats thread
|
||||
if let Ok(changed) = STATS_NEEDS_NEW_SHAPED_DEVICES.compare_exchange(
|
||||
true,
|
||||
false,
|
||||
std::sync::atomic::Ordering::Relaxed,
|
||||
std::sync::atomic::Ordering::Relaxed,
|
||||
) {
|
||||
if changed {
|
||||
let shaped_devices = SHAPED_DEVICES.read().unwrap().devices.clone();
|
||||
let _ = long_term_stats_tx
|
||||
.send(StatsUpdateMessage::ShapedDevicesChanged(shaped_devices))
|
||||
.await;
|
||||
}
|
||||
}
|
||||
|
||||
// Gather Global Stats
|
||||
let packets_per_second = (
|
||||
THROUGHPUT_TRACKER
|
||||
.packets_per_second
|
||||
.0
|
||||
.load(std::sync::atomic::Ordering::Relaxed),
|
||||
THROUGHPUT_TRACKER
|
||||
.packets_per_second
|
||||
.1
|
||||
.load(std::sync::atomic::Ordering::Relaxed),
|
||||
);
|
||||
let bits_per_second = THROUGHPUT_TRACKER.bits_per_second();
|
||||
let shaped_bits_per_second = THROUGHPUT_TRACKER.shaped_bits_per_second();
|
||||
let hosts = THROUGHPUT_TRACKER
|
||||
.raw_data
|
||||
.iter()
|
||||
.filter(|host| host.median_latency().is_some())
|
||||
.map(|host| HostSummary {
|
||||
ip: host.key().as_ip(),
|
||||
circuit_id: host.circuit_id.clone(),
|
||||
bits_per_second: (host.bytes_per_second.0 * 8, host.bytes_per_second.1 * 8),
|
||||
median_rtt: host.median_latency().unwrap_or(0.0),
|
||||
})
|
||||
.collect();
|
||||
|
||||
let summary = Box::new((ThroughputSummary{
|
||||
bits_per_second,
|
||||
shaped_bits_per_second,
|
||||
packets_per_second,
|
||||
hosts,
|
||||
}, get_network_tree()));
|
||||
|
||||
// Send the stats
|
||||
let result = long_term_stats_tx
|
||||
.send(StatsUpdateMessage::ThroughputReady(summary))
|
||||
.await;
|
||||
if let Err(e) = result {
|
||||
warn!("Error sending message to stats collection system. {e:?}");
|
||||
}
|
||||
}
|
||||
|
||||
pub fn current_throughput() -> BusResponse {
|
||||
let (bits_per_second, packets_per_second, shaped_bits_per_second) = {
|
||||
(
|
||||
THROUGHPUT_TRACKER.bits_per_second(),
|
||||
THROUGHPUT_TRACKER.packets_per_second(),
|
||||
THROUGHPUT_TRACKER.shaped_bits_per_second(),
|
||||
)
|
||||
};
|
||||
BusResponse::CurrentThroughput {
|
||||
bits_per_second,
|
||||
packets_per_second,
|
||||
shaped_bits_per_second,
|
||||
}
|
||||
let (bits_per_second, packets_per_second, shaped_bits_per_second) = {
|
||||
(
|
||||
THROUGHPUT_TRACKER.bits_per_second(),
|
||||
THROUGHPUT_TRACKER.packets_per_second(),
|
||||
THROUGHPUT_TRACKER.shaped_bits_per_second(),
|
||||
)
|
||||
};
|
||||
BusResponse::CurrentThroughput {
|
||||
bits_per_second,
|
||||
packets_per_second,
|
||||
shaped_bits_per_second,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn host_counters() -> BusResponse {
|
||||
let mut result = Vec::new();
|
||||
THROUGHPUT_TRACKER.raw_data.iter().for_each(|v| {
|
||||
let ip = v.key().as_ip();
|
||||
let (down, up) = v.bytes_per_second;
|
||||
result.push((ip, down, up));
|
||||
});
|
||||
BusResponse::HostCounters(result)
|
||||
let mut result = Vec::new();
|
||||
THROUGHPUT_TRACKER.raw_data.iter().for_each(|v| {
|
||||
let ip = v.key().as_ip();
|
||||
let (down, up) = v.bytes_per_second;
|
||||
result.push((ip, down, up));
|
||||
});
|
||||
BusResponse::HostCounters(result)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn retire_check(cycle: u64, recent_cycle: u64) -> bool {
|
||||
cycle < recent_cycle + RETIRE_AFTER_SECONDS
|
||||
cycle < recent_cycle + RETIRE_AFTER_SECONDS
|
||||
}
|
||||
|
||||
type TopList = (XdpIpAddress, (u64, u64), (u64, u64), f32, TcHandle, String);
|
||||
|
||||
pub fn top_n(start: u32, end: u32) -> BusResponse {
|
||||
let mut full_list: Vec<TopList> = {
|
||||
let tp_cycle = THROUGHPUT_TRACKER.cycle.load(std::sync::atomic::Ordering::Relaxed);
|
||||
THROUGHPUT_TRACKER.raw_data
|
||||
let mut full_list: Vec<TopList> = {
|
||||
let tp_cycle = THROUGHPUT_TRACKER.cycle.load(std::sync::atomic::Ordering::Relaxed);
|
||||
THROUGHPUT_TRACKER.raw_data
|
||||
.iter()
|
||||
.filter(|v| !v.key().as_ip().is_loopback())
|
||||
.filter(|d| retire_check(tp_cycle, d.most_recent_cycle))
|
||||
.map(|te| {
|
||||
(
|
||||
*te.key(),
|
||||
te.bytes_per_second,
|
||||
te.packets_per_second,
|
||||
te.median_latency().unwrap_or(0.0),
|
||||
te.tc_handle,
|
||||
te.circuit_id.as_ref().unwrap_or(&String::new()).clone(),
|
||||
)
|
||||
})
|
||||
.collect()
|
||||
};
|
||||
full_list.sort_by(|a, b| b.1 .0.cmp(&a.1 .0));
|
||||
let result = full_list
|
||||
.iter()
|
||||
.filter(|v| !v.key().as_ip().is_loopback())
|
||||
.filter(|d| retire_check(tp_cycle, d.most_recent_cycle))
|
||||
.map(|te| {
|
||||
(
|
||||
*te.key(),
|
||||
te.bytes_per_second,
|
||||
te.packets_per_second,
|
||||
te.median_latency().unwrap_or(0.0),
|
||||
te.tc_handle,
|
||||
te.circuit_id.as_ref().unwrap_or(&String::new()).clone(),
|
||||
)
|
||||
})
|
||||
.collect()
|
||||
};
|
||||
full_list.sort_by(|a, b| b.1 .0.cmp(&a.1 .0));
|
||||
let result = full_list
|
||||
.iter()
|
||||
.skip(start as usize)
|
||||
.take((end as usize) - (start as usize))
|
||||
.map(
|
||||
|(
|
||||
ip,
|
||||
(bytes_dn, bytes_up),
|
||||
(packets_dn, packets_up),
|
||||
median_rtt,
|
||||
tc_handle,
|
||||
circuit_id,
|
||||
)| IpStats {
|
||||
ip_address: ip.as_ip().to_string(),
|
||||
circuit_id: circuit_id.clone(),
|
||||
bits_per_second: (bytes_dn * 8, bytes_up * 8),
|
||||
packets_per_second: (*packets_dn, *packets_up),
|
||||
median_tcp_rtt: *median_rtt,
|
||||
tc_handle: *tc_handle,
|
||||
},
|
||||
)
|
||||
.collect();
|
||||
BusResponse::TopDownloaders(result)
|
||||
}
|
||||
.skip(start as usize)
|
||||
.take((end as usize) - (start as usize))
|
||||
.map(
|
||||
|(
|
||||
ip,
|
||||
(bytes_dn, bytes_up),
|
||||
(packets_dn, packets_up),
|
||||
median_rtt,
|
||||
tc_handle,
|
||||
circuit_id,
|
||||
)| IpStats {
|
||||
ip_address: ip.as_ip().to_string(),
|
||||
circuit_id: circuit_id.clone(),
|
||||
bits_per_second: (bytes_dn * 8, bytes_up * 8),
|
||||
packets_per_second: (*packets_dn, *packets_up),
|
||||
median_tcp_rtt: *median_rtt,
|
||||
tc_handle: *tc_handle,
|
||||
},
|
||||
)
|
||||
.collect();
|
||||
BusResponse::TopDownloaders(result)
|
||||
}
|
||||
|
||||
pub fn worst_n(start: u32, end: u32) -> BusResponse {
|
||||
let mut full_list: Vec<TopList> = {
|
||||
let tp_cycle = THROUGHPUT_TRACKER.cycle.load(std::sync::atomic::Ordering::Relaxed);
|
||||
THROUGHPUT_TRACKER.raw_data
|
||||
pub fn worst_n(start: u32, end: u32) -> BusResponse {
|
||||
let mut full_list: Vec<TopList> = {
|
||||
let tp_cycle = THROUGHPUT_TRACKER.cycle.load(std::sync::atomic::Ordering::Relaxed);
|
||||
THROUGHPUT_TRACKER.raw_data
|
||||
.iter()
|
||||
.filter(|v| !v.key().as_ip().is_loopback())
|
||||
.filter(|d| retire_check(tp_cycle, d.most_recent_cycle))
|
||||
.filter(|te| te.median_latency().is_some())
|
||||
.map(|te| {
|
||||
(
|
||||
*te.key(),
|
||||
te.bytes_per_second,
|
||||
te.packets_per_second,
|
||||
te.median_latency().unwrap_or(0.0),
|
||||
te.tc_handle,
|
||||
te.circuit_id.as_ref().unwrap_or(&String::new()).clone(),
|
||||
)
|
||||
})
|
||||
.collect()
|
||||
};
|
||||
full_list.sort_by(|a, b| b.3.partial_cmp(&a.3).unwrap());
|
||||
let result = full_list
|
||||
.iter()
|
||||
.filter(|v| !v.key().as_ip().is_loopback())
|
||||
.filter(|d| retire_check(tp_cycle, d.most_recent_cycle))
|
||||
.filter(|te| te.median_latency().is_some())
|
||||
.map(|te| {
|
||||
(
|
||||
*te.key(),
|
||||
te.bytes_per_second,
|
||||
te.packets_per_second,
|
||||
te.median_latency().unwrap_or(0.0),
|
||||
te.tc_handle,
|
||||
te.circuit_id.as_ref().unwrap_or(&String::new()).clone(),
|
||||
)
|
||||
})
|
||||
.collect()
|
||||
};
|
||||
full_list.sort_by(|a, b| b.3.partial_cmp(&a.3).unwrap());
|
||||
let result = full_list
|
||||
.iter()
|
||||
.skip(start as usize)
|
||||
.take((end as usize) - (start as usize))
|
||||
.map(
|
||||
|(
|
||||
ip,
|
||||
(bytes_dn, bytes_up),
|
||||
(packets_dn, packets_up),
|
||||
median_rtt,
|
||||
tc_handle,
|
||||
circuit_id,
|
||||
)| IpStats {
|
||||
ip_address: ip.as_ip().to_string(),
|
||||
circuit_id: circuit_id.clone(),
|
||||
bits_per_second: (bytes_dn * 8, bytes_up * 8),
|
||||
packets_per_second: (*packets_dn, *packets_up),
|
||||
median_tcp_rtt: *median_rtt,
|
||||
tc_handle: *tc_handle,
|
||||
},
|
||||
)
|
||||
.collect();
|
||||
BusResponse::WorstRtt(result)
|
||||
}
|
||||
.skip(start as usize)
|
||||
.take((end as usize) - (start as usize))
|
||||
.map(
|
||||
|(
|
||||
ip,
|
||||
(bytes_dn, bytes_up),
|
||||
(packets_dn, packets_up),
|
||||
median_rtt,
|
||||
tc_handle,
|
||||
circuit_id,
|
||||
)| IpStats {
|
||||
ip_address: ip.as_ip().to_string(),
|
||||
circuit_id: circuit_id.clone(),
|
||||
bits_per_second: (bytes_dn * 8, bytes_up * 8),
|
||||
packets_per_second: (*packets_dn, *packets_up),
|
||||
median_tcp_rtt: *median_rtt,
|
||||
tc_handle: *tc_handle,
|
||||
},
|
||||
)
|
||||
.collect();
|
||||
BusResponse::WorstRtt(result)
|
||||
}
|
||||
|
||||
pub fn best_n(start: u32, end: u32) -> BusResponse {
|
||||
let mut full_list: Vec<TopList> = {
|
||||
let tp_cycle = THROUGHPUT_TRACKER.cycle.load(std::sync::atomic::Ordering::Relaxed);
|
||||
THROUGHPUT_TRACKER.raw_data
|
||||
pub fn best_n(start: u32, end: u32) -> BusResponse {
|
||||
let mut full_list: Vec<TopList> = {
|
||||
let tp_cycle = THROUGHPUT_TRACKER.cycle.load(std::sync::atomic::Ordering::Relaxed);
|
||||
THROUGHPUT_TRACKER.raw_data
|
||||
.iter()
|
||||
.filter(|v| !v.key().as_ip().is_loopback())
|
||||
.filter(|d| retire_check(tp_cycle, d.most_recent_cycle))
|
||||
.filter(|te| te.median_latency().is_some())
|
||||
.map(|te| {
|
||||
(
|
||||
*te.key(),
|
||||
te.bytes_per_second,
|
||||
te.packets_per_second,
|
||||
te.median_latency().unwrap_or(0.0),
|
||||
te.tc_handle,
|
||||
te.circuit_id.as_ref().unwrap_or(&String::new()).clone(),
|
||||
)
|
||||
})
|
||||
.collect()
|
||||
};
|
||||
full_list.sort_by(|a, b| b.3.partial_cmp(&a.3).unwrap());
|
||||
full_list.reverse();
|
||||
let result = full_list
|
||||
.iter()
|
||||
.filter(|v| !v.key().as_ip().is_loopback())
|
||||
.filter(|d| retire_check(tp_cycle, d.most_recent_cycle))
|
||||
.filter(|te| te.median_latency().is_some())
|
||||
.map(|te| {
|
||||
(
|
||||
*te.key(),
|
||||
te.bytes_per_second,
|
||||
te.packets_per_second,
|
||||
te.median_latency().unwrap_or(0.0),
|
||||
te.tc_handle,
|
||||
te.circuit_id.as_ref().unwrap_or(&String::new()).clone(),
|
||||
)
|
||||
})
|
||||
.collect()
|
||||
};
|
||||
full_list.sort_by(|a, b| b.3.partial_cmp(&a.3).unwrap());
|
||||
full_list.reverse();
|
||||
let result = full_list
|
||||
.iter()
|
||||
.skip(start as usize)
|
||||
.take((end as usize) - (start as usize))
|
||||
.map(
|
||||
|(
|
||||
ip,
|
||||
(bytes_dn, bytes_up),
|
||||
(packets_dn, packets_up),
|
||||
median_rtt,
|
||||
tc_handle,
|
||||
circuit_id,
|
||||
)| IpStats {
|
||||
ip_address: ip.as_ip().to_string(),
|
||||
circuit_id: circuit_id.clone(),
|
||||
bits_per_second: (bytes_dn * 8, bytes_up * 8),
|
||||
packets_per_second: (*packets_dn, *packets_up),
|
||||
median_tcp_rtt: *median_rtt,
|
||||
tc_handle: *tc_handle,
|
||||
},
|
||||
)
|
||||
.collect();
|
||||
BusResponse::BestRtt(result)
|
||||
}
|
||||
.skip(start as usize)
|
||||
.take((end as usize) - (start as usize))
|
||||
.map(
|
||||
|(
|
||||
ip,
|
||||
(bytes_dn, bytes_up),
|
||||
(packets_dn, packets_up),
|
||||
median_rtt,
|
||||
tc_handle,
|
||||
circuit_id,
|
||||
)| IpStats {
|
||||
ip_address: ip.as_ip().to_string(),
|
||||
circuit_id: circuit_id.clone(),
|
||||
bits_per_second: (bytes_dn * 8, bytes_up * 8),
|
||||
packets_per_second: (*packets_dn, *packets_up),
|
||||
median_tcp_rtt: *median_rtt,
|
||||
tc_handle: *tc_handle,
|
||||
},
|
||||
)
|
||||
.collect();
|
||||
BusResponse::BestRtt(result)
|
||||
}
|
||||
|
||||
pub fn xdp_pping_compat() -> BusResponse {
|
||||
let raw_cycle = THROUGHPUT_TRACKER.cycle.load(std::sync::atomic::Ordering::Relaxed);
|
||||
let result = THROUGHPUT_TRACKER
|
||||
.raw_data
|
||||
.iter()
|
||||
.filter(|d| retire_check(raw_cycle, d.most_recent_cycle))
|
||||
.filter_map(|data| {
|
||||
if data.tc_handle.as_u32() > 0 {
|
||||
let mut valid_samples: Vec<u32> =
|
||||
data.recent_rtt_data.iter().filter(|d| **d > 0).copied().collect();
|
||||
let samples = valid_samples.len() as u32;
|
||||
if samples > 0 {
|
||||
valid_samples.sort_by(|a, b| (*a).cmp(b));
|
||||
let median = valid_samples[valid_samples.len() / 2] as f32 / 100.0;
|
||||
let max = *(valid_samples.iter().max().unwrap()) as f32 / 100.0;
|
||||
let min = *(valid_samples.iter().min().unwrap()) as f32 / 100.0;
|
||||
let sum = valid_samples.iter().sum::<u32>() as f32 / 100.0;
|
||||
let avg = sum / samples as f32;
|
||||
let raw_cycle = THROUGHPUT_TRACKER
|
||||
.cycle
|
||||
.load(std::sync::atomic::Ordering::Relaxed);
|
||||
let result = THROUGHPUT_TRACKER
|
||||
.raw_data
|
||||
.iter()
|
||||
.filter(|d| retire_check(raw_cycle, d.most_recent_cycle))
|
||||
.filter_map(|data| {
|
||||
if data.tc_handle.as_u32() > 0 {
|
||||
let mut valid_samples: Vec<u32> = data
|
||||
.recent_rtt_data
|
||||
.iter()
|
||||
.filter(|d| **d > 0)
|
||||
.copied()
|
||||
.collect();
|
||||
let samples = valid_samples.len() as u32;
|
||||
if samples > 0 {
|
||||
valid_samples.sort_by(|a, b| (*a).cmp(b));
|
||||
let median = valid_samples[valid_samples.len() / 2] as f32 / 100.0;
|
||||
let max = *(valid_samples.iter().max().unwrap()) as f32 / 100.0;
|
||||
let min = *(valid_samples.iter().min().unwrap()) as f32 / 100.0;
|
||||
let sum = valid_samples.iter().sum::<u32>() as f32 / 100.0;
|
||||
let avg = sum / samples as f32;
|
||||
|
||||
Some(XdpPpingResult {
|
||||
tc: data.tc_handle.to_string(),
|
||||
median,
|
||||
avg,
|
||||
max,
|
||||
min,
|
||||
samples,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
BusResponse::XdpPping(result)
|
||||
Some(XdpPpingResult {
|
||||
tc: data.tc_handle.to_string(),
|
||||
median,
|
||||
avg,
|
||||
max,
|
||||
min,
|
||||
samples,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
BusResponse::XdpPping(result)
|
||||
}
|
||||
|
||||
pub fn rtt_histogram() -> BusResponse {
|
||||
let mut result = vec![0; 20];
|
||||
let reader_cycle = THROUGHPUT_TRACKER.cycle.load(std::sync::atomic::Ordering::Relaxed);
|
||||
for data in THROUGHPUT_TRACKER
|
||||
.raw_data
|
||||
.iter()
|
||||
.filter(|d| retire_check(reader_cycle, d.most_recent_cycle))
|
||||
{
|
||||
let valid_samples: Vec<u32> =
|
||||
data.recent_rtt_data.iter().filter(|d| **d > 0).copied().collect();
|
||||
let samples = valid_samples.len() as u32;
|
||||
if samples > 0 {
|
||||
let median = valid_samples[valid_samples.len() / 2] as f32 / 100.0;
|
||||
let median = f32::min(200.0, median);
|
||||
let column = (median / 10.0) as usize;
|
||||
result[usize::min(column, 19)] += 1;
|
||||
let mut result = vec![0; 20];
|
||||
let reader_cycle = THROUGHPUT_TRACKER
|
||||
.cycle
|
||||
.load(std::sync::atomic::Ordering::Relaxed);
|
||||
for data in THROUGHPUT_TRACKER
|
||||
.raw_data
|
||||
.iter()
|
||||
.filter(|d| retire_check(reader_cycle, d.most_recent_cycle))
|
||||
{
|
||||
let valid_samples: Vec<u32> = data
|
||||
.recent_rtt_data
|
||||
.iter()
|
||||
.filter(|d| **d > 0)
|
||||
.copied()
|
||||
.collect();
|
||||
let samples = valid_samples.len() as u32;
|
||||
if samples > 0 {
|
||||
let median = valid_samples[valid_samples.len() / 2] as f32 / 100.0;
|
||||
let median = f32::min(200.0, median);
|
||||
let column = (median / 10.0) as usize;
|
||||
result[usize::min(column, 19)] += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
BusResponse::RttHistogram(result)
|
||||
BusResponse::RttHistogram(result)
|
||||
}
|
||||
|
||||
pub fn host_counts() -> BusResponse {
|
||||
let mut total = 0;
|
||||
let mut shaped = 0;
|
||||
let tp_cycle = THROUGHPUT_TRACKER.cycle.load(std::sync::atomic::Ordering::Relaxed);
|
||||
THROUGHPUT_TRACKER.raw_data
|
||||
.iter()
|
||||
.filter(|d| retire_check(tp_cycle, d.most_recent_cycle))
|
||||
.for_each(|d| {
|
||||
total += 1;
|
||||
if d.tc_handle.as_u32() != 0 {
|
||||
shaped += 1;
|
||||
}
|
||||
});
|
||||
BusResponse::HostCounts((total, shaped))
|
||||
let mut total = 0;
|
||||
let mut shaped = 0;
|
||||
let tp_cycle = THROUGHPUT_TRACKER
|
||||
.cycle
|
||||
.load(std::sync::atomic::Ordering::Relaxed);
|
||||
THROUGHPUT_TRACKER
|
||||
.raw_data
|
||||
.iter()
|
||||
.filter(|d| retire_check(tp_cycle, d.most_recent_cycle))
|
||||
.for_each(|d| {
|
||||
total += 1;
|
||||
if d.tc_handle.as_u32() != 0 {
|
||||
shaped += 1;
|
||||
}
|
||||
});
|
||||
BusResponse::HostCounts((total, shaped))
|
||||
}
|
||||
|
||||
type FullList = (XdpIpAddress, (u64, u64), (u64, u64), f32, TcHandle, u64);
|
||||
|
||||
pub fn all_unknown_ips() -> BusResponse {
|
||||
let boot_time = time_since_boot();
|
||||
if boot_time.is_err() {
|
||||
warn!("The Linux system clock isn't available to provide time since boot, yet.");
|
||||
warn!("This only happens immediately after a reboot.");
|
||||
return BusResponse::NotReadyYet;
|
||||
}
|
||||
let boot_time = boot_time.unwrap();
|
||||
let time_since_boot = Duration::from(boot_time);
|
||||
let five_minutes_ago =
|
||||
time_since_boot.saturating_sub(Duration::from_secs(300));
|
||||
let five_minutes_ago_nanoseconds = five_minutes_ago.as_nanos();
|
||||
|
||||
let mut full_list: Vec<FullList> = {
|
||||
THROUGHPUT_TRACKER.raw_data
|
||||
let boot_time = time_since_boot();
|
||||
if boot_time.is_err() {
|
||||
warn!("The Linux system clock isn't available to provide time since boot, yet.");
|
||||
warn!("This only happens immediately after a reboot.");
|
||||
return BusResponse::NotReadyYet;
|
||||
}
|
||||
let boot_time = boot_time.unwrap();
|
||||
let time_since_boot = Duration::from(boot_time);
|
||||
let five_minutes_ago =
|
||||
time_since_boot.saturating_sub(Duration::from_secs(300));
|
||||
let five_minutes_ago_nanoseconds = five_minutes_ago.as_nanos();
|
||||
|
||||
let mut full_list: Vec<FullList> = {
|
||||
THROUGHPUT_TRACKER.raw_data
|
||||
.iter()
|
||||
.filter(|v| !v.key().as_ip().is_loopback())
|
||||
.filter(|d| d.tc_handle.as_u32() == 0)
|
||||
.filter(|d| d.last_seen as u128 > five_minutes_ago_nanoseconds)
|
||||
.map(|te| {
|
||||
(
|
||||
*te.key(),
|
||||
te.bytes,
|
||||
te.packets,
|
||||
te.median_latency().unwrap_or(0.0),
|
||||
te.tc_handle,
|
||||
te.most_recent_cycle,
|
||||
)
|
||||
})
|
||||
.collect()
|
||||
};
|
||||
full_list.sort_by(|a, b| b.5.partial_cmp(&a.5).unwrap());
|
||||
let result = full_list
|
||||
.iter()
|
||||
.filter(|v| !v.key().as_ip().is_loopback())
|
||||
.filter(|d| d.tc_handle.as_u32() == 0)
|
||||
.filter(|d| d.last_seen as u128 > five_minutes_ago_nanoseconds)
|
||||
.map(|te| {
|
||||
(
|
||||
*te.key(),
|
||||
te.bytes,
|
||||
te.packets,
|
||||
te.median_latency().unwrap_or(0.0),
|
||||
te.tc_handle,
|
||||
te.most_recent_cycle,
|
||||
)
|
||||
})
|
||||
.collect()
|
||||
};
|
||||
full_list.sort_by(|a, b| b.5.partial_cmp(&a.5).unwrap());
|
||||
let result = full_list
|
||||
.iter()
|
||||
.map(
|
||||
|(
|
||||
ip,
|
||||
(bytes_dn, bytes_up),
|
||||
(packets_dn, packets_up),
|
||||
median_rtt,
|
||||
tc_handle,
|
||||
_last_seen,
|
||||
)| IpStats {
|
||||
ip_address: ip.as_ip().to_string(),
|
||||
circuit_id: String::new(),
|
||||
bits_per_second: (bytes_dn * 8, bytes_up * 8),
|
||||
packets_per_second: (*packets_dn, *packets_up),
|
||||
median_tcp_rtt: *median_rtt,
|
||||
tc_handle: *tc_handle,
|
||||
},
|
||||
)
|
||||
.collect();
|
||||
BusResponse::AllUnknownIps(result)
|
||||
}
|
||||
.map(
|
||||
|(
|
||||
ip,
|
||||
(bytes_dn, bytes_up),
|
||||
(packets_dn, packets_up),
|
||||
median_rtt,
|
||||
tc_handle,
|
||||
_last_seen,
|
||||
)| IpStats {
|
||||
ip_address: ip.as_ip().to_string(),
|
||||
circuit_id: String::new(),
|
||||
bits_per_second: (bytes_dn * 8, bytes_up * 8),
|
||||
packets_per_second: (*packets_dn, *packets_up),
|
||||
median_tcp_rtt: *median_rtt,
|
||||
tc_handle: *tc_handle,
|
||||
},
|
||||
)
|
||||
.collect();
|
||||
BusResponse::AllUnknownIps(result)
|
||||
}
|
||||
@@ -30,8 +30,6 @@ impl ThroughputTracker {
|
||||
|
||||
pub(crate) fn copy_previous_and_reset_rtt(&self) {
|
||||
// Copy previous byte/packet numbers and reset RTT data
|
||||
// We're using Rayon's "par_iter_mut" to spread the operation across
|
||||
// all CPU cores.
|
||||
let self_cycle = self.cycle.load(std::sync::atomic::Ordering::Relaxed);
|
||||
self.raw_data.iter_mut().for_each(|mut v| {
|
||||
if v.first_cycle < self_cycle {
|
||||
@@ -43,9 +41,10 @@ impl ThroughputTracker {
|
||||
u64::checked_sub(v.packets.0, v.prev_packets.0).unwrap_or(0);
|
||||
v.packets_per_second.1 =
|
||||
u64::checked_sub(v.packets.1, v.prev_packets.1).unwrap_or(0);
|
||||
v.prev_bytes = v.bytes;
|
||||
v.prev_packets = v.packets;
|
||||
}
|
||||
v.prev_bytes = v.bytes;
|
||||
v.prev_packets = v.packets;
|
||||
|
||||
// Roll out stale RTT data
|
||||
if self_cycle > RETIRE_AFTER_SECONDS
|
||||
&& v.last_fresh_rtt_data_cycle < self_cycle - RETIRE_AFTER_SECONDS
|
||||
@@ -195,17 +194,29 @@ impl ThroughputTracker {
|
||||
|
||||
#[inline(always)]
|
||||
fn add_atomic_tuple(tuple: &(AtomicU64, AtomicU64), n: (u64, u64)) {
|
||||
tuple.0.fetch_add(n.0, std::sync::atomic::Ordering::Relaxed);
|
||||
tuple.1.fetch_add(n.1, std::sync::atomic::Ordering::Relaxed);
|
||||
let n0 = tuple.0.load(std::sync::atomic::Ordering::Relaxed);
|
||||
if let Some(n) = n0.checked_add(n.0) {
|
||||
tuple.0.store(n, std::sync::atomic::Ordering::Relaxed);
|
||||
}
|
||||
|
||||
let n1 = tuple.1.load(std::sync::atomic::Ordering::Relaxed);
|
||||
if let Some(n) = n1.checked_add(n.1) {
|
||||
tuple.1.store(n, std::sync::atomic::Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn update_totals(&self) {
|
||||
let current_cycle = self.cycle.load(std::sync::atomic::Ordering::Relaxed);
|
||||
Self::set_atomic_tuple_to_zero(&self.bytes_per_second);
|
||||
Self::set_atomic_tuple_to_zero(&self.packets_per_second);
|
||||
Self::set_atomic_tuple_to_zero(&self.shaped_bytes_per_second);
|
||||
self
|
||||
.raw_data
|
||||
.iter()
|
||||
.filter(|v|
|
||||
v.most_recent_cycle == current_cycle &&
|
||||
v.first_cycle + 2 < current_cycle
|
||||
)
|
||||
.map(|v| {
|
||||
(
|
||||
v.bytes.0.saturating_sub(v.prev_bytes.0),
|
||||
|
||||
10  src/rust/lqstats/Cargo.toml  (new file)
@@ -0,0 +1,10 @@
[package]
name = "lqstats"
version = "0.1.0"
edition = "2021"
license = "GPL-2.0-only"

[dependencies]
tokio = { version = "1", features = [ "full" ] }
anyhow = "1"
lqos_bus = { path = "../lqos_bus" }
22  src/rust/lqstats/src/main.rs  (new file)
@@ -0,0 +1,22 @@
use anyhow::Result;
use lqos_bus::{bus_request, BusRequest, BusResponse, StatsRequest};

#[tokio::main(flavor = "current_thread")]
pub async fn main() -> Result<()> {
    for resp in bus_request(vec![BusRequest::GetLongTermStats(StatsRequest::CurrentTotals)]).await? {
        if let BusResponse::LongTermTotals(stats) = resp {
            println!("{stats:?}");
        }
    }
    for resp in bus_request(vec![BusRequest::GetLongTermStats(StatsRequest::AllHosts)]).await? {
        if let BusResponse::LongTermHosts(stats) = resp {
            println!("{stats:?}");
        }
    }
    for resp in bus_request(vec![BusRequest::GetLongTermStats(StatsRequest::Tree)]).await? {
        if let BusResponse::LongTermTree(stats) = resp {
            println!("{stats:?}");
        }
    }
    Ok(())
}
@@ -5,7 +5,7 @@ edition = "2021"
|
||||
license = "GPL-2.0-only"
|
||||
|
||||
[dependencies]
|
||||
tokio = { version = "1", features = [ "rt", "macros", "net", "io-util", "time" ] }
|
||||
tokio = { version = "1", features = [ "full" ] }
|
||||
lqos_bus = { path = "../lqos_bus" }
|
||||
lqos_utils = { path = "../lqos_utils" }
|
||||
anyhow = "1"
|
||||
|
||||
24  src/rust/lts_client/Cargo.toml  (new file)
@@ -0,0 +1,24 @@
[package]
name = "lts_client"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
lqos_config = { path = "../lqos_config" }
lqos_utils = { path = "../lqos_utils" }
uisp = { path = "../uisp" }
dryoc = { version = "0.5", features = ["serde"] }
serde = { version = "1.0", features = ["derive"] }
thiserror = "1"
tokio = { version = "1", features = [ "full" ] }
serde_cbor = "0" # For RFC8949/7409 format C binary objects
log = "0"
bincode = "1"
once_cell = "1"
sysinfo = "0"
num-traits = "0.2"
miniz_oxide = "0.7.1"
dashmap = "5.4"
serde_json = "1"
61
src/rust/lts_client/src/collector/collation/min_max.rs
Normal file
@@ -0,0 +1,61 @@
|
||||
use num_traits::{Bounded, CheckedDiv, NumCast, Zero};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub(crate) struct MinMaxAvg<T> {
|
||||
pub(crate) min: T,
|
||||
pub(crate) max: T,
|
||||
pub(crate) avg: T,
|
||||
}
|
||||
|
||||
impl<
|
||||
T: Bounded
|
||||
+ Zero
|
||||
+ std::ops::AddAssign<T>
|
||||
+ Copy
|
||||
+ std::cmp::Ord
|
||||
+ CheckedDiv
|
||||
+ NumCast,
|
||||
> MinMaxAvg<T>
|
||||
{
|
||||
pub(crate) fn from_slice(stats: &[T]) -> Self {
|
||||
let mut min = T::max_value();
|
||||
let mut max = T::min_value();
|
||||
let mut avg = T::zero();
|
||||
|
||||
stats.iter().for_each(|n| {
|
||||
avg += *n;
|
||||
min = T::min(min, *n);
|
||||
max = T::max(max, *n);
|
||||
});
|
||||
let len = T::from(stats.len()).unwrap();
|
||||
avg = avg.checked_div(&len).unwrap_or(T::zero());
|
||||
|
||||
Self { max, min, avg }
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub(crate) struct MinMaxAvgPair<T> {
|
||||
pub(crate) down: MinMaxAvg<T>,
|
||||
pub(crate) up: MinMaxAvg<T>,
|
||||
}
|
||||
|
||||
impl<
|
||||
T: Bounded
|
||||
+ Zero
|
||||
+ std::ops::AddAssign<T>
|
||||
+ Copy
|
||||
+ std::cmp::Ord
|
||||
+ CheckedDiv
|
||||
+ NumCast,
|
||||
> MinMaxAvgPair<T>
|
||||
{
|
||||
pub(crate) fn from_slice(stats: &[(T, T)]) -> Self {
|
||||
let down: Vec<T> = stats.iter().map(|(down, _up)| *down).collect();
|
||||
let up: Vec<T> = stats.iter().map(|(_down, up)| *up).collect();
|
||||
Self {
|
||||
down: MinMaxAvg::<T>::from_slice(&down),
|
||||
up: MinMaxAvg::<T>::from_slice(&up),
|
||||
}
|
||||
}
|
||||
}
|
||||
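A minimal usage sketch (crate-internal, since these types are `pub(crate)`); the sample values are illustrative only:

// Three (download, upload) samples, e.g. bits-per-second readings.
let samples: Vec<(u64, u64)> = vec![(100, 10), (300, 30), (200, 20)];
let pair = MinMaxAvgPair::from_slice(&samples);
assert_eq!(pair.down.min, 100);
assert_eq!(pair.down.max, 300);
assert_eq!(pair.down.avg, 200); // checked integer division: 600 / 3
assert_eq!(pair.up.avg, 20);    // an empty slice would yield avg = 0 instead of panicking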
152
src/rust/lts_client/src/collector/collation/mod.rs
Normal file
@@ -0,0 +1,152 @@
|
||||
mod session_buffer;
|
||||
mod min_max;
|
||||
mod system_stats;
|
||||
use crate::{transport_data::{StatsHost, StatsSummary, StatsRttSummary, StatsTreeNode, StatsSubmission, StatsTotals}, submission_queue::{new_submission, comm_channel::SenderChannelMessage}};
|
||||
use self::min_max::{MinMaxAvgPair, MinMaxAvg};
|
||||
pub(crate) use session_buffer::{StatsSession, SESSION_BUFFER};
|
||||
use lqos_utils::unix_time::unix_now;
|
||||
use tokio::sync::mpsc::Sender;
|
||||
use std::{collections::HashMap, net::IpAddr};
|
||||
use super::{HostSummary, NetworkTreeEntry};
|
||||
|
||||
pub(crate) async fn collate_stats(comm_tx: Sender<SenderChannelMessage>) {
|
||||
let timestamp = unix_now().unwrap_or(0);
|
||||
if timestamp == 0 {
|
||||
return; // We're not ready
|
||||
}
|
||||
|
||||
let mut writer = SESSION_BUFFER.lock().await;
|
||||
if writer.is_empty() {
|
||||
return; // Nothing to do
|
||||
}
|
||||
|
||||
// Collate total stats for the period
|
||||
let bps: Vec<(u64, u64)> = writer
|
||||
.iter()
|
||||
.map(|e| e.throughput.bits_per_second)
|
||||
.collect();
|
||||
let pps: Vec<(u64, u64)> = writer
|
||||
.iter()
|
||||
.map(|e| e.throughput.packets_per_second)
|
||||
.collect();
|
||||
let sbps: Vec<(u64, u64)> = writer
|
||||
.iter()
|
||||
.map(|e| e.throughput.shaped_bits_per_second)
|
||||
.collect();
|
||||
let bits_per_second = MinMaxAvgPair::from_slice(&bps);
|
||||
let packets_per_second = MinMaxAvgPair::from_slice(&pps);
|
||||
let shaped_bits_per_second = MinMaxAvgPair::from_slice(&sbps);
|
||||
|
||||
// Iterate hosts gathering min/max data
|
||||
let mut hosts_accumulator: HashMap<IpAddr, Vec<&HostSummary>> = HashMap::new();
|
||||
let mut tree_accumulator: HashMap<String, Vec<(usize, &NetworkTreeEntry)>> = HashMap::new();
|
||||
writer.iter().for_each(|e| {
|
||||
e.throughput.hosts.iter().for_each(|host| {
|
||||
if let Some(hosts) = hosts_accumulator.get_mut(&host.ip) {
|
||||
hosts.push(host);
|
||||
} else {
|
||||
hosts_accumulator.insert(host.ip, vec![host]);
|
||||
}
|
||||
});
|
||||
|
||||
e.network_tree.iter().for_each(|(index, node)| {
|
||||
if let Some(t) = tree_accumulator.get_mut(&node.name) {
|
||||
t.push((*index, node));
|
||||
} else {
|
||||
tree_accumulator.insert(node.name.clone(), vec![(*index, node)]);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// Get min/max data per IP
|
||||
let mut stats_hosts = Vec::new();
|
||||
for (ip, host) in hosts_accumulator.into_iter() {
|
||||
let bits = MinMaxAvgPair::from_slice(
|
||||
&host
|
||||
.iter()
|
||||
.map(|h| (h.bits_per_second.0, h.bits_per_second.1))
|
||||
.collect::<Vec<(u64, u64)>>(),
|
||||
);
|
||||
let rtt = MinMaxAvg::from_slice(
|
||||
&host
|
||||
.iter()
|
||||
.map(|h| (h.median_rtt * 100.0) as u32)
|
||||
.collect::<Vec<u32>>(),
|
||||
);
|
||||
|
||||
let sh = StatsHost {
|
||||
ip_address: ip.to_string(),
|
||||
circuit_id: host[0].circuit_id.clone(),
|
||||
bits: StatsSummary{ min: (bits.down.min, bits.up.min), max: (bits.down.max, bits.up.max), avg: (bits.down.avg, bits.up.avg) },
|
||||
rtt: StatsRttSummary{ min: rtt.min, max: rtt.max, avg: rtt.avg },
|
||||
};
|
||||
stats_hosts.push(sh);
|
||||
}
|
||||
|
||||
// Get network tree min/max data
|
||||
let mut tree_entries = Vec::new();
|
||||
for (name, nodes) in tree_accumulator.into_iter() {
|
||||
let bits = MinMaxAvgPair::from_slice(
|
||||
&nodes
|
||||
.iter()
|
||||
.map(|(_i, n)| (n.current_throughput.0, n.current_throughput.1))
|
||||
.collect::<Vec<(u32, u32)>>(),
|
||||
);
|
||||
let rtt = MinMaxAvg::from_slice(
|
||||
&nodes
|
||||
.iter()
|
||||
.map(|(_i, n)| (n.rtts.2) as u32)
|
||||
.collect::<Vec<u32>>(),
|
||||
);
|
||||
|
||||
let n = StatsTreeNode {
|
||||
index: nodes[0].0,
|
||||
name: name.to_string(),
|
||||
max_throughput: nodes[0].1.max_throughput,
|
||||
current_throughput: StatsSummary{ min: (bits.down.min.into(), bits.up.min.into()), max: (bits.down.max.into(), bits.up.max.into()), avg: (bits.down.avg.into(), bits.up.avg.into()) },
|
||||
rtt: StatsRttSummary{ min: rtt.min, max: rtt.max, avg: rtt.avg },
|
||||
parents: nodes[0].1.parents.clone(),
|
||||
immediate_parent: nodes[0].1.immediate_parent,
|
||||
node_type: nodes[0].1.node_type.clone(),
|
||||
};
|
||||
tree_entries.push(n);
|
||||
}
|
||||
|
||||
// Obtain the CPU/RAM utilization
|
||||
let (cpu, ram) = system_stats::get_cpu_ram().await;
|
||||
|
||||
// Obtain queue stats
|
||||
let cake_stats = super::update_cake_stats().await;
|
||||
|
||||
|
||||
// Add to the submissions queue
|
||||
new_submission(StatsSubmission {
|
||||
timestamp,
|
||||
totals: Some(StatsTotals {
|
||||
bits: StatsSummary {
|
||||
min: (bits_per_second.down.min, bits_per_second.up.min),
|
||||
max: (bits_per_second.down.max, bits_per_second.up.max),
|
||||
avg: (bits_per_second.down.avg, bits_per_second.up.avg),
|
||||
},
|
||||
shaped_bits: StatsSummary {
|
||||
min: (shaped_bits_per_second.down.min, shaped_bits_per_second.up.min),
|
||||
max: (shaped_bits_per_second.down.max, shaped_bits_per_second.up.max),
|
||||
avg: (shaped_bits_per_second.down.avg, shaped_bits_per_second.up.avg),
|
||||
},
|
||||
packets: StatsSummary {
|
||||
min: (packets_per_second.down.min, packets_per_second.up.min),
|
||||
max: (packets_per_second.down.max, packets_per_second.up.max),
|
||||
avg: (packets_per_second.down.avg, packets_per_second.up.avg),
|
||||
},
|
||||
}),
|
||||
cpu_usage: Some(cpu),
|
||||
ram_percent: Some(ram),
|
||||
hosts: Some(stats_hosts),
|
||||
tree: Some(tree_entries),
|
||||
uisp_devices: None,
|
||||
cake_stats,
|
||||
}, comm_tx).await;
|
||||
|
||||
// Clear the collection buffer
|
||||
writer.clear();
|
||||
}
|
||||
@@ -0,0 +1,11 @@
use once_cell::sync::Lazy;
use tokio::sync::Mutex;
use crate::collector::{ThroughputSummary, network_tree::NetworkTreeEntry};

pub(crate) static SESSION_BUFFER: Lazy<Mutex<Vec<StatsSession>>> =
    Lazy::new(|| Mutex::new(Vec::new()));

pub(crate) struct StatsSession {
    pub(crate) throughput: ThroughputSummary,
    pub(crate) network_tree: Vec<(usize, NetworkTreeEntry)>,
}
24
src/rust/lts_client/src/collector/collation/system_stats.rs
Normal file
@@ -0,0 +1,24 @@
use once_cell::sync::Lazy;
use sysinfo::{System, SystemExt};
use tokio::sync::Mutex;

static SYS: Lazy<Mutex<System>> = Lazy::new(|| Mutex::new(System::new_all()));

pub(crate) async fn get_cpu_ram() -> (Vec<u32>, u32) {
    use sysinfo::CpuExt;
    let mut lock = SYS.lock().await;
    lock.refresh_cpu();
    lock.refresh_memory();

    let cpus: Vec<u32> = lock
        .cpus()
        .iter()
        .map(|cpu| cpu.cpu_usage() as u32) // Always rounds down
        .collect();

    let memory = (lock.used_memory() as f32 / lock.total_memory() as f32) * 100.0;

    //println!("cpu: {:?}, ram: {}", cpus, memory);

    (cpus, memory as u32)
}
132
src/rust/lts_client/src/collector/collection_manager.rs
Normal file
@@ -0,0 +1,132 @@
|
||||
//! Provides a thread that runs in the background for `lqosd`. It is
|
||||
//! triggered whenever fresh throughput data is ready to be collected.
|
||||
//! The data is stored in a "session buffer", to be collated when the
|
||||
//! collation period timer fires.
|
||||
//!
|
||||
//! This is designed to ensure that even long averaging periods don't
|
||||
//! lose min/max values.
|
||||
|
||||
use super::StatsUpdateMessage;
|
||||
use crate::{collector::{collation::{collate_stats, StatsSession}, SESSION_BUFFER, uisp_ext::gather_uisp_data}, submission_queue::{enqueue_shaped_devices_if_allowed, comm_channel::{SenderChannelMessage, start_communication_channel}}};
|
||||
use lqos_config::EtcLqos;
|
||||
use once_cell::sync::Lazy;
|
||||
use std::{sync::atomic::AtomicU64, time::Duration};
|
||||
use tokio::sync::mpsc::{self, Receiver, Sender};
|
||||
use dashmap::DashSet;
|
||||
|
||||
static STATS_COUNTER: AtomicU64 = AtomicU64::new(0);
|
||||
pub(crate) static DEVICE_ID_LIST: Lazy<DashSet<String>> = Lazy::new(DashSet::new);
|
||||
|
||||
/// Launches the long-term statistics manager task. Returns immediately,
|
||||
/// because it creates the channel and then spawns listener threads.
|
||||
///
|
||||
/// Returns a channel that may be used to notify of data availability.
|
||||
pub async fn start_long_term_stats() -> Sender<StatsUpdateMessage> {
|
||||
let (update_tx, update_rx): (Sender<StatsUpdateMessage>, Receiver<StatsUpdateMessage>) = mpsc::channel(10);
|
||||
let (comm_tx, comm_rx): (Sender<SenderChannelMessage>, Receiver<SenderChannelMessage>) = mpsc::channel(10);
|
||||
|
||||
tokio::spawn(lts_manager(update_rx, comm_tx));
|
||||
tokio::spawn(collation_scheduler(update_tx.clone()));
|
||||
tokio::spawn(uisp_collection_manager(update_tx.clone()));
|
||||
tokio::spawn(start_communication_channel(comm_rx));
|
||||
|
||||
// Return the channel, for notifications
|
||||
update_tx
|
||||
}
|
||||
|
||||
async fn collation_scheduler(tx: Sender<StatsUpdateMessage>) {
|
||||
log::info!("Starting collation scheduler");
|
||||
loop {
|
||||
let collation_period = get_collation_period();
|
||||
log::info!("Collation period: {}s", collation_period.as_secs());
|
||||
if tx.send(StatsUpdateMessage::CollationTime).await.is_err() {
|
||||
log::warn!("Unable to send collation time message");
|
||||
}
|
||||
log::info!("Sent collation time message. Sleeping.");
|
||||
tokio::time::sleep(collation_period).await;
|
||||
log::info!("Collation scheduler woke up.");
|
||||
}
|
||||
}
|
||||
|
||||
async fn lts_manager(mut rx: Receiver<StatsUpdateMessage>, comm_tx: Sender<SenderChannelMessage>) {
|
||||
log::info!("Long-term stats gathering thread started");
|
||||
loop {
|
||||
let msg = rx.recv().await;
|
||||
match msg {
|
||||
Some(StatsUpdateMessage::ThroughputReady(throughput)) => {
|
||||
let counter = STATS_COUNTER.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
|
||||
if counter > 5 {
|
||||
log::info!("Enqueueing throughput data for collation");
|
||||
SESSION_BUFFER.lock().await.push(StatsSession {
|
||||
throughput: throughput.0,
|
||||
network_tree: throughput.1,
|
||||
});
|
||||
}
|
||||
}
|
||||
Some(StatsUpdateMessage::ShapedDevicesChanged(shaped_devices)) => {
|
||||
log::info!("Enqueueing shaped devices for collation");
|
||||
// Update the device id list
|
||||
DEVICE_ID_LIST.clear();
|
||||
shaped_devices.iter().for_each(|d| {
|
||||
DEVICE_ID_LIST.insert(d.device_id.clone());
|
||||
});
|
||||
tokio::spawn(enqueue_shaped_devices_if_allowed(shaped_devices, comm_tx.clone()));
|
||||
}
|
||||
Some(StatsUpdateMessage::CollationTime) => {
|
||||
log::info!("Collation time reached");
|
||||
tokio::spawn(collate_stats(comm_tx.clone()));
|
||||
}
|
||||
Some(StatsUpdateMessage::UispCollationTime) => {
|
||||
log::info!("UISP Collation time reached");
|
||||
tokio::spawn(gather_uisp_data(comm_tx.clone()));
|
||||
}
|
||||
Some(StatsUpdateMessage::Quit) => {
|
||||
// The daemon is exiting, terminate
|
||||
let _ = comm_tx.send(SenderChannelMessage::Quit).await;
|
||||
break;
|
||||
}
|
||||
None => {
|
||||
log::warn!("Long-term stats thread received a None message");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn get_collation_period() -> Duration {
|
||||
if let Ok(cfg) = EtcLqos::load() {
|
||||
if let Some(lts) = &cfg.long_term_stats {
|
||||
return Duration::from_secs(lts.collation_period_seconds.into());
|
||||
}
|
||||
}
|
||||
|
||||
Duration::from_secs(60)
|
||||
}
|
||||
|
||||
fn get_uisp_collation_period() -> Option<Duration> {
|
||||
if let Ok(cfg) = EtcLqos::load() {
|
||||
if let Some(lts) = &cfg.long_term_stats {
|
||||
return Some(Duration::from_secs(lts.uisp_reporting_interval_seconds.unwrap_or(300)));
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
async fn uisp_collection_manager(control_tx: Sender<StatsUpdateMessage>) {
|
||||
// Outer loop: If UISP is disabled, check hourly to see if it
|
||||
// was enabled. If it is enabled, start the inner loop.
|
||||
loop {
|
||||
// Inner loop - if there's a collation period set for UISP,
|
||||
// poll it.
|
||||
if let Some(period) = get_uisp_collation_period() {
|
||||
log::info!("Starting UISP poller with period {:?}", period);
|
||||
loop {
|
||||
control_tx.send(StatsUpdateMessage::UispCollationTime).await.unwrap();
|
||||
tokio::time::sleep(period).await;
|
||||
}
|
||||
} else {
|
||||
// Sleep for one hour - then we'll check again
|
||||
tokio::time::sleep(Duration::from_secs(3600)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
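A hedged sketch of how `lqosd` is expected to drive this manager: keep the channel returned by `start_long_term_stats()` and post messages as data becomes ready. The summary below is a placeholder, not a real measurement:

// Sketch only; assumes a tokio runtime and accepts an empty network tree.
let tx = start_long_term_stats().await;
let summary = ThroughputSummary {
    bits_per_second: (0, 0),
    shaped_bits_per_second: (0, 0),
    packets_per_second: (0, 0),
    hosts: Vec::new(),
};
// The payload is boxed to keep the channel message small.
let _ = tx.send(StatsUpdateMessage::ThroughputReady(Box::new((summary, Vec::new())))).await;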
16
src/rust/lts_client/src/collector/mod.rs
Normal file
@@ -0,0 +1,16 @@
//! Provides stats collection services for `lqosd`.

mod collection_manager;
mod stats_availability;
mod throughput_summary;
mod collation;
mod network_tree;
mod uisp_ext;
mod quick_drops;
pub use stats_availability::StatsUpdateMessage;
pub use collection_manager::start_long_term_stats;
pub use throughput_summary::{ThroughputSummary, HostSummary};
pub(crate) use collation::SESSION_BUFFER;
pub use network_tree::NetworkTreeEntry;
pub(crate) use quick_drops::*;
pub use quick_drops::CakeStats;
48
src/rust/lts_client/src/collector/network_tree.rs
Normal file
@@ -0,0 +1,48 @@
use lqos_config::NetworkJsonNode;

#[derive(Debug, Clone)]
pub struct NetworkTreeEntry {
    pub name: String,
    pub max_throughput: (u32, u32),
    pub current_throughput: (u32, u32),
    pub rtts: (u16, u16, u16),
    pub parents: Vec<usize>,
    pub immediate_parent: Option<usize>,
    pub node_type: Option<String>,
}

impl From<&NetworkJsonNode> for NetworkTreeEntry {
    fn from(value: &NetworkJsonNode) -> Self {
        let mut max = 0;
        let mut min = if value.rtts.is_empty() {
            0
        } else {
            u64::MAX
        };
        let mut sum: u64 = 0;
        let mut count = 0;
        for n in value.rtts.iter() {
            let n = *n as u64;
            if n > 0 {
                sum += n;
                if n < min { min = n; }
                if n > max { max = n; }
                count += 1;
            }
        }
        let avg = sum.checked_div(count).unwrap_or(0);

        Self {
            name: value.name.clone(),
            max_throughput: value.max_throughput,
            parents: value.parents.clone(),
            immediate_parent: value.immediate_parent,
            current_throughput: (
                value.current_throughput.0.load(std::sync::atomic::Ordering::Relaxed) as u32,
                value.current_throughput.1.load(std::sync::atomic::Ordering::Relaxed) as u32,
            ),
            node_type: value.node_type.clone(),
            rtts: (min as u16, max as u16, avg as u16),
        }
    }
}
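Worked example of the `rtts` triple: zero samples are treated as "no data", so readings of [0, 12, 30, 18] collapse to min 12, max 30, avg 20 (the zero is skipped), and an empty sample set yields (0, 0, 0) because the checked division falls back to zero.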
17
src/rust/lts_client/src/collector/quick_drops/mod.rs
Normal file
@@ -0,0 +1,17 @@
//! Provides a quick'n'dirty 10 second snapshot of the TC queue
//! status. This is used by the LTS system to provide a quick'n'dirty
//! summary of drops and marks for the last 10 seconds.

mod queue_structure;
mod retriever;
mod stats_diff;
pub(crate) use retriever::*;
use serde::{Serialize, Deserialize};
pub(crate) use stats_diff::*;

#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct CakeStats {
    pub circuit_id: String,
    pub drops: u64,
    pub marks: u64,
}
@@ -0,0 +1,39 @@
mod tc_handle;
mod queue_network;
mod queue_node;
use log::error;
use queue_network::QueueNetwork;
use thiserror::Error;
pub(crate) use queue_node::QueueNode;

pub(crate) fn read_queueing_structure(
) -> Result<Vec<QueueNode>, QueueStructureError> {
    // Note: the ? is allowed because the sub-types return a QueueStructureError and handle logging.
    let network = QueueNetwork::from_json()?;
    let flattened = network.to_flat();
    Ok(flattened)
}

#[derive(Error, Debug)]
pub enum QueueStructureError {
    #[error("unable to parse u64")]
    U64Parse(String),
    #[error("Unable to retrieve string from JSON")]
    StringParse(String),
    #[error("Unable to convert string to TC Handle")]
    TcHandle(String),
    #[error("Unable to convert string to u32 via hex")]
    HexParse(String),
    #[error("Error reading child circuit")]
    Circuit,
    #[error("Error reading child device")]
    Device,
    #[error("Error reading child's children")]
    Children,
    #[error("Unable to read configuration from /etc/lqos.conf")]
    LqosConf,
    #[error("Unable to access queuingStructure.json")]
    FileNotFound,
    #[error("Unable to read JSON")]
    JsonError,
}
@@ -0,0 +1,77 @@
|
||||
use super::{queue_node::QueueNode, QueueStructureError};
|
||||
use log::error;
|
||||
use lqos_config::EtcLqos;
|
||||
use serde_json::Value;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
pub struct QueueNetwork {
|
||||
pub(crate) cpu_node: Vec<QueueNode>,
|
||||
}
|
||||
|
||||
impl QueueNetwork {
|
||||
pub fn path() -> Result<PathBuf, QueueStructureError> {
|
||||
let cfg = EtcLqos::load();
|
||||
if cfg.is_err() {
|
||||
error!("unable to read /etc/lqos.conf");
|
||||
return Err(QueueStructureError::LqosConf);
|
||||
}
|
||||
let cfg = cfg.unwrap();
|
||||
let base_path = Path::new(&cfg.lqos_directory);
|
||||
Ok(base_path.join("queuingStructure.json"))
|
||||
}
|
||||
|
||||
fn exists() -> bool {
|
||||
if let Ok(path) = QueueNetwork::path() {
|
||||
path.exists()
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn from_json() -> Result<Self, QueueStructureError> {
|
||||
let path = QueueNetwork::path()?;
|
||||
if !QueueNetwork::exists() {
|
||||
error!("queueStructure.json does not exist yet. Try running LibreQoS?");
|
||||
return Err(QueueStructureError::FileNotFound);
|
||||
}
|
||||
let raw_string = std::fs::read_to_string(path)
|
||||
.map_err(|_| QueueStructureError::FileNotFound)?;
|
||||
let mut result = Self { cpu_node: Vec::new() };
|
||||
let json: Value = serde_json::from_str(&raw_string)
|
||||
.map_err(|_| QueueStructureError::FileNotFound)?;
|
||||
if let Value::Object(map) = &json {
|
||||
if let Some(network) = map.get("Network") {
|
||||
if let Value::Object(map) = network {
|
||||
for (key, value) in map.iter() {
|
||||
result.cpu_node.push(QueueNode::from_json(key, value)?);
|
||||
}
|
||||
} else {
|
||||
error!("Unable to parse JSON for queueStructure");
|
||||
return Err(QueueStructureError::JsonError);
|
||||
}
|
||||
} else {
|
||||
error!("Unable to parse JSON for queueStructure");
|
||||
return Err(QueueStructureError::JsonError);
|
||||
}
|
||||
} else {
|
||||
error!("Unable to parse JSON for queueStructure");
|
||||
return Err(QueueStructureError::JsonError);
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
pub fn to_flat(&self) -> Vec<QueueNode> {
|
||||
let mut result = Vec::new();
|
||||
for cpu in self.cpu_node.iter() {
|
||||
result.push(cpu.clone());
|
||||
let children = cpu.to_flat();
|
||||
result.extend_from_slice(&children);
|
||||
}
|
||||
for c in result.iter_mut() {
|
||||
c.circuits.clear();
|
||||
c.devices.clear();
|
||||
}
|
||||
result
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,253 @@
|
||||
use super::{QueueStructureError, tc_handle::TcHandle};
|
||||
use log::error;
|
||||
use lqos_utils::hex_string::read_hex_string;
|
||||
use serde_json::Value;
|
||||
|
||||
#[derive(Default, Clone, Debug)]
|
||||
pub struct QueueNode {
|
||||
pub download_bandwidth_mbps: u64,
|
||||
pub upload_bandwidth_mbps: u64,
|
||||
pub download_bandwidth_mbps_min: u64,
|
||||
pub upload_bandwidth_mbps_min: u64,
|
||||
pub class_id: TcHandle,
|
||||
pub up_class_id: TcHandle,
|
||||
pub parent_class_id: TcHandle,
|
||||
pub up_parent_class_id: TcHandle,
|
||||
pub class_major: u32,
|
||||
pub up_class_major: u32,
|
||||
pub class_minor: u32,
|
||||
pub cpu_num: u32,
|
||||
pub up_cpu_num: u32,
|
||||
pub circuits: Vec<QueueNode>,
|
||||
pub circuit_id: Option<String>,
|
||||
pub circuit_name: Option<String>,
|
||||
pub parent_node: Option<String>,
|
||||
pub devices: Vec<QueueNode>,
|
||||
pub comment: String,
|
||||
pub device_id: Option<String>,
|
||||
pub device_name: Option<String>,
|
||||
pub mac: Option<String>,
|
||||
pub children: Vec<QueueNode>,
|
||||
}
|
||||
|
||||
/// Provides a convenient wrapper that attempts to decode a u64 from a JSON
|
||||
/// value, and returns an error if decoding fails.
|
||||
macro_rules! grab_u64 {
|
||||
($target: expr, $key: expr, $value: expr) => {
|
||||
let tmp = $value.as_u64().ok_or(QueueStructureError::U64Parse(format!("{} => {:?}", $key, $value)));
|
||||
match tmp {
|
||||
Err(e) => {
|
||||
error!("Error decoding JSON. Key: {}, Value: {:?} is not readily convertible to a u64.", $key, $value);
|
||||
return Err(e);
|
||||
}
|
||||
Ok(data) => $target = data,
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// Provides a macro to safely unwrap TC Handles and issue an error if they didn't parse
|
||||
/// correctly.
|
||||
macro_rules! grab_tc_handle {
|
||||
($target: expr, $key: expr, $value: expr) => {
|
||||
let s = $value.as_str();
|
||||
if s.is_none() {
|
||||
error!("Unable to parse {:?} as a string from JSON", s);
|
||||
return Err(QueueStructureError::StringParse(format!("{:?}", $value)));
|
||||
}
|
||||
let s = s.unwrap();
|
||||
let tmp = TcHandle::from_string(s);
|
||||
if tmp.is_err() {
|
||||
error!("Unable to parse {:?} as a TC Handle", s);
|
||||
return Err(QueueStructureError::TcHandle(format!("{:?}", tmp)));
|
||||
}
|
||||
$target = tmp.unwrap();
|
||||
};
|
||||
}
|
||||
|
||||
/// Macro to convert hex strings (e.g. 0xff) to a u32
|
||||
macro_rules! grab_hex {
|
||||
($target: expr, $key: expr, $value: expr) => {
|
||||
let s = $value.as_str();
|
||||
if s.is_none() {
|
||||
error!("Unable to parse {:?} as a string from JSON", $value);
|
||||
return Err(QueueStructureError::StringParse(format!("{:?}", s)));
|
||||
}
|
||||
let s = s.unwrap();
|
||||
let tmp = read_hex_string(s);
|
||||
if tmp.is_err() {
|
||||
error!("Unable to parse {:?} as a hex string", $value);
|
||||
return Err(QueueStructureError::HexParse(format!("{:?}", tmp)));
|
||||
}
|
||||
$target = tmp.unwrap();
|
||||
};
|
||||
}
|
||||
|
||||
/// Macro to extract an option<string>
|
||||
macro_rules! grab_string_option {
|
||||
($target: expr, $key: expr, $value: expr) => {
|
||||
let s = $value.as_str();
|
||||
if s.is_none() {
|
||||
error!("Unable to parse {:?} as a string from JSON", $value);
|
||||
return Err(QueueStructureError::StringParse(format!("{:?}", s)));
|
||||
}
|
||||
$target = Some(s.unwrap().to_string());
|
||||
};
|
||||
}
|
||||
|
||||
/// Macro to extract a string
|
||||
macro_rules! grab_string {
|
||||
($target: expr, $key: expr, $value: expr) => {
|
||||
let s = $value.as_str();
|
||||
if s.is_none() {
|
||||
error!("Unable to parse {:?} as a string from JSON", $value);
|
||||
return Err(QueueStructureError::StringParse(format!("{:?}", s)));
|
||||
}
|
||||
$target = s.unwrap().to_string();
|
||||
};
|
||||
}
|
||||
|
||||
impl QueueNode {
|
||||
pub(crate) fn from_json(
|
||||
key: &str,
|
||||
value: &Value,
|
||||
) -> Result<Self, QueueStructureError> {
|
||||
let mut result = Self::default();
|
||||
if let Value::Object(map) = value {
|
||||
for (key, value) in map.iter() {
|
||||
match key.as_str() {
|
||||
"downloadBandwidthMbps" | "maxDownload" => {
|
||||
grab_u64!(result.download_bandwidth_mbps, key.as_str(), value);
|
||||
}
|
||||
"uploadBandwidthMbps" | "maxUpload" => {
|
||||
grab_u64!(result.upload_bandwidth_mbps, key.as_str(), value);
|
||||
}
|
||||
"downloadBandwidthMbpsMin" | "minDownload" => {
|
||||
grab_u64!(result.download_bandwidth_mbps_min, key.as_str(), value);
|
||||
}
|
||||
"uploadBandwidthMbpsMin" | "minUpload" => {
|
||||
grab_u64!(result.upload_bandwidth_mbps_min, key.as_str(), value);
|
||||
}
|
||||
"classid" => {
|
||||
grab_tc_handle!(result.class_id, key.as_str(), value);
|
||||
}
|
||||
"up_classid" => {
|
||||
grab_tc_handle!(result.up_class_id, key.as_str(), value);
|
||||
}
|
||||
"classMajor" => {
|
||||
grab_hex!(result.class_major, key.as_str(), value);
|
||||
}
|
||||
"up_classMajor" => {
|
||||
grab_hex!(result.up_class_major, key.as_str(), value);
|
||||
}
|
||||
"classMinor" => {
|
||||
grab_hex!(result.class_minor, key.as_str(), value);
|
||||
}
|
||||
"cpuNum" => {
|
||||
grab_hex!(result.cpu_num, key.as_str(), value);
|
||||
}
|
||||
"up_cpuNum" => {
|
||||
grab_hex!(result.up_cpu_num, key.as_str(), value);
|
||||
}
|
||||
"parentClassID" => {
|
||||
grab_tc_handle!(result.parent_class_id, key.as_str(), value);
|
||||
}
|
||||
"up_parentClassID" => {
|
||||
grab_tc_handle!(result.up_parent_class_id, key.as_str(), value);
|
||||
}
|
||||
"circuitId" | "circuitID" => {
|
||||
grab_string_option!(result.circuit_id, key.as_str(), value);
|
||||
}
|
||||
"circuitName" => {
|
||||
grab_string_option!(result.circuit_name, key.as_str(), value);
|
||||
}
|
||||
"parentNode" | "ParentNode" => {
|
||||
grab_string_option!(result.parent_node, key.as_str(), value);
|
||||
}
|
||||
"comment" => {
|
||||
grab_string!(result.comment, key.as_str(), value);
|
||||
}
|
||||
"deviceId" | "deviceID" => {
|
||||
grab_string_option!(result.device_id, key.as_str(), value);
|
||||
}
|
||||
"deviceName" => {
|
||||
grab_string_option!(result.device_name, key.as_str(), value);
|
||||
}
|
||||
"mac" => {
|
||||
grab_string_option!(result.mac, key.as_str(), value);
|
||||
}
|
||||
"ipv4s" => {} // Ignore
|
||||
"ipv6s" => {}
|
||||
"circuits" => {
|
||||
if let Value::Array(array) = value {
|
||||
for c in array.iter() {
|
||||
let n = QueueNode::from_json(key, c);
|
||||
if n.is_err() {
|
||||
error!("Unable to read circuit children");
|
||||
error!("{:?}", n);
|
||||
return Err(QueueStructureError::Circuit);
|
||||
}
|
||||
result.circuits.push(n.unwrap());
|
||||
}
|
||||
}
|
||||
}
|
||||
"devices" => {
|
||||
if let Value::Array(array) = value {
|
||||
for c in array.iter() {
|
||||
let n = QueueNode::from_json(key, c);
|
||||
if n.is_err() {
|
||||
error!("Unable to read device children");
|
||||
error!("{:?}", n);
|
||||
return Err(QueueStructureError::Device);
|
||||
}
|
||||
result.devices.push(n.unwrap());
|
||||
}
|
||||
}
|
||||
}
|
||||
"children" => {
|
||||
if let Value::Object(map) = value {
|
||||
for (key, c) in map.iter() {
|
||||
let n = QueueNode::from_json(key, c);
|
||||
if n.is_err() {
|
||||
error!("Unable to read children. Don't worry, we all feel that way sometimes.");
|
||||
error!("{:?}", n);
|
||||
return Err(QueueStructureError::Children);
|
||||
}
|
||||
result.circuits.push(n.unwrap());
|
||||
}
|
||||
} else {
|
||||
log::warn!("Children was not an object");
|
||||
log::warn!("{:?}", value);
|
||||
}
|
||||
}
|
||||
"idForCircuitsWithoutParentNodes" | "type" => {
|
||||
// Ignore
|
||||
}
|
||||
_ => log::error!("I don't know how to parse key: [{key}]"),
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log::warn!("Unable to parse node structure for [{key}]");
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
pub(crate) fn to_flat(&self) -> Vec<QueueNode> {
|
||||
let mut result = Vec::new();
|
||||
for c in self.circuits.iter() {
|
||||
result.push(c.clone());
|
||||
let children = c.to_flat();
|
||||
result.extend_from_slice(&children);
|
||||
}
|
||||
for c in self.devices.iter() {
|
||||
result.push(c.clone());
|
||||
let children = c.to_flat();
|
||||
result.extend_from_slice(&children);
|
||||
}
|
||||
for c in self.children.iter() {
|
||||
result.push(c.clone());
|
||||
let children = c.to_flat();
|
||||
result.extend_from_slice(&children);
|
||||
}
|
||||
result
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,69 @@
|
||||
use log::error;
|
||||
use lqos_utils::hex_string::read_hex_string;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use thiserror::Error;
|
||||
|
||||
/// Provides consistent handling of TC handle types.
|
||||
#[derive(
|
||||
Copy, Clone, Serialize, Deserialize, Debug, Default, PartialEq, Eq, Hash
|
||||
)]
|
||||
pub struct TcHandle(u32);
|
||||
|
||||
const TC_H_ROOT: u32 = 4294967295;
|
||||
const TC_H_UNSPEC: u32 = 0;
|
||||
|
||||
impl TcHandle {
|
||||
/// Returns the TC handle as two values, indicating major and minor
|
||||
/// TC handle values.
|
||||
#[inline(always)]
|
||||
pub fn get_major_minor(&self) -> (u16, u16) {
|
||||
// According to xdp_pping.c handles are minor:major u16s inside
|
||||
// a u32.
|
||||
((self.0 >> 16) as u16, (self.0 & 0xFFFF) as u16)
|
||||
}
|
||||
|
||||
/// Build a TC handle from a string. This is actually a complicated
|
||||
/// operation, since it has to handle "root" and other strings as well
|
||||
/// as simple "1:2" mappings. Calls a C function to handle this gracefully.
|
||||
pub fn from_string(
|
||||
handle: &str,
|
||||
) -> Result<Self, TcHandleParseError> {
|
||||
let handle = handle.trim();
|
||||
match handle {
|
||||
"root" => Ok(Self(TC_H_ROOT)),
|
||||
"none" => Ok(Self(TC_H_UNSPEC)),
|
||||
_ => {
|
||||
if !handle.contains(':') {
|
||||
if let Ok(major) = read_hex_string(handle) {
|
||||
let minor = 0;
|
||||
return Ok(Self((major << 16) | minor));
|
||||
} else {
|
||||
error!("Unable to parse TC handle {handle}. Must contain a colon.");
|
||||
return Err(TcHandleParseError::InvalidInput(handle.to_string()));
|
||||
}
|
||||
}
|
||||
let parts: Vec<&str> = handle.split(':').collect();
|
||||
let major = read_hex_string(parts[0]).map_err(|_| TcHandleParseError::InvalidInput(handle.to_string()))?;
|
||||
let minor = read_hex_string(parts[1]).map_err(|_| TcHandleParseError::InvalidInput(handle.to_string()))?;
|
||||
if major >= (1<<16) || minor >= (1<<16) {
|
||||
return Err(TcHandleParseError::InvalidInput(handle.to_string()));
|
||||
}
|
||||
Ok(Self((major << 16) | minor))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ToString for TcHandle {
|
||||
fn to_string(&self) -> String {
|
||||
let (major, minor) = self.get_major_minor();
|
||||
format!("{major:x}:{minor:x}")
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum TcHandleParseError {
|
||||
#[error("Invalid input")]
|
||||
InvalidInput(String),
|
||||
}
|
||||
|
||||
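A small sketch of the handle packing, assuming `read_hex_string` accepts plain hex digits with or without a `0x` prefix:

// "1:2" packs as major 1, minor 2 -> 0x0001_0002 internally.
let h = TcHandle::from_string("1:2").unwrap();
assert_eq!(h.get_major_minor(), (1, 2));
assert_eq!(h.to_string(), "1:2");
// "root" maps to the reserved TC_H_ROOT value (all bits set).
assert_eq!(TcHandle::from_string("root").unwrap().get_major_minor(), (0xffff, 0xffff));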
171
src/rust/lts_client/src/collector/quick_drops/retriever.rs
Normal file
@@ -0,0 +1,171 @@
|
||||
//! Async reader/parser for tc -s -j qdisc show dev (whatever)
|
||||
use thiserror::Error;
|
||||
use tokio::process::Command;
|
||||
pub use crate::collector::CakeStats;
|
||||
use super::queue_structure::{read_queueing_structure, QueueNode};
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub(crate) enum AsyncQueueReaderMessage {
|
||||
#[error("Unable to figure out the current queue structure")]
|
||||
QueueStructure,
|
||||
#[error("Unable to query the interface with tc")]
|
||||
FetchRawFail,
|
||||
#[error("Unable to fetch stdout")]
|
||||
FetchStdout,
|
||||
#[error("JSON decode error")]
|
||||
JsonDecode,
|
||||
}
|
||||
|
||||
pub(crate) struct AsyncQueueReader {
|
||||
pub(crate) interface: String,
|
||||
}
|
||||
|
||||
impl AsyncQueueReader {
|
||||
pub(crate) fn new<S: ToString>(interface: S) -> Self {
|
||||
Self {
|
||||
interface: interface.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn run(&self) -> Result<Option<Vec<CakeStats>>, AsyncQueueReaderMessage> {
|
||||
let mut result = None;
|
||||
if let Ok(queue_map) =
|
||||
read_queueing_structure().map_err(|_| AsyncQueueReaderMessage::QueueStructure)
|
||||
{
|
||||
if let Ok(raw) = self.fetch_raw().await {
|
||||
let stats = self.quick_parse(&raw, &queue_map).await?;
|
||||
result = Some(stats);
|
||||
} else {
|
||||
log::error!("Unable to fetch raw tc output");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
pub(crate) async fn run_on_a_stick(&self) -> Result<(Option<Vec<CakeStats>>, Option<Vec<CakeStats>>), AsyncQueueReaderMessage> {
|
||||
let mut result = (None, None);
|
||||
if let Ok(queue_map) =
|
||||
read_queueing_structure().map_err(|_| AsyncQueueReaderMessage::QueueStructure)
|
||||
{
|
||||
if let Ok(raw) = self.fetch_raw().await {
|
||||
let stats = self.quick_parse_stick(&raw, &queue_map).await?;
|
||||
result = (Some(stats.0), Some(stats.1));
|
||||
} else {
|
||||
log::error!("Unable to fetch raw tc output");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
async fn fetch_raw(&self) -> Result<String, AsyncQueueReaderMessage> {
|
||||
let command_output = Command::new("/sbin/tc")
|
||||
.args(["-s", "-j", "qdisc", "show", "dev", &self.interface])
|
||||
.output()
|
||||
.await
|
||||
.map_err(|_| AsyncQueueReaderMessage::FetchRawFail)?;
|
||||
let json = String::from_utf8(command_output.stdout)
|
||||
.map_err(|_| AsyncQueueReaderMessage::FetchStdout)?;
|
||||
Ok(json)
|
||||
}
|
||||
|
||||
async fn quick_parse(&self, raw: &str, structure: &[QueueNode]) -> Result<Vec<CakeStats>, AsyncQueueReaderMessage> {
|
||||
let mut result = Vec::with_capacity(structure.len());
|
||||
|
||||
let json = serde_json::from_str::<serde_json::Value>(raw)
|
||||
.map_err(|_| AsyncQueueReaderMessage::JsonDecode)?;
|
||||
|
||||
if let Some(array) = json.as_array() {
|
||||
for entry in array.iter() {
|
||||
if let Some(map) = entry.as_object() {
|
||||
if let (Some(kind), Some(handle)) =
|
||||
(map.get_key_value("kind"), map.get_key_value("parent"))
|
||||
{
|
||||
if let (Some("cake"), Some(handle)) = (kind.1.as_str(), handle.1.as_str()) {
|
||||
structure.iter().for_each(|node| {
|
||||
if node.class_id.to_string() == handle.to_string() {
|
||||
if let Some(circuit_id) = &node.circuit_id {
|
||||
let mut stats = CakeStats {
|
||||
circuit_id: circuit_id.to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
if let Some(serde_json::Value::Number(drops)) = map.get("drops") {
|
||||
stats.drops = drops.as_u64().unwrap_or(0);
|
||||
}
|
||||
if let Some(serde_json::Value::Number(marks)) = map.get("ecn_mark") {
|
||||
stats.marks = marks.as_u64().unwrap_or(0);
|
||||
}
|
||||
result.push(stats);
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Be good async citizens and don't eat the CPU
|
||||
tokio::task::yield_now().await;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
async fn quick_parse_stick(&self, raw: &str, structure: &[QueueNode]) -> Result<(Vec<CakeStats>, Vec<CakeStats>), AsyncQueueReaderMessage> {
|
||||
let mut down = Vec::with_capacity(structure.len());
|
||||
let mut up = Vec::with_capacity(structure.len());
|
||||
|
||||
let json = serde_json::from_str::<serde_json::Value>(raw)
|
||||
.map_err(|_| AsyncQueueReaderMessage::JsonDecode)?;
|
||||
|
||||
if let Some(array) = json.as_array() {
|
||||
for entry in array.iter() {
|
||||
if let Some(map) = entry.as_object() {
|
||||
if let (Some(kind), Some(handle)) =
|
||||
(map.get_key_value("kind"), map.get_key_value("parent"))
|
||||
{
|
||||
if let (Some("cake"), Some(handle)) = (kind.1.as_str(), handle.1.as_str()) {
|
||||
structure.iter().for_each(|node| {
|
||||
if node.class_id.to_string() == handle.to_string() {
|
||||
if let Some(circuit_id) = &node.circuit_id {
|
||||
let mut stats = CakeStats {
|
||||
circuit_id: circuit_id.to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
if let Some(serde_json::Value::Number(drops)) = map.get("drops") {
|
||||
stats.drops = drops.as_u64().unwrap_or(0);
|
||||
}
|
||||
if let Some(serde_json::Value::Number(marks)) = map.get("ecn_mark") {
|
||||
stats.marks = marks.as_u64().unwrap_or(0);
|
||||
}
|
||||
down.push(stats);
|
||||
}
|
||||
} else if node.up_class_id.to_string() == handle.to_string() {
|
||||
if let Some(circuit_id) = &node.circuit_id {
|
||||
let mut stats = CakeStats {
|
||||
circuit_id: circuit_id.to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
if let Some(serde_json::Value::Number(drops)) = map.get("drops") {
|
||||
stats.drops = drops.as_u64().unwrap_or(0);
|
||||
}
|
||||
if let Some(serde_json::Value::Number(marks)) = map.get("ecn_mark") {
|
||||
stats.marks = marks.as_u64().unwrap_or(0);
|
||||
}
|
||||
up.push(stats);
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Be good async citizens and don't eat the CPU
|
||||
tokio::task::yield_now().await;
|
||||
}
|
||||
}
|
||||
|
||||
Ok((down, up))
|
||||
}
|
||||
}
|
||||
76
src/rust/lts_client/src/collector/quick_drops/stats_diff.rs
Normal file
@@ -0,0 +1,76 @@
|
||||
use tokio::sync::Mutex;
|
||||
use once_cell::sync::Lazy;
|
||||
use super::CakeStats;
|
||||
|
||||
static CAKE_TRACKER: Lazy<Mutex<CakeTracker>> = Lazy::new(|| Mutex::new(CakeTracker::new()));
|
||||
|
||||
pub(crate) async fn update_cake_stats() -> Option<(Vec<CakeStats>, Vec<CakeStats>)> {
|
||||
let mut tracker = CAKE_TRACKER.lock().await;
|
||||
tracker.update().await
|
||||
}
|
||||
|
||||
pub(crate) struct CakeTracker {
|
||||
prev: Option<(Vec<CakeStats>, Vec<CakeStats>)>,
|
||||
current: Option<(Vec<CakeStats>, Vec<CakeStats>)>,
|
||||
}
|
||||
|
||||
impl CakeTracker {
|
||||
pub(crate) fn new() -> Self {
|
||||
Self {
|
||||
prev: None,
|
||||
current: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn update(&mut self) -> Option<(Vec<CakeStats>, Vec<CakeStats>)> {
|
||||
if let Ok(cfg) = lqos_config::LibreQoSConfig::load() {
|
||||
let outbound = &cfg.internet_interface;
|
||||
let inbound = &cfg.isp_interface;
|
||||
if cfg.on_a_stick_mode {
|
||||
let reader = super::AsyncQueueReader::new(outbound);
|
||||
if let Ok((Some(up), Some(down))) = reader.run_on_a_stick().await {
|
||||
return self.read_up_down(up, down);
|
||||
}
|
||||
} else {
|
||||
let out_reader = super::AsyncQueueReader::new(outbound);
|
||||
let in_reader = super::AsyncQueueReader::new(inbound);
|
||||
let (up, down) = tokio::join!(
|
||||
out_reader.run(),
|
||||
in_reader.run(),
|
||||
);
|
||||
if let (Ok(Some(up)), Ok(Some(down))) = (up, down) {
|
||||
return self.read_up_down(up, down);
|
||||
}
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn read_up_down(&mut self, up: Vec<CakeStats>, down: Vec<CakeStats>) -> Option<(Vec<CakeStats>, Vec<CakeStats>)> {
|
||||
if self.prev.is_none() {
|
||||
self.prev = Some((up, down));
|
||||
None
|
||||
} else {
|
||||
// Delta time
|
||||
if let Some((down, up)) = &mut self.current {
|
||||
down.iter_mut().for_each(|d| {
|
||||
if let Some(prev) = self.prev.as_ref().unwrap().0.iter().find(|p| p.circuit_id == d.circuit_id) {
|
||||
d.drops = d.drops.saturating_sub(prev.drops);
|
||||
d.marks = d.marks.saturating_sub(prev.marks);
|
||||
}
|
||||
});
|
||||
up.iter_mut().for_each(|d| {
|
||||
if let Some(prev) = self.prev.as_ref().unwrap().1.iter().find(|p| p.circuit_id == d.circuit_id) {
|
||||
d.drops = d.drops.saturating_sub(prev.drops);
|
||||
d.marks = d.marks.saturating_sub(prev.marks);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Advance the previous
|
||||
self.prev = self.current.take();
|
||||
|
||||
Some((up, down))
|
||||
}
|
||||
}
|
||||
}
|
||||
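The tracker treats tc's drop/mark counters as monotonic and reports only the per-interval difference; `saturating_sub` keeps a counter reset from producing a huge bogus delta. A minimal numeric sketch:

// Previous reading: 100 drops; current reading: 140 drops.
let (previous, current): (u64, u64) = (100, 140);
assert_eq!(current.saturating_sub(previous), 40); // 40 new drops this interval
// If the queue was rebuilt and the counter reset, the delta clamps to zero:
assert_eq!(100u64.saturating_sub(140), 0);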
21
src/rust/lts_client/src/collector/stats_availability.rs
Normal file
@@ -0,0 +1,21 @@
//! Message type to be sent to the long-term stats thread when
//! data is available.

use lqos_config::ShapedDevice;

use super::{ThroughputSummary, network_tree::NetworkTreeEntry};

#[derive(Debug)]
/// Messages to/from the stats collection thread
pub enum StatsUpdateMessage {
    /// Fresh throughput stats have been collected
    ThroughputReady(Box<(ThroughputSummary, Vec<(usize, NetworkTreeEntry)>)>),
    /// ShapedDevices.csv has changed and the server needs new data
    ShapedDevicesChanged(Vec<ShapedDevice>),
    /// It's time to collate the session buffer
    CollationTime,
    /// The daemon is exiting
    Quit,
    /// Time to gather UISP data
    UispCollationTime,
}
23
src/rust/lts_client/src/collector/throughput_summary.rs
Normal file
@@ -0,0 +1,23 @@
//! Provides an interface for collecting data from the throughput
//! tracker in `lqosd` and submitting it into the long-term stats
//! system.
//!
//! Note that ThroughputSummary should be boxed, to avoid copying

use std::net::IpAddr;

#[derive(Debug)]
pub struct ThroughputSummary {
    pub bits_per_second: (u64, u64),
    pub shaped_bits_per_second: (u64, u64),
    pub packets_per_second: (u64, u64),
    pub hosts: Vec<HostSummary>,
}

#[derive(Debug)]
pub struct HostSummary {
    pub ip: IpAddr,
    pub circuit_id: Option<String>,
    pub bits_per_second: (u64, u64),
    pub median_rtt: f32,
}
43
src/rust/lts_client/src/collector/uisp_ext.rs
Normal file
@@ -0,0 +1,43 @@
|
||||
use lqos_utils::unix_time::unix_now;
|
||||
use tokio::sync::mpsc::Sender;
|
||||
use crate::{submission_queue::{comm_channel::SenderChannelMessage, new_submission}, transport_data::{StatsSubmission, UispExtDevice}, collector::collection_manager::DEVICE_ID_LIST};
|
||||
|
||||
pub(crate) async fn gather_uisp_data(comm_tx: Sender<SenderChannelMessage>) {
|
||||
log::info!("Gathering UISP Data for Long-Term Stats");
|
||||
let timestamp = unix_now().unwrap_or(0);
|
||||
if timestamp == 0 {
|
||||
return; // We're not ready
|
||||
}
|
||||
|
||||
if let Ok(config) = lqos_config::LibreQoSConfig::load() {
|
||||
if let Ok(devices) = uisp::load_all_devices_with_interfaces(config).await {
|
||||
log::info!("Loaded {} UISP devices", devices.len());
|
||||
|
||||
// Collate the data
|
||||
let uisp_devices: Vec<UispExtDevice> = devices
|
||||
.into_iter()
|
||||
.filter(|d| DEVICE_ID_LIST.contains(&d.identification.id))
|
||||
.map(|device| device.into())
|
||||
.collect();
|
||||
log::info!("Retained {} relevant UISP devices", uisp_devices.len());
|
||||
|
||||
// Build a queue message containing just UISP info
|
||||
// Submit it
|
||||
let submission = StatsSubmission {
|
||||
timestamp,
|
||||
totals: None,
|
||||
hosts: None,
|
||||
tree: None,
|
||||
cpu_usage: None,
|
||||
ram_percent: None,
|
||||
uisp_devices: Some(uisp_devices),
|
||||
cake_stats: None,
|
||||
};
|
||||
new_submission(submission, comm_tx).await;
|
||||
} else {
|
||||
log::warn!("Unable to load UISP devices");
|
||||
}
|
||||
} else {
|
||||
log::warn!("UISP data collection requested, but no LibreQoS configuration found");
|
||||
}
|
||||
}
|
||||
29
src/rust/lts_client/src/lib.rs
Normal file
@@ -0,0 +1,29 @@
//! Shared data and functionality for the long-term statistics system.

/// Transport data and helpers for the long-term statistics system.
pub mod transport_data;

/// Shared public key infrastructure data and functionality for the long-term statistics system.
pub mod pki;

/// Collection system for `lqosd`
pub mod collector;

/// Submissions system for `lqosd`
pub mod submission_queue;
pub use collector::CakeStats;

/// Re-export bincode
pub mod bincode {
    pub use bincode::*;
}

/// Re-export CBOR
pub mod cbor {
    pub use serde_cbor::*;
}

/// Re-export dryocbox
pub mod dryoc {
    pub use dryoc::*;
}
57
src/rust/lts_client/src/pki/keygen.rs
Normal file
@@ -0,0 +1,57 @@
use dryoc::dryocbox::*;

/// Generate a new keypair and store it in a file. If the file exists,
/// it will be loaded rather than re-generated.
///
/// # Arguments
///
/// * `key_path` - The path to the file to store the keypair in
///
/// # Returns
///
/// The generated or loaded keypair
pub fn generate_new_keypair() -> KeyPair {
    let keypair = KeyPair::gen();
    log::info!("Generated new keypair");
    keypair
}

#[cfg(test)]
mod test {
    use dryoc::dryocbox::*;

    #[test]
    fn test_sealed_box_roundtrip() {
        let sender_keypair = KeyPair::gen();
        let recipient_keypair = KeyPair::gen();
        let nonce = Nonce::gen();
        let message = b"Once upon a time, there was a man with a dream.";
        let dryocbox = DryocBox::encrypt_to_vecbox(
            message,
            &nonce,
            &recipient_keypair.public_key,
            &sender_keypair.secret_key,
        )
        .expect("unable to encrypt");

        let sodium_box = dryocbox.to_vec();
        let dryocbox = DryocBox::from_bytes(&sodium_box).expect("failed to read box");
        let decrypted = dryocbox
            .decrypt_to_vec(
                &nonce,
                &sender_keypair.public_key,
                &recipient_keypair.secret_key,
            )
            .expect("unable to decrypt");

        assert_eq!(message, decrypted.as_slice());
    }

    #[test]
    fn test_serialize_keypair() {
        let keypair = KeyPair::gen();
        let serialized = bincode::serialize(&keypair).unwrap();
        let deserialized: KeyPair = bincode::deserialize(&serialized).unwrap();
        assert_eq!(keypair, deserialized);
    }
}
4
src/rust/lts_client/src/pki/mod.rs
Normal file
@@ -0,0 +1,4 @@
//! Shared public-key data for the license server, long-term stats
//! node and the client.
mod keygen;
pub use keygen::*;
135
src/rust/lts_client/src/submission_queue/comm_channel/encode.rs
Normal file
@@ -0,0 +1,135 @@
|
||||
use dryoc::{dryocbox::{Nonce, DryocBox}, types::{NewByteArray, ByteArray}};
|
||||
use lqos_config::EtcLqos;
|
||||
use thiserror::Error;
|
||||
use crate::{transport_data::{LtsCommand, NodeIdAndLicense, HelloVersion2}, submission_queue::queue::QueueError};
|
||||
use super::keys::{SERVER_PUBLIC_KEY, KEYPAIR};
|
||||
|
||||
pub(crate) async fn encode_submission_hello(license_key: &str, node_id: &str, node_name: &str) -> Result<Vec<u8>, QueueError> {
|
||||
let mut result = Vec::new();
|
||||
|
||||
// Build the body
|
||||
let hello_message = HelloVersion2 {
|
||||
license_key: license_key.to_string(),
|
||||
node_id: node_id.to_string(),
|
||||
node_name: node_name.to_string(),
|
||||
client_public_key: KEYPAIR.read().await.public_key.clone().to_vec(),
|
||||
};
|
||||
|
||||
// Add the version
|
||||
result.extend(2u16.to_be_bytes());
|
||||
|
||||
// Pad to 32-bit boundary
|
||||
result.extend(3u16.to_be_bytes());
|
||||
|
||||
// Serialize the body
|
||||
let hello_bytes = serde_cbor::to_vec(&hello_message).map_err(|_| QueueError::SendFail)?;
|
||||
|
||||
// Add the length
|
||||
result.extend((hello_bytes.len() as u64).to_be_bytes());
|
||||
|
||||
// Add the body
|
||||
result.extend(hello_bytes);
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
#[derive(Debug, Error)]
|
||||
pub enum SubmissionDecodeError {
|
||||
#[error("Invalid version")]
|
||||
InvalidVersion,
|
||||
#[error("Invalid padding")]
|
||||
InvalidPadding,
|
||||
#[error("Failed to deserialize")]
|
||||
Deserialize,
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub(crate) fn decode_submission_hello(bytes: &[u8]) -> Result<HelloVersion2, SubmissionDecodeError> {
|
||||
let version = u16::from_be_bytes([bytes[0], bytes[1]]);
|
||||
if version != 2 {
|
||||
log::error!("Received an invalid version from the server: {}", version);
|
||||
return Err(SubmissionDecodeError::InvalidVersion);
|
||||
}
|
||||
let padding = u16::from_be_bytes([bytes[2], bytes[3]]);
|
||||
if padding != 3 {
|
||||
log::error!("Received an invalid padding from the server: {}", padding);
|
||||
return Err(SubmissionDecodeError::InvalidPadding);
|
||||
}
|
||||
let size = u64::from_be_bytes([bytes[4], bytes[5], bytes[6], bytes[7], bytes[8], bytes[9], bytes[10], bytes[11]]);
|
||||
let hello_bytes = &bytes[12..12 + size as usize];
|
||||
let hello: HelloVersion2 = serde_cbor::from_slice(hello_bytes).map_err(|_| SubmissionDecodeError::Deserialize)?;
|
||||
|
||||
Ok(hello)
|
||||
}
|
||||
|
||||
pub(crate) async fn encode_submission(submission: &LtsCommand) -> Result<Vec<u8>, QueueError> {
|
||||
let nonce = Nonce::gen();
|
||||
let mut result = Vec::new();
|
||||
|
||||
// Store the version as network order
|
||||
result.extend(1u16.to_be_bytes());
|
||||
|
||||
// Pack the license key and node id into a header
|
||||
let header = get_license_key_and_node_id(&nonce)?;
|
||||
let header_bytes = serde_cbor::to_vec(&header).map_err(|_| QueueError::SendFail)?;
|
||||
|
||||
// Store the size of the header and the header
|
||||
result.extend((header_bytes.len() as u64).to_be_bytes());
|
||||
result.extend(header_bytes);
|
||||
|
||||
// Pack the submission body into bytes
|
||||
let payload_bytes = serde_cbor::to_vec(&submission).map_err(|_| QueueError::SendFail)?;
|
||||
|
||||
// TODO: Compress it?
|
||||
let payload_bytes = miniz_oxide::deflate::compress_to_vec(&payload_bytes, 8);
|
||||
|
||||
// Encrypt it
|
||||
let remote_public = SERVER_PUBLIC_KEY.read().await.clone().unwrap();
|
||||
let my_private = KEYPAIR.read().await.secret_key.clone();
|
||||
let dryocbox = DryocBox::encrypt_to_vecbox(
|
||||
&payload_bytes,
|
||||
&nonce,
|
||||
&remote_public,
|
||||
&my_private,
|
||||
).map_err(|_| QueueError::SendFail)?;
|
||||
let encrypted_bytes = dryocbox.to_vec();
|
||||
|
||||
// Store the size of the submission
|
||||
result.extend((encrypted_bytes.len() as u64).to_be_bytes());
|
||||
result.extend(encrypted_bytes);
|
||||
|
||||
// Store the encrypted, zipped submission itself
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn get_license_key_and_node_id(nonce: &Nonce) -> Result<NodeIdAndLicense, QueueError> {
|
||||
let cfg = EtcLqos::load().map_err(|_| QueueError::SendFail)?;
|
||||
if let Some(node_id) = cfg.node_id {
|
||||
if let Some(lts) = &cfg.long_term_stats {
|
||||
if let Some(license_key) = &lts.license_key {
|
||||
return Ok(NodeIdAndLicense {
|
||||
node_id,
|
||||
license_key: license_key.clone(),
|
||||
nonce: *nonce.as_array(),
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(QueueError::SendFail)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
#[tokio::test]
|
||||
async fn hello_submission_roundtrip() {
|
||||
let license_key = "1234567890";
|
||||
let node_id = "node_id";
|
||||
let node_name = "node_name";
|
||||
let hello = super::encode_submission_hello(license_key, node_id, node_name).await.unwrap();
|
||||
let hello = super::decode_submission_hello(&hello).unwrap();
|
||||
assert_eq!(hello.license_key, license_key);
|
||||
assert_eq!(hello.node_id, node_id);
|
||||
assert_eq!(hello.node_name, node_name);
|
||||
}
|
||||
}
|
||||
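For reference, the hello frame built above is laid out as a big-endian `u16` version (2), a `u16` pad (3), a big-endian `u64` body length, and then the CBOR-encoded `HelloVersion2`. A minimal sketch of the framing arithmetic, using a stand-in body rather than a real hello:

// Stand-in payload; the real body is serde_cbor::to_vec(&HelloVersion2 { .. }).
let body: Vec<u8> = vec![0xA0]; // CBOR empty map, purely to illustrate the framing
let mut frame = Vec::new();
frame.extend(2u16.to_be_bytes());                // version
frame.extend(3u16.to_be_bytes());                // pad to a 32-bit boundary
frame.extend((body.len() as u64).to_be_bytes()); // body length
frame.extend(&body);
assert_eq!(frame[..4], [0u8, 2, 0, 3]);
assert_eq!(frame.len(), 2 + 2 + 8 + body.len());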
@@ -0,0 +1,38 @@
|
||||
use crate::{pki::generate_new_keypair, dryoc::dryocbox::{KeyPair, PublicKey}, transport_data::{exchange_keys_with_license_server, LicenseReply}};
|
||||
use lqos_config::EtcLqos;
|
||||
use once_cell::sync::Lazy;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
pub(crate) static KEYPAIR: Lazy<RwLock<KeyPair>> = Lazy::new(|| RwLock::new(generate_new_keypair()));
|
||||
pub(crate) static SERVER_PUBLIC_KEY: Lazy<RwLock<Option<PublicKey>>> = Lazy::new(|| RwLock::new(None));
|
||||
|
||||
pub(crate) async fn store_server_public_key(key: &PublicKey) {
|
||||
*SERVER_PUBLIC_KEY.write().await = Some(key.clone());
|
||||
}
|
||||
|
||||
pub(crate) async fn key_exchange() -> bool {
|
||||
let cfg = EtcLqos::load().unwrap();
|
||||
let node_id = cfg.node_id.unwrap();
|
||||
let node_name = if let Some(node_name) = cfg.node_name {
|
||||
node_name
|
||||
} else {
|
||||
node_id.clone()
|
||||
};
|
||||
let license_key = cfg.long_term_stats.unwrap().license_key.unwrap();
|
||||
let keypair = (KEYPAIR.read().await).clone();
|
||||
match exchange_keys_with_license_server(node_id, node_name, license_key, keypair.public_key.clone()).await {
|
||||
Ok(LicenseReply::MyPublicKey { public_key }) => {
|
||||
store_server_public_key(&public_key).await;
|
||||
log::info!("Received a public key for the server");
|
||||
true
|
||||
}
|
||||
Ok(_) => {
|
||||
log::warn!("License server sent an unexpected response.");
|
||||
false
|
||||
}
|
||||
Err(e) => {
|
||||
log::warn!("Error exchanging keys with license server: {}", e);
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
130
src/rust/lts_client/src/submission_queue/comm_channel/mod.rs
Normal file
@@ -0,0 +1,130 @@
|
||||
use std::time::Duration;
|
||||
use lqos_config::EtcLqos;
|
||||
use tokio::{sync::mpsc::Receiver, time::sleep, net::TcpStream, io::{AsyncWriteExt, AsyncReadExt}};
|
||||
use crate::submission_queue::comm_channel::keys::store_server_public_key;
|
||||
use self::encode::encode_submission_hello;
|
||||
use super::queue::{send_queue, QueueError};
|
||||
mod keys;
|
||||
pub(crate) use keys::key_exchange;
|
||||
mod encode;
|
||||
pub(crate) use encode::encode_submission;
|
||||
|
||||
pub(crate) enum SenderChannelMessage {
|
||||
QueueReady,
|
||||
Quit,
|
||||
}
|
||||
|
||||
pub(crate) async fn start_communication_channel(mut rx: Receiver<SenderChannelMessage>) {
|
||||
// let mut connected = false;
|
||||
// let mut stream: Option<TcpStream> = None;
|
||||
loop {
|
||||
match rx.try_recv() {
|
||||
Ok(SenderChannelMessage::QueueReady) => {
|
||||
log::info!("Trying to connect to stats.libreqos.io");
|
||||
let mut stream = connect_if_permitted().await;
|
||||
log::info!("Connection to stats.libreqos.io established");
|
||||
|
||||
// If we're still not connected, skip - otherwise, send the
|
||||
// queued data
|
||||
if let Ok(tcpstream) = &mut stream {
|
||||
// Send the data
|
||||
let all_good = send_queue(tcpstream).await;
|
||||
if all_good.is_err() {
|
||||
log::error!("Stream fail during send. Will re-send");
|
||||
}
|
||||
} else {
|
||||
log::error!("Unable to submit data to stats.libreqos.io: {stream:?}");
|
||||
}
|
||||
}
|
||||
Ok(SenderChannelMessage::Quit) => {
|
||||
break;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
|
||||
sleep(Duration::from_secs(10)).await;
|
||||
}
|
||||
}
|
||||
|
||||
async fn connect_if_permitted() -> Result<TcpStream, QueueError> {
|
||||
log::info!("Connecting to stats.libreqos.io");
|
||||
// Check that we have a local license key and are enabled
|
||||
let cfg = EtcLqos::load().map_err(|_| {
|
||||
log::error!("Unable to load config file.");
|
||||
QueueError::NoLocalLicenseKey
|
||||
})?;
|
||||
let node_id = cfg.node_id.ok_or_else(|| {
|
||||
log::warn!("No node ID configured.");
|
||||
QueueError::NoLocalLicenseKey
|
||||
})?;
|
||||
let node_name = cfg.node_name.unwrap_or(node_id.clone());
|
||||
let usage_cfg = cfg.long_term_stats.ok_or_else(|| {
|
||||
log::warn!("Long-term stats are not configured.");
|
||||
QueueError::NoLocalLicenseKey
|
||||
})?;
|
||||
if !usage_cfg.gather_stats {
|
||||
log::warn!("Gathering long-term stats is disabled.");
|
||||
return Err(QueueError::StatsDisabled);
|
||||
}
|
||||
let license_key = usage_cfg.license_key.ok_or_else(|| {
|
||||
log::warn!("No license key configured.");
|
||||
QueueError::NoLocalLicenseKey
|
||||
})?;
|
||||
|
||||
// Connect
|
||||
let host = "stats.libreqos.io:9128";
|
||||
let mut stream = TcpStream::connect(&host).await
|
||||
.map_err(|e| {
|
||||
log::error!("Unable to connect to {host}: {e:?}");
|
||||
QueueError::SendFail
|
||||
})?;
|
||||
|
||||
// Send Hello
|
||||
let bytes = encode_submission_hello(&license_key, &node_id, &node_name).await?;
|
||||
stream.write_all(&bytes).await
|
||||
.map_err(|e| {
|
||||
log::error!("Unable to write to {host}: {e:?}");
|
||||
QueueError::SendFail
|
||||
})?;
|
||||
|
||||
// Receive Server Public Key or Denied
|
||||
let result = stream.read_u16().await
|
||||
.map_err(|e| {
|
||||
log::error!("Unable to read reply from {host}, {e:?}");
|
||||
QueueError::SendFail
|
||||
})?;
|
||||
match result {
|
||||
0 => {
|
||||
log::error!("License validation failure.");
|
||||
return Err(QueueError::SendFail);
|
||||
}
|
||||
1 => {
|
||||
// We received validation. Now to decode the public key.
|
||||
let key_size = stream.read_u64().await
|
||||
.map_err(|e| {
|
||||
log::error!("Unable to read reply from {host}, {e:?}");
|
||||
QueueError::SendFail
|
||||
})?;
|
||||
let mut key_buffer = vec![0u8; key_size as usize];
|
||||
stream.read_exact(&mut key_buffer).await
|
||||
.map_err(|e| {
|
||||
log::error!("Unable to read reply from {host}, {e:?}");
|
||||
QueueError::SendFail
|
||||
})?;
|
||||
let server_public_key = serde_cbor::from_slice(&key_buffer)
|
||||
.map_err(|e| {
|
||||
log::error!("Unable to decode key from {host}, {e:?}");
|
||||
QueueError::SendFail
|
||||
})?;
|
||||
store_server_public_key(&server_public_key).await;
|
||||
log::info!("Received server public key.");
|
||||
}
|
||||
_ => {
|
||||
log::error!("Unexpected reply from server.");
|
||||
return Err(QueueError::SendFail);
|
||||
}
|
||||
}
|
||||
|
||||
// Proceed
|
||||
Ok(stream)
|
||||
}
|
||||
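For reference, the reply that connect_if_permitted parses above can be sketched from the sending side. This is an illustration only; the stats server itself is not part of this changeset, and write_hello_reply is a hypothetical name:

// Hypothetical sketch of the reply format the client expects (not part of
// this changeset): a big-endian u16 status, then, on success, a big-endian
// u64 length followed by the server's CBOR-encoded public key.
async fn write_hello_reply(
    stream: &mut tokio::net::TcpStream,
    server_public_key: &dryoc::dryocbox::PublicKey,
) -> std::io::Result<()> {
    use tokio::io::AsyncWriteExt;
    let key_bytes = serde_cbor::to_vec(server_public_key)
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?;
    stream.write_u16(1).await?;                       // 1 = accepted, 0 = denied
    stream.write_u64(key_bytes.len() as u64).await?;  // length of the CBOR key
    stream.write_all(&key_bytes).await?;              // CBOR-encoded PublicKey
    Ok(())
}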
16  src/rust/lts_client/src/submission_queue/current.rs  Normal file
@@ -0,0 +1,16 @@
|
||||
use std::sync::RwLock;
|
||||
use once_cell::sync::Lazy;
|
||||
use tokio::sync::mpsc::Sender;
|
||||
use crate::transport_data::StatsSubmission;
|
||||
use super::{queue::enqueue_if_allowed, comm_channel::SenderChannelMessage};
|
||||
|
||||
pub(crate) static CURRENT_STATS: Lazy<RwLock<Option<StatsSubmission>>> = Lazy::new(|| RwLock::new(None));
|
||||
|
||||
pub(crate) async fn new_submission(data: StatsSubmission, comm_tx: Sender<SenderChannelMessage>) {
|
||||
*CURRENT_STATS.write().unwrap() = Some(data.clone());
|
||||
enqueue_if_allowed(data, comm_tx).await;
|
||||
}
|
||||
|
||||
pub fn get_current_stats() -> Option<StatsSubmission> {
|
||||
CURRENT_STATS.read().unwrap().clone()
|
||||
}
|
||||
113  src/rust/lts_client/src/submission_queue/licensing.rs  Normal file
@@ -0,0 +1,113 @@
|
||||
use crate::transport_data::{ask_license_server, LicenseReply, ask_license_server_for_new_account};
|
||||
use lqos_config::EtcLqos;
|
||||
use lqos_utils::unix_time::unix_now;
|
||||
use once_cell::sync::Lazy;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
struct LicenseStatus {
|
||||
key: String,
|
||||
state: LicenseState,
|
||||
last_check: u64,
|
||||
}
|
||||
|
||||
#[derive(Default, Clone, PartialEq, Debug)]
|
||||
pub(crate) enum LicenseState {
|
||||
#[default]
|
||||
Unknown,
|
||||
Denied,
|
||||
Valid {
|
||||
/// When does the license expire?
|
||||
expiry: u64,
|
||||
/// Host to which to send stats
|
||||
stats_host: String,
|
||||
},
|
||||
}
|
||||
|
||||
static LICENSE_STATUS: Lazy<RwLock<LicenseStatus>> =
|
||||
Lazy::new(|| RwLock::new(LicenseStatus::default()));
|
||||
|
||||
pub(crate) async fn get_license_status() -> LicenseState {
|
||||
tokio::time::sleep(std::time::Duration::from_millis(100)).await;
|
||||
if let Ok(unix_time) = unix_now() {
|
||||
let license_status = {
|
||||
LICENSE_STATUS.read().await.clone()
|
||||
};
|
||||
if license_status.state == LicenseState::Unknown || license_status.last_check < unix_time - (60 * 60) {
|
||||
return check_license(unix_time).await;
|
||||
}
|
||||
return license_status.state;
|
||||
}
|
||||
LicenseState::Unknown
|
||||
}
|
||||
|
||||
const MISERLY_NO_KEY: &str = "IDontSupportDevelopersAndShouldFeelBad";
|
||||
|
||||
async fn check_license(unix_time: u64) -> LicenseState {
|
||||
log::info!("Checking LTS stats license");
|
||||
if let Ok(cfg) = EtcLqos::load() {
|
||||
// The config file is good. Is LTS enabled?
|
||||
// If it isn't, we need to try very gently to see if a pending
|
||||
// request has been submitted.
|
||||
if let Some(cfg) = cfg.long_term_stats {
|
||||
if let Some(key) = cfg.license_key {
|
||||
if key == MISERLY_NO_KEY {
|
||||
log::warn!("You are using the self-hosting license key. We'd be happy to sell you a real one.");
|
||||
return LicenseState::Valid { expiry: 0, stats_host: "192.168.100.11:9127".to_string() }
|
||||
}
|
||||
|
||||
let mut lock = LICENSE_STATUS.write().await;
|
||||
lock.last_check = unix_time;
|
||||
lock.key = key.clone();
|
||||
match ask_license_server(key.clone()).await {
|
||||
Ok(state) => {
|
||||
match state {
|
||||
LicenseReply::Denied => {
|
||||
log::warn!("License is in state: DENIED.");
|
||||
lock.state = LicenseState::Denied;
|
||||
}
|
||||
LicenseReply::Valid{expiry, stats_host} => {
|
||||
log::info!("License is in state: VALID.");
|
||||
lock.state = LicenseState::Valid{
|
||||
expiry, stats_host
|
||||
};
|
||||
}
|
||||
_ => {
|
||||
log::warn!("Unexpected type of data received. Denying to be safe.");
|
||||
lock.state = LicenseState::Denied;
|
||||
}
|
||||
}
|
||||
return lock.state.clone();
|
||||
}
|
||||
Err(e) => {
|
||||
log::error!("Error checking licensing server");
|
||||
log::error!("{e:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// LTS is unconfigured - but not explicitly disabled.
|
||||
// So we need to check if we have a pending request.
|
||||
// If a license key has been assigned, then we'll setup
|
||||
// LTS. If it hasn't, we'll just return Unknown.
|
||||
if let Some(node_id) = &cfg.node_id {
|
||||
if let Ok(result) = ask_license_server_for_new_account(node_id.to_string()).await {
|
||||
if let LicenseReply::NewActivation { license_key } = result {
|
||||
// We have a new license!
|
||||
let _ = lqos_config::enable_long_term_stats(license_key);
|
||||
// Note that we're not doing anything beyond this - the next cycle
|
||||
// will pick up on there actually being a license
|
||||
} else {
|
||||
log::info!("No pending LTS license found");
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// There's no node ID either - we can't talk to the license server.
|
||||
log::warn!("No NodeID is configured. No online services are possible.");
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log::error!("Unable to load lqosd configuration. Not going to try.");
|
||||
}
|
||||
LicenseState::Unknown
|
||||
}
|
||||
7  src/rust/lts_client/src/submission_queue/mod.rs  Normal file
@@ -0,0 +1,7 @@
|
||||
mod current;
|
||||
mod licensing;
|
||||
mod queue;
|
||||
pub(crate) mod comm_channel;
|
||||
pub(crate) use current::new_submission;
|
||||
pub(crate) use queue::enqueue_shaped_devices_if_allowed;
|
||||
pub use current::get_current_stats;
|
||||
111  src/rust/lts_client/src/submission_queue/queue.rs  Normal file
@@ -0,0 +1,111 @@
|
||||
use lqos_config::ShapedDevice;
|
||||
use once_cell::sync::Lazy;
|
||||
use thiserror::Error;
|
||||
use tokio::{sync::{Mutex, mpsc::Sender}, net::TcpStream, io::AsyncWriteExt};
|
||||
use crate::transport_data::{StatsSubmission, LtsCommand};
|
||||
use super::{licensing::{LicenseState, get_license_status}, comm_channel::{SenderChannelMessage, encode_submission}};
|
||||
|
||||
pub(crate) async fn enqueue_if_allowed(data: StatsSubmission, comm_tx: Sender<SenderChannelMessage>) {
|
||||
let license = get_license_status().await;
|
||||
match license {
|
||||
LicenseState::Unknown => {
|
||||
log::info!("Temporary error finding license status. Will retry.");
|
||||
}
|
||||
LicenseState::Denied => {
|
||||
log::error!("Your license is invalid. Please contact support.");
|
||||
}
|
||||
LicenseState::Valid{ .. } => {
|
||||
log::info!("Sending data to the queue.");
|
||||
QUEUE.push(LtsCommand::Submit(Box::new(data))).await;
|
||||
if let Err(e) = comm_tx.send(SenderChannelMessage::QueueReady).await {
|
||||
log::error!("Unable to send queue ready message: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn enqueue_shaped_devices_if_allowed(devices: Vec<ShapedDevice>, comm_tx: Sender<SenderChannelMessage>) {
|
||||
let license = get_license_status().await;
|
||||
match license {
|
||||
LicenseState::Unknown => {
|
||||
log::info!("Temporary error finding license status. Will retry.");
|
||||
}
|
||||
LicenseState::Denied => {
|
||||
log::error!("Your license is invalid. Please contact support.");
|
||||
}
|
||||
LicenseState::Valid{ .. } => {
|
||||
QUEUE.push(LtsCommand::Devices(devices)).await;
|
||||
let _ = comm_tx.send(SenderChannelMessage::QueueReady).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static QUEUE: Lazy<Queue> = Lazy::new(Queue::new);
|
||||
|
||||
pub(crate) struct QueueSubmission {
|
||||
pub(crate) attempts: u8,
|
||||
pub(crate) body: LtsCommand,
|
||||
pub(crate) sent: bool,
|
||||
}
|
||||
|
||||
pub(crate) struct Queue {
|
||||
queue: Mutex<Vec<QueueSubmission>>,
|
||||
}
|
||||
|
||||
impl Queue {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
queue: Mutex::new(Vec::new()),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn push(&self, data: LtsCommand) {
|
||||
{
|
||||
let mut lock = self.queue.lock().await;
|
||||
lock.push(QueueSubmission {
|
||||
attempts: 0,
|
||||
sent: false,
|
||||
body: data,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn send_queue(stream: &mut TcpStream) -> Result<(), QueueError> {
|
||||
let mut lock = QUEUE.queue.lock().await;
|
||||
for message in lock.iter_mut() {
|
||||
let submission_buffer = encode_submission(&message.body).await?;
|
||||
let ret = stream.write_all(&submission_buffer).await;
|
||||
log::info!("Sent submission: {} bytes.", submission_buffer.len());
|
||||
if ret.is_err() {
|
||||
log::error!("Unable to write to TCP stream.");
|
||||
log::error!("{:?}", ret);
|
||||
message.sent = false;
|
||||
match crate::submission_queue::comm_channel::key_exchange().await {
|
||||
true => {
|
||||
log::info!("Successfully exchanged license keys.");
|
||||
}
|
||||
false => {
|
||||
log::error!("Unable to talk to the licensing system to fix keys.");
|
||||
}
|
||||
}
|
||||
return Err(QueueError::SendFail);
|
||||
} else {
|
||||
message.sent = true;
|
||||
}
|
||||
}
|
||||
|
||||
lock.retain(|s| !s.sent);
|
||||
lock.retain(|s| s.attempts < 200);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub(crate) enum QueueError {
|
||||
#[error("No local license key")]
|
||||
NoLocalLicenseKey,
|
||||
#[error("Stats are disabled")]
|
||||
StatsDisabled,
|
||||
#[error("Unable to send")]
|
||||
SendFail,
|
||||
}
|
||||
97  src/rust/lts_client/src/transport_data/license_types.rs  Normal file
@@ -0,0 +1,97 @@
|
||||
//! Data-types used for license key exchange and lookup.
|
||||
|
||||
use serde::{Serialize, Deserialize};
|
||||
use dryoc::dryocbox::PublicKey;
|
||||
use thiserror::Error;
|
||||
|
||||
/// Network-transmitted query to ask the status of a license
|
||||
/// key.
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
pub enum LicenseRequest {
|
||||
/// Check the validity of a key
|
||||
LicenseCheck {
|
||||
/// The Key to Check
|
||||
key: String,
|
||||
},
|
||||
/// Exchange Keys
|
||||
KeyExchange {
|
||||
/// The node ID of the requesting shaper node
|
||||
node_id: String,
|
||||
/// The pretty name of the requesting shaper node
|
||||
node_name: String,
|
||||
/// The license key of the requesting shaper node
|
||||
license_key: String,
|
||||
/// The sodium-style public key of the requesting shaper node
|
||||
public_key: PublicKey,
|
||||
},
|
||||
/// Check to see if this node has been newly approved
|
||||
PendingLicenseRequest {
|
||||
/// The local node id
|
||||
node_id: String,
|
||||
}
|
||||
}
|
||||
|
||||
/// License server responses for a key
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
pub enum LicenseReply {
|
||||
/// The license is denied
|
||||
Denied,
|
||||
/// The license is valid
|
||||
Valid {
|
||||
/// When does the license expire?
|
||||
expiry: u64,
|
||||
/// Address to which statistics should be submitted
|
||||
stats_host: String,
|
||||
},
|
||||
/// Key Exchange
|
||||
MyPublicKey {
|
||||
/// The server's public key
|
||||
public_key: PublicKey,
|
||||
},
|
||||
/// New Activation
|
||||
NewActivation {
|
||||
/// The license key to apply
|
||||
license_key: String,
|
||||
}
|
||||
}
|
||||
|
||||
/// Errors that can occur when checking licenses
|
||||
#[derive(Debug, Error)]
|
||||
pub enum LicenseCheckError {
|
||||
/// Serialization error
|
||||
#[error("Unable to serialize license check")]
|
||||
SerializeFail,
|
||||
/// Network error
|
||||
#[error("Unable to send license check")]
|
||||
SendFail,
|
||||
/// Network error
|
||||
#[error("Unable to receive license result")]
|
||||
ReceiveFail,
|
||||
/// Deserialization error
|
||||
#[error("Unable to deserialize license result")]
|
||||
DeserializeFail,
|
||||
}
|
||||
|
||||
/// Stores a license id and node id for transport
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct NodeIdAndLicense {
|
||||
/// The node id
|
||||
pub node_id: String,
|
||||
/// The license key
|
||||
pub license_key: String,
|
||||
/// The Sodium Nonce
|
||||
pub nonce: [u8; 24],
|
||||
}
|
||||
|
||||
/// For the new V2 hello license system, encodes a greeting
|
||||
#[derive(Debug, Serialize, Deserialize, Default)]
|
||||
pub struct HelloVersion2 {
|
||||
/// The node id
|
||||
pub node_id: String,
|
||||
/// The license key
|
||||
pub license_key: String,
|
||||
/// The name of the node requesting service
|
||||
pub node_name: String,
|
||||
/// The Sodium Public Key
|
||||
pub client_public_key: Vec<u8>,
|
||||
}
|
||||
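Since these request and reply types travel as CBOR payloads, a small round-trip sketch may help; cbor_round_trip_example is a made-up name and the key value is invented for illustration:

// Illustration only: encode and decode a LicenseCheck request with CBOR,
// as the client and server do for the payload portion of each frame.
fn cbor_round_trip_example() {
    let request = LicenseRequest::LicenseCheck { key: "example-key".to_string() };
    let bytes = serde_cbor::to_vec(&request).expect("CBOR encoding should not fail");
    let decoded: LicenseRequest = serde_cbor::from_slice(&bytes).expect("CBOR decoding should not fail");
    assert_eq!(request, decoded);
}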
238  src/rust/lts_client/src/transport_data/license_utils.rs  Normal file
@@ -0,0 +1,238 @@
|
||||
//! Functions for talking to the license server
|
||||
//!
|
||||
//! License requests use the following format:
|
||||
//! `u16` containing the version number (currently 1), in big-endian (network order)
|
||||
//! `u64` containing the size of the payload, in big-endian (network order)
|
||||
//! `payload` containing the actual payload. The payload is CBOR-encoded.
|
||||
//!
|
||||
//! License requests are not expected to be frequent, and the connection is
|
||||
//! not reused. We use a simple framing protocol, and terminate the connection
|
||||
//! after use.
|
||||
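The framing described above fits in a few lines. frame_request is not a function in this crate; it is a sketch of the three steps that each build_* helper below repeats inline:

// Sketch only: frame an already CBOR-encoded payload for the license server.
fn frame_request(payload_cbor: &[u8]) -> Vec<u8> {
    let mut framed = Vec::with_capacity(2 + 8 + payload_cbor.len());
    framed.extend(1u16.to_be_bytes());                        // version, network order
    framed.extend((payload_cbor.len() as u64).to_be_bytes()); // payload size, network order
    framed.extend_from_slice(payload_cbor);                   // CBOR payload
    framed
}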
|
||||
use super::{LicenseCheckError, LicenseRequest, LicenseReply, LICENSE_SERVER};
|
||||
use dryoc::dryocbox::PublicKey;
|
||||
use tokio::{net::TcpStream, io::{AsyncReadExt, AsyncWriteExt}};
|
||||
|
||||
fn build_license_request(key: String) -> Result<Vec<u8>, LicenseCheckError> {
|
||||
let mut result = Vec::new();
|
||||
let payload = serde_cbor::to_vec(&LicenseRequest::LicenseCheck { key });
|
||||
if let Err(e) = payload {
|
||||
log::warn!("Unable to serialize license request. Not sending them.");
|
||||
log::warn!("{e:?}");
|
||||
return Err(LicenseCheckError::SerializeFail);
|
||||
}
|
||||
let payload = payload.unwrap();
|
||||
|
||||
// Store the version as network order
|
||||
result.extend(1u16.to_be_bytes());
|
||||
// Store the payload size as network order
|
||||
result.extend((payload.len() as u64).to_be_bytes());
|
||||
// Store the payload itself
|
||||
result.extend(payload);
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn build_activation_query(node_id: String) -> Result<Vec<u8>, LicenseCheckError> {
|
||||
let mut result = Vec::new();
|
||||
let payload = serde_cbor::to_vec(&LicenseRequest::PendingLicenseRequest { node_id } );
|
||||
if let Err(e) = payload {
|
||||
log::warn!("Unable to serialize license request. Not sending them.");
|
||||
log::warn!("{e:?}");
|
||||
return Err(LicenseCheckError::SerializeFail);
|
||||
}
|
||||
let payload = payload.unwrap();
|
||||
|
||||
// Store the version as network order
|
||||
result.extend(1u16.to_be_bytes());
|
||||
// Store the payload size as network order
|
||||
result.extend((payload.len() as u64).to_be_bytes());
|
||||
// Store the payload itself
|
||||
result.extend(payload);
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn build_key_exchange_request(
|
||||
node_id: String,
|
||||
node_name: String,
|
||||
license_key: String,
|
||||
public_key: PublicKey,
|
||||
) -> Result<Vec<u8>, LicenseCheckError> {
|
||||
let mut result = Vec::new();
|
||||
let payload = serde_cbor::to_vec(&LicenseRequest::KeyExchange {
|
||||
node_id,
|
||||
node_name,
|
||||
license_key,
|
||||
public_key,
|
||||
});
|
||||
if let Err(e) = payload {
|
||||
log::warn!("Unable to serialize statistics. Not sending them.");
|
||||
log::warn!("{e:?}");
|
||||
return Err(LicenseCheckError::SerializeFail);
|
||||
}
|
||||
let payload = payload.unwrap();
|
||||
|
||||
// Store the version as network order
|
||||
result.extend(1u16.to_be_bytes());
|
||||
// Store the payload size as network order
|
||||
result.extend((payload.len() as u64).to_be_bytes());
|
||||
// Store the payload itself
|
||||
result.extend(payload);
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
|
||||
/// Ask the license server if the license is valid
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `key` - The license key to check
|
||||
pub async fn ask_license_server(key: String) -> Result<LicenseReply, LicenseCheckError> {
|
||||
if let Ok(buffer) = build_license_request(key) {
|
||||
let stream = TcpStream::connect(LICENSE_SERVER).await;
|
||||
if let Err(e) = &stream {
|
||||
if e.kind() == std::io::ErrorKind::NotFound {
|
||||
log::error!("Unable to access {LICENSE_SERVER}. Check that lqosd is running and you have appropriate permissions.");
|
||||
return Err(LicenseCheckError::SendFail);
|
||||
}
|
||||
}
|
||||
let stream = stream;
|
||||
match stream {
|
||||
Ok(mut stream) => {
|
||||
let ret = stream.write(&buffer).await;
|
||||
if ret.is_err() {
|
||||
log::error!("Unable to write to {LICENSE_SERVER} stream.");
|
||||
log::error!("{:?}", ret);
|
||||
return Err(LicenseCheckError::SendFail);
|
||||
}
|
||||
let mut buf = Vec::with_capacity(10240);
|
||||
let ret = stream.read_to_end(&mut buf).await;
|
||||
if ret.is_err() {
|
||||
log::error!("Unable to read from {LICENSE_SERVER} stream.");
|
||||
log::error!("{:?}", ret);
|
||||
return Err(LicenseCheckError::SendFail);
|
||||
}
|
||||
|
||||
decode_response(&buf)
|
||||
}
|
||||
Err(e) => {
|
||||
log::warn!("TCP stream failed to connect: {:?}", e);
|
||||
Err(LicenseCheckError::ReceiveFail)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
Err(LicenseCheckError::SerializeFail)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn ask_license_server_for_new_account(
|
||||
node_id: String,
|
||||
) -> Result<LicenseReply, LicenseCheckError>
|
||||
{
|
||||
if let Ok(buffer) = build_activation_query(node_id) {
|
||||
let stream = TcpStream::connect(LICENSE_SERVER).await;
|
||||
if let Err(e) = &stream {
|
||||
if e.kind() == std::io::ErrorKind::NotFound {
|
||||
log::error!("Unable to access {LICENSE_SERVER}. Check that lqosd is running and you have appropriate permissions.");
|
||||
return Err(LicenseCheckError::SendFail);
|
||||
}
|
||||
}
|
||||
let stream = stream;
|
||||
match stream {
|
||||
Ok(mut stream) => {
|
||||
let ret = stream.write(&buffer).await;
|
||||
if ret.is_err() {
|
||||
log::error!("Unable to write to {LICENSE_SERVER} stream.");
|
||||
log::error!("{:?}", ret);
|
||||
return Err(LicenseCheckError::SendFail);
|
||||
}
|
||||
let mut buf = Vec::with_capacity(10240);
|
||||
let ret = stream.read_to_end(&mut buf).await;
|
||||
if ret.is_err() {
|
||||
log::error!("Unable to read from {LICENSE_SERVER} stream.");
|
||||
log::error!("{:?}", ret);
|
||||
return Err(LicenseCheckError::SendFail);
|
||||
}
|
||||
|
||||
decode_response(&buf)
|
||||
}
|
||||
Err(e) => {
|
||||
log::warn!("TCP stream failed to connect: {:?}", e);
|
||||
Err(LicenseCheckError::ReceiveFail)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
Err(LicenseCheckError::SerializeFail)
|
||||
}
|
||||
}
|
||||
|
||||
/// Ask the license server for the public key
|
||||
pub async fn exchange_keys_with_license_server(
|
||||
node_id: String,
|
||||
node_name: String,
|
||||
license_key: String,
|
||||
public_key: PublicKey,
|
||||
) -> Result<LicenseReply, LicenseCheckError> {
|
||||
if let Ok(buffer) = build_key_exchange_request(node_id, node_name, license_key, public_key) {
|
||||
let stream = TcpStream::connect(LICENSE_SERVER).await;
|
||||
if let Err(e) = &stream {
|
||||
if e.kind() == std::io::ErrorKind::NotFound {
|
||||
log::error!("Unable to access {LICENSE_SERVER}. Check that lqosd is running and you have appropriate permissions.");
|
||||
return Err(LicenseCheckError::SendFail);
|
||||
}
|
||||
}
|
||||
let mut stream = stream.unwrap(); // This unwrap is safe, we checked that it exists previously
|
||||
let ret = stream.write(&buffer).await;
|
||||
if ret.is_err() {
|
||||
log::error!("Unable to write to {LICENSE_SERVER} stream.");
|
||||
log::error!("{:?}", ret);
|
||||
return Err(LicenseCheckError::SendFail);
|
||||
}
|
||||
let mut buf = Vec::with_capacity(10240);
|
||||
let ret = stream.read_to_end(&mut buf).await;
|
||||
if ret.is_err() {
|
||||
log::error!("Unable to read from {LICENSE_SERVER} stream.");
|
||||
log::error!("{:?}", ret);
|
||||
return Err(LicenseCheckError::SendFail);
|
||||
}
|
||||
|
||||
decode_response(&buf)
|
||||
} else {
|
||||
Err(LicenseCheckError::SerializeFail)
|
||||
}
|
||||
}
|
||||
|
||||
fn decode_response(buf: &[u8]) -> Result<LicenseReply, LicenseCheckError> {
|
||||
if buf.len() < 2 + std::mem::size_of::<u64>() {
|
||||
log::error!("License server returned an invalid response");
|
||||
return Err(LicenseCheckError::DeserializeFail);
|
||||
}
|
||||
const U64SIZE: usize = std::mem::size_of::<u64>();
|
||||
let version_buf = &buf[0..2]
|
||||
.try_into()
|
||||
.map_err(|_| LicenseCheckError::DeserializeFail)?;
|
||||
let version = u16::from_be_bytes(*version_buf);
|
||||
let size_buf = &buf[2..2 + U64SIZE]
|
||||
.try_into()
|
||||
.map_err(|_| LicenseCheckError::DeserializeFail)?;
|
||||
let size = u64::from_be_bytes(*size_buf);
|
||||
|
||||
if version != 1 {
|
||||
log::error!("License server returned an unknown version: {}", version);
|
||||
return Err(LicenseCheckError::DeserializeFail);
|
||||
}
|
||||
|
||||
let start = 2 + U64SIZE;
|
||||
let end = start + size as usize;
|
||||
let payload: Result<LicenseReply, _> = serde_cbor::from_slice(&buf[start..end]);
|
||||
match payload {
|
||||
Ok(payload) => Ok(payload),
|
||||
Err(e) => {
|
||||
log::error!("Unable to deserialize license result");
|
||||
log::error!("{e:?}");
|
||||
Err(LicenseCheckError::DeserializeFail)
|
||||
}
|
||||
}
|
||||
}
|
||||
15  src/rust/lts_client/src/transport_data/mod.rs  Normal file
@@ -0,0 +1,15 @@
|
||||
//! Holds data-types and utility functions for the long-term
|
||||
//! statistics retention system.
|
||||
//!
|
||||
//! This is in the bus so that it can be readily shared between
|
||||
//! server and client code.
|
||||
|
||||
mod submissions;
|
||||
mod license_types;
|
||||
mod license_utils;
|
||||
|
||||
pub use submissions::*;
|
||||
pub use license_types::*;
|
||||
pub use license_utils::*;
|
||||
|
||||
pub(crate) const LICENSE_SERVER: &str = "license.libreqos.io:9126";
|
||||
256  src/rust/lts_client/src/transport_data/submissions.rs  Normal file
@@ -0,0 +1,256 @@
|
||||
//! Holds data-types to be submitted as part of long-term stats
|
||||
//! collection.
|
||||
|
||||
use lqos_config::ShapedDevice;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use uisp::Device;
|
||||
|
||||
use crate::collector::CakeStats;
|
||||
|
||||
/// Type that provides a minimum, maximum and average value
|
||||
/// for a given statistic within the associated time period.
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
pub struct StatsSummary {
|
||||
/// Minimum value
|
||||
pub min: (u64, u64),
|
||||
/// Maximum value
|
||||
pub max: (u64, u64),
|
||||
/// Average value
|
||||
pub avg: (u64, u64),
|
||||
}
|
||||
|
||||
/// Type that provides a minimum, maximum and average value
|
||||
/// for a given RTT value within the associated time period.
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
pub struct StatsRttSummary {
|
||||
/// Minimum value
|
||||
pub min: u32,
|
||||
/// Maximum value
|
||||
pub max: u32,
|
||||
/// Average value
|
||||
pub avg: u32,
|
||||
}
|
||||
|
||||
/// Type that holds total traffic statistics for a given time period
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
pub struct StatsTotals {
|
||||
/// Total number of packets
|
||||
pub packets: StatsSummary,
|
||||
/// Total number of bits
|
||||
pub bits: StatsSummary,
|
||||
/// Total number of shaped bits
|
||||
pub shaped_bits: StatsSummary,
|
||||
}
|
||||
|
||||
/// Type that holds per-host statistics for a given stats collation
|
||||
/// period.
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
pub struct StatsHost {
|
||||
/// Host circuit_id as it appears in ShapedDevices.csv
|
||||
pub circuit_id: Option<String>,
|
||||
/// Host's IP address
|
||||
pub ip_address: String,
|
||||
/// Host's traffic statistics
|
||||
pub bits: StatsSummary,
|
||||
/// Host's RTT statistics
|
||||
pub rtt: StatsRttSummary,
|
||||
}
|
||||
|
||||
/// Node inside a traffic summary tree
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
pub struct StatsTreeNode {
|
||||
/// Index in the tree vector
|
||||
pub index: usize,
|
||||
/// Name (from network.json)
|
||||
pub name: String,
|
||||
/// Maximum allowed throughput (from network.json)
|
||||
pub max_throughput: (u32, u32),
|
||||
/// Current throughput (from network.json)
|
||||
pub current_throughput: StatsSummary,
|
||||
/// RTT summaries
|
||||
pub rtt: StatsRttSummary,
|
||||
/// Indices of parents in the tree
|
||||
pub parents: Vec<usize>,
|
||||
/// Index of immediate parent in the tree
|
||||
pub immediate_parent: Option<usize>,
|
||||
/// Node Type
|
||||
pub node_type: Option<String>,
|
||||
}
|
||||
|
||||
/// Collation of all stats for a given time period
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
pub struct StatsSubmission {
|
||||
/// Timestamp of the collation (UNIX time)
|
||||
pub timestamp: u64,
|
||||
/// Total traffic statistics
|
||||
pub totals: Option<StatsTotals>,
|
||||
/// Per-host statistics
|
||||
pub hosts: Option<Vec<StatsHost>>,
|
||||
/// Tree of traffic summaries
|
||||
pub tree: Option<Vec<StatsTreeNode>>,
|
||||
/// CPU utilization on the shaper
|
||||
pub cpu_usage: Option<Vec<u32>>,
|
||||
/// RAM utilization on the shaper
|
||||
pub ram_percent: Option<u32>,
|
||||
/// UISP Device Information
|
||||
pub uisp_devices: Option<Vec<UispExtDevice>>,
|
||||
/// Queue Stats
|
||||
pub cake_stats: Option<(Vec<CakeStats>, Vec<CakeStats>)>,
|
||||
|
||||
}
|
||||
|
||||
/// Submission to the `lts_node` process
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub enum LtsCommand {
|
||||
Submit(Box<StatsSubmission>),
|
||||
Devices(Vec<ShapedDevice>),
|
||||
}
|
||||
|
||||
/// Extended data provided from UISP
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
pub struct UispExtDevice {
|
||||
pub device_id: String,
|
||||
pub name: String,
|
||||
pub model: String,
|
||||
pub firmware: String,
|
||||
pub status: String,
|
||||
pub frequency: f64,
|
||||
pub channel_width: i32,
|
||||
pub tx_power: i32,
|
||||
pub rx_signal: i32,
|
||||
pub downlink_capacity_mbps: i32,
|
||||
pub uplink_capacity_mbps: i32,
|
||||
pub noise_floor: i32,
|
||||
pub mode: String,
|
||||
pub interfaces: Vec<UispExtDeviceInterface>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
pub struct UispExtDeviceInterface {
|
||||
pub name: String,
|
||||
pub mac: String,
|
||||
pub ip: Vec<String>,
|
||||
pub status: String,
|
||||
pub speed: String,
|
||||
}
|
||||
|
||||
impl From<Device> for UispExtDevice {
|
||||
fn from(d: Device) -> Self {
|
||||
let device_id = d.identification.id.to_string();
|
||||
let device_name = d.get_name().as_ref().unwrap_or(&"".to_string()).to_string();
|
||||
let model = d
|
||||
.identification
|
||||
.modelName
|
||||
.as_ref()
|
||||
.unwrap_or(&"".to_string())
|
||||
.to_string();
|
||||
let firmware = d
|
||||
.identification
|
||||
.firmwareVersion
|
||||
.as_ref()
|
||||
.unwrap_or(&"".to_string())
|
||||
.to_string();
|
||||
let mode = d.mode.as_ref().unwrap_or(&"".to_string()).to_string();
|
||||
let status;
|
||||
let frequency;
|
||||
let channel_width;
|
||||
let tx_power;
|
||||
let rx_signal;
|
||||
let downlink_capacity_mbps;
|
||||
let uplink_capacity_mbps;
|
||||
if let Some(ov) = &d.overview {
|
||||
status = ov.status.as_ref().unwrap_or(&"".to_string()).to_string();
|
||||
frequency = ov.frequency.unwrap_or(0.0);
|
||||
channel_width = ov.channelWidth.unwrap_or(0);
|
||||
tx_power = ov.transmitPower.unwrap_or(0);
|
||||
rx_signal = ov.signal.unwrap_or(0);
|
||||
downlink_capacity_mbps = ov.downlinkCapacity.unwrap_or(0);
|
||||
uplink_capacity_mbps = ov.uplinkCapacity.unwrap_or(0);
|
||||
} else {
|
||||
status = "".to_string();
|
||||
frequency = 0.0;
|
||||
channel_width = 0;
|
||||
tx_power = 0;
|
||||
rx_signal = 0;
|
||||
downlink_capacity_mbps = 0;
|
||||
uplink_capacity_mbps = 0;
|
||||
}
|
||||
|
||||
let mut noise_floor = 0;
|
||||
let mut iflist = Vec::new();
|
||||
if let Some(interfaces) = &d.interfaces {
|
||||
interfaces.iter().for_each(|i| {
|
||||
if let Some(wireless) = &i.wireless {
|
||||
if let Some(nf) = wireless.noiseFloor {
|
||||
noise_floor = nf;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(addr) = &i.addresses {
|
||||
let mut ip = Vec::new();
|
||||
addr.iter().for_each(|a| {
|
||||
if let Some(ipaddr) = &a.cidr {
|
||||
ip.push(ipaddr.to_string());
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
let mut if_name = "".to_string();
|
||||
let mut if_mac = "".to_string();
|
||||
if let Some(id) = &i.identification {
|
||||
if let Some(name) = &id.name {
|
||||
if_name = name.to_string();
|
||||
}
|
||||
if let Some(mac) = &id.mac {
|
||||
if_mac = mac.to_string();
|
||||
}
|
||||
}
|
||||
|
||||
let mut if_status = "".to_string();
|
||||
let mut if_speed = "".to_string();
|
||||
if let Some(status) = &i.status {
|
||||
if let Some(s) = &status.status {
|
||||
if_status = s.to_string();
|
||||
}
|
||||
if let Some(s) = &status.speed {
|
||||
if_speed = s.to_string();
|
||||
}
|
||||
}
|
||||
|
||||
let mut if_ip = Vec::new();
|
||||
if let Some(addr) = &i.addresses {
|
||||
addr.iter().for_each(|a| {
|
||||
if let Some(ipaddr) = &a.cidr {
|
||||
if_ip.push(ipaddr.to_string());
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
iflist.push(UispExtDeviceInterface {
|
||||
name: if_name,
|
||||
mac: if_mac,
|
||||
status: if_status,
|
||||
speed: if_speed,
|
||||
ip: if_ip,
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
Self {
|
||||
device_id,
|
||||
name: device_name,
|
||||
model,
|
||||
firmware,
|
||||
status,
|
||||
frequency,
|
||||
channel_width,
|
||||
tx_power,
|
||||
rx_signal,
|
||||
downlink_capacity_mbps,
|
||||
uplink_capacity_mbps,
|
||||
noise_floor,
|
||||
mode,
|
||||
interfaces: iflist,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,5 +0,0 @@
|
||||
max_width = 79
|
||||
tab_spaces = 2
|
||||
use_small_heuristics = "Max"
|
||||
array_width = 77
|
||||
|
||||
10  src/rust/uisp/Cargo.toml  Normal file
@@ -0,0 +1,10 @@
|
||||
[package]
|
||||
name = "uisp"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
lqos_config = { path = "../lqos_config" }
|
||||
serde = { version = "1.0", features = [ "derive" ] }
|
||||
reqwest = { version = "0.11", features = [ "json" ] }
|
||||
anyhow = "1"
|
||||
42  src/rust/uisp/src/data_link.rs  Normal file
@@ -0,0 +1,42 @@
|
||||
use serde::Deserialize;
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct DataLink {
|
||||
pub id: String,
|
||||
pub from: DataLinkFrom,
|
||||
pub to: DataLinkTo,
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct DataLinkFrom {
|
||||
pub device: Option<DataLinkDevice>,
|
||||
pub site: Option<DataLinkSite>,
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct DataLinkDevice {
|
||||
pub identification: DataLinkDeviceIdentification,
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct DataLinkDeviceIdentification {
|
||||
pub id: String,
|
||||
pub name: String,
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct DataLinkTo {
|
||||
pub device: Option<DataLinkDevice>,
|
||||
pub site: Option<DataLinkSite>,
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct DataLinkSite {
|
||||
pub identification: DataLinkDeviceIdentification,
|
||||
}
|
||||
203  src/rust/uisp/src/device.rs  Normal file
@@ -0,0 +1,203 @@
|
||||
use std::collections::HashSet;
|
||||
|
||||
use serde::Deserialize;
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct Device {
|
||||
pub identification: DeviceIdentification,
|
||||
pub ipAddress: Option<String>,
|
||||
pub attributes: Option<DeviceAttributes>,
|
||||
pub mode: Option<String>,
|
||||
pub interfaces: Option<Vec<DeviceInterface>>,
|
||||
pub overview: Option<DeviceOverview>,
|
||||
}
|
||||
|
||||
impl Device {
|
||||
pub fn get_name(&self) -> Option<String> {
|
||||
if let Some(hostname) = &self.identification.hostname {
|
||||
return Some(hostname.clone());
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
pub fn get_model(&self) -> Option<String> {
|
||||
if let Some(model) = &self.identification.model {
|
||||
return Some(model.clone());
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
pub fn get_model_name(&self) -> Option<String> {
|
||||
if let Some(model) = &self.identification.modelName {
|
||||
return Some(model.clone());
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
pub fn get_firmware(&self) -> Option<String> {
|
||||
if let Some(firmware) = &self.identification.firmwareVersion {
|
||||
return Some(firmware.clone());
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
pub fn get_id(&self) -> String {
|
||||
self.identification.id.clone()
|
||||
}
|
||||
|
||||
pub fn get_site_id(&self) -> Option<String> {
|
||||
if let Some(site) = &self.identification.site {
|
||||
return Some(site.id.clone());
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
pub fn get_status(&self) -> Option<String> {
|
||||
if let Some(overview) = &self.overview {
|
||||
if let Some(status) = &overview.status {
|
||||
return Some(status.clone());
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
pub fn get_frequency(&self) -> Option<f64> {
|
||||
if let Some(overview) = &self.overview {
|
||||
if let Some(frequency) = &overview.frequency {
|
||||
return Some(*frequency);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn strip_ip(ip: &str) -> String {
|
||||
if !ip.contains('/') {
|
||||
ip.to_string()
|
||||
} else {
|
||||
ip[0..ip.find('/').unwrap()].to_string()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_addresses(&self) -> HashSet<String> {
|
||||
let mut result = HashSet::new();
|
||||
if let Some(ip) = &self.ipAddress {
|
||||
result.insert(Device::strip_ip(ip));
|
||||
}
|
||||
if let Some(interfaces) = &self.interfaces {
|
||||
for interface in interfaces {
|
||||
if let Some(addresses) = &interface.addresses {
|
||||
for addy in addresses {
|
||||
if let Some(cidr) = &addy.cidr {
|
||||
result.insert(Device::strip_ip(cidr));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
pub fn get_noise_floor(&self) -> Option<i32> {
|
||||
if let Some(interfaces) = &self.interfaces {
|
||||
for intf in interfaces.iter() {
|
||||
if let Some(w) = &intf.wireless {
|
||||
if let Some(nf) = &w.noiseFloor {
|
||||
return Some(*nf);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct DeviceIdentification {
|
||||
pub id: String,
|
||||
pub hostname: Option<String>,
|
||||
pub mac: Option<String>,
|
||||
pub model: Option<String>,
|
||||
pub modelName: Option<String>,
|
||||
pub role: Option<String>,
|
||||
pub site: Option<DeviceSite>,
|
||||
pub firmwareVersion: Option<String>,
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct DeviceSite {
|
||||
pub id: String,
|
||||
pub parent: Option<DeviceParent>,
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct DeviceParent {
|
||||
pub id: String,
|
||||
pub name: String,
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct DeviceAttributes {
|
||||
pub ssid: Option<String>,
|
||||
pub apDevice: Option<DeviceAccessPoint>,
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct DeviceAccessPoint {
|
||||
pub id: Option<String>,
|
||||
pub name: Option<String>,
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct DeviceInterface {
|
||||
pub identification: Option<InterfaceIdentification>,
|
||||
pub addresses: Option<Vec<DeviceAddress>>,
|
||||
pub status: Option<InterfaceStatus>,
|
||||
pub wireless: Option<DeviceWireless>,
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct InterfaceIdentification {
|
||||
pub name: Option<String>,
|
||||
pub mac: Option<String>,
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct DeviceAddress {
|
||||
pub cidr: Option<String>,
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct InterfaceStatus {
|
||||
pub status: Option<String>,
|
||||
pub speed: Option<String>,
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct DeviceOverview {
|
||||
pub status: Option<String>,
|
||||
pub frequency: Option<f64>,
|
||||
pub outageScore: Option<f64>,
|
||||
pub stationsCount: Option<i32>,
|
||||
pub downlinkCapacity: Option<i32>,
|
||||
pub uplinkCapacity: Option<i32>,
|
||||
pub channelWidth: Option<i32>,
|
||||
pub transmitPower: Option<i32>,
|
||||
pub signal: Option<i32>,
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct DeviceWireless {
|
||||
pub noiseFloor: Option<i32>,
|
||||
}
|
||||
36  src/rust/uisp/src/lib.rs  Normal file
@@ -0,0 +1,36 @@
|
||||
/// UISP Data Structures
|
||||
///
|
||||
/// Strong-typed implementation of the UISP API system. Used by long-term
|
||||
/// stats to attach device information, possibly in the future used to
|
||||
/// accelerate the UISP integration.
|
||||
|
||||
mod rest; // REST HTTP services
|
||||
mod site; // UISP data definition for a site, pulled from the JSON
|
||||
mod device; // UISP data definition for a device, including interfaces
|
||||
mod data_link; // UISP data link definitions
|
||||
use lqos_config::LibreQoSConfig;
|
||||
pub use site::Site;
|
||||
pub use device::Device;
|
||||
pub use data_link::DataLink;
|
||||
use self::rest::nms_request_get_vec;
|
||||
use anyhow::Result;
|
||||
|
||||
/// Loads a complete list of all sites from UISP
|
||||
pub async fn load_all_sites(config: LibreQoSConfig) -> Result<Vec<Site>> {
|
||||
Ok(nms_request_get_vec("sites", &config.uisp_auth_token, &config.uisp_base_url).await?)
|
||||
}
|
||||
|
||||
/// Load all devices from UISP that are authorized, and include their full interface definitions
|
||||
pub async fn load_all_devices_with_interfaces(config: LibreQoSConfig) -> Result<Vec<Device>> {
|
||||
Ok(nms_request_get_vec(
|
||||
"devices?withInterfaces=true&authorized=true",
|
||||
&config.uisp_auth_token,
|
||||
&config.uisp_base_url,
|
||||
)
|
||||
.await?)
|
||||
}
|
||||
|
||||
/// Loads all data links from UISP (including links in client sites)
|
||||
pub async fn load_all_data_links(config: LibreQoSConfig) -> Result<Vec<DataLink>> {
|
||||
Ok(nms_request_get_vec("data-links", &config.uisp_auth_token, &config.uisp_base_url).await?)
|
||||
}
|
||||
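A hedged usage sketch of the loaders above; print_uisp_device_count is a made-up caller and assumes a LibreQoSConfig has already been loaded elsewhere with uisp_auth_token and uisp_base_url set:

// Illustration only: count the authorized devices visible through UISP.
async fn print_uisp_device_count(config: lqos_config::LibreQoSConfig) -> anyhow::Result<()> {
    let devices = uisp::load_all_devices_with_interfaces(config).await?;
    println!("UISP reports {} authorized devices", devices.len());
    Ok(())
}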
136  src/rust/uisp/src/rest.rs  Normal file
@@ -0,0 +1,136 @@
|
||||
use anyhow::Result;
|
||||
use serde::de::DeserializeOwned;
|
||||
|
||||
fn url_fixup(base: &str) -> String {
|
||||
if base.contains("/nms/api/v2.1") {
|
||||
base.to_string()
|
||||
} else {
|
||||
format!("{base}/nms/api/v2.1")
|
||||
}
|
||||
}
|
||||
|
||||
/// Submits a request to the UNMS API and returns the result as unprocessed text.
|
||||
/// This is a debug function: it doesn't do any parsing
|
||||
#[allow(dead_code)]
|
||||
pub async fn nms_request_get_text(
|
||||
url: &str,
|
||||
key: &str,
|
||||
api: &str,
|
||||
) -> Result<String, reqwest::Error> {
|
||||
let full_url = format!("{}/{}", url_fixup(api), url);
|
||||
//println!("{full_url}");
|
||||
let client = reqwest::Client::new();
|
||||
|
||||
let res = client
|
||||
.get(&full_url)
|
||||
.header("'Content-Type", "application/json")
|
||||
.header("X-Auth-Token", key)
|
||||
.send()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
res.text().await
|
||||
}
|
||||
|
||||
/// Submits a request to the UNMS API, returning a deserialized vector of type T.
|
||||
#[allow(dead_code)]
|
||||
pub async fn nms_request_get_vec<T>(
|
||||
url: &str,
|
||||
key: &str,
|
||||
api: &str,
|
||||
) -> Result<Vec<T>, reqwest::Error>
|
||||
where
|
||||
T: DeserializeOwned,
|
||||
{
|
||||
let full_url = format!("{}/{}", url_fixup(api), url);
|
||||
//println!("{full_url}");
|
||||
let client = reqwest::Client::new();
|
||||
|
||||
let res = client
|
||||
.get(&full_url)
|
||||
.header("'Content-Type", "application/json")
|
||||
.header("X-Auth-Token", key)
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
res.json::<Vec<T>>().await
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub async fn nms_request_get_one<T>(url: &str, key: &str, api: &str) -> Result<T, reqwest::Error>
|
||||
where
|
||||
T: DeserializeOwned,
|
||||
{
|
||||
let full_url = format!("{}/{}", url_fixup(api), url);
|
||||
//println!("{full_url}");
|
||||
let client = reqwest::Client::new();
|
||||
|
||||
let res = client
|
||||
.get(&full_url)
|
||||
.header("'Content-Type", "application/json")
|
||||
.header("X-Auth-Token", key)
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
res.json::<T>().await
|
||||
}
|
||||
|
||||
/// This is a debug function: it doesn't do any parsing
|
||||
#[allow(dead_code)]
|
||||
pub async fn crm_request_get_text(
|
||||
api: &str,
|
||||
key: &str,
|
||||
url: &str,
|
||||
) -> Result<String, reqwest::Error> {
|
||||
let full_url = format!("{}/{}", url_fixup(api), url);
|
||||
let client = reqwest::Client::new();
|
||||
|
||||
let res = client
|
||||
.get(&full_url)
|
||||
.header("'Content-Type", "application/json")
|
||||
.header("X-Auth-App-Key", key)
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
res.text().await
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub async fn crm_request_get_vec<T>(
|
||||
api: &str,
|
||||
key: &str,
|
||||
url: &str,
|
||||
) -> Result<Vec<T>, reqwest::Error>
|
||||
where
|
||||
T: DeserializeOwned,
|
||||
{
|
||||
let full_url = format!("{}/{}", api, url);
|
||||
let client = reqwest::Client::new();
|
||||
|
||||
let res = client
|
||||
.get(&full_url)
|
||||
.header("'Content-Type", "application/json")
|
||||
.header("X-Auth-App-Key", key)
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
res.json::<Vec<T>>().await
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub async fn crm_request_get_one<T>(api: &str, key: &str, url: &str) -> Result<T, reqwest::Error>
|
||||
where
|
||||
T: DeserializeOwned,
|
||||
{
|
||||
let full_url = format!("{}/{}", api, url);
|
||||
let client = reqwest::Client::new();
|
||||
|
||||
let res = client
|
||||
.get(&full_url)
|
||||
.header("'Content-Type", "application/json")
|
||||
.header("X-Auth-App-Key", key)
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
res.json::<T>().await
|
||||
}
|
||||
159  src/rust/uisp/src/site.rs  Normal file
@@ -0,0 +1,159 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct Site {
|
||||
pub id: String,
|
||||
pub identification: Option<SiteId>,
|
||||
pub description: Option<Description>,
|
||||
pub qos: Option<Qos>,
|
||||
pub ucrm: Option<Ucrm>,
|
||||
}
|
||||
|
||||
impl Site {
|
||||
pub fn name(&self) -> Option<String> {
|
||||
if let Some(id) = &self.identification {
|
||||
if let Some(name) = &id.name {
|
||||
return Some(name.clone());
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
pub fn address(&self) -> Option<String> {
|
||||
if let Some(desc) = &self.description {
|
||||
if let Some(address) = &desc.address {
|
||||
return Some(address.to_string());
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
pub fn is_tower(&self) -> bool {
|
||||
if let Some(id) = &self.identification {
|
||||
if let Some(site_type) = &id.site_type {
|
||||
if site_type == "site" {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
pub fn is_client_site(&self) -> bool {
|
||||
if let Some(id) = &self.identification {
|
||||
if let Some(site_type) = &id.site_type {
|
||||
if site_type == "endpoint" {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
pub fn is_child_of(&self, parent_id: &str) -> bool {
|
||||
if let Some(id) = &self.identification {
|
||||
if let Some(parent) = &id.parent {
|
||||
if let Some(pid) = &parent.id {
|
||||
if pid == parent_id {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
pub fn qos(&self, default_download_mbps: u32, default_upload_mbps: u32) -> (u32, u32) {
|
||||
let mut down = default_download_mbps;
|
||||
let mut up = default_upload_mbps;
|
||||
if let Some(qos) = &self.qos {
|
||||
if let Some(d) = &qos.downloadSpeed {
|
||||
down = *d as u32 / 1_000_000;
|
||||
}
|
||||
if let Some(u) = &qos.uploadSpeed {
|
||||
up = *u as u32 / 1_000_000;
|
||||
}
|
||||
}
|
||||
if down == 0 {
|
||||
down = default_download_mbps;
|
||||
}
|
||||
if up == 0 {
|
||||
up = default_upload_mbps;
|
||||
}
|
||||
(down, up)
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct SiteParent {
|
||||
pub id: Option<String>,
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct SiteId {
|
||||
pub name: Option<String>,
|
||||
#[serde(rename = "type")]
|
||||
pub site_type: Option<String>,
|
||||
pub parent: Option<SiteParent>,
|
||||
pub status: Option<String>,
|
||||
pub suspended: bool,
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||
pub struct Endpoint {
|
||||
pub id: Option<String>,
|
||||
pub name: Option<String>,
|
||||
pub parentId: Option<String>,
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct Description {
|
||||
pub address: Option<String>,
|
||||
pub location: Option<Location>,
|
||||
pub height: Option<f64>,
|
||||
pub endpoints: Option<Vec<Endpoint>>,
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct Location {
|
||||
pub longitude: f64,
|
||||
pub latitude: f64,
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct Qos {
|
||||
pub enabled: bool,
|
||||
pub downloadSpeed: Option<usize>,
|
||||
pub uploadSpeed: Option<usize>,
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct Ucrm {
|
||||
pub client: Option<UcrmClient>,
|
||||
pub service: Option<UcrmService>,
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct UcrmClient {
|
||||
pub id: String,
|
||||
pub name: String,
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Deserialize, Debug)]
|
||||
pub struct UcrmService {
|
||||
pub id: String,
|
||||
pub name: String,
|
||||
pub status: i32,
|
||||
pub tariffId: String,
|
||||
pub trafficShapingOverrideEnabled: bool,
|
||||
}
|
||||
@@ -6,7 +6,7 @@ license = "GPL-2.0-only"
|
||||
|
||||
[dependencies]
|
||||
clap = { version = "4", features = ["derive"] }
|
||||
tokio = { version = "1", features = [ "rt", "macros", "net", "io-util", "time" ] }
|
||||
tokio = { version = "1", features = [ "full" ] }
|
||||
anyhow = "1"
|
||||
lqos_bus = { path = "../lqos_bus" }
|
||||
lqos_utils = { path = "../lqos_utils" }
|
||||
|
||||
@@ -5,6 +5,6 @@ edition = "2021"
|
||||
license = "GPL-2.0-only"
|
||||
|
||||
[dependencies]
|
||||
tokio = { version = "1", features = [ "rt", "macros", "net", "io-util", "time" ] }
|
||||
tokio = { version = "1", features = [ "full" ] }
|
||||
anyhow = "1"
|
||||
lqos_bus = { path = "../lqos_bus" }
|
||||
|
||||
BIN  src/testdata/network_new.png  vendored  Normal file  (binary file not shown; 62 KiB)