mirror of
https://github.com/LibreQoE/LibreQoS.git
synced 2025-02-25 18:55:32 -06:00
add v1.3-alpha
This commit is contained in:
3
.gitmodules
vendored
3
.gitmodules
vendored
@@ -13,3 +13,6 @@
|
|||||||
[submodule "v1.2/xdp-cpumap-tc"]
|
[submodule "v1.2/xdp-cpumap-tc"]
|
||||||
path = v1.2/xdp-cpumap-tc
|
path = v1.2/xdp-cpumap-tc
|
||||||
url = https://github.com/xdp-project/xdp-cpumap-tc.git
|
url = https://github.com/xdp-project/xdp-cpumap-tc.git
|
||||||
|
[submodule "v1.3/xdp-cpumap-tc"]
|
||||||
|
path = v1.3/xdp-cpumap-tc
|
||||||
|
url = https://github.com/xdp-project/xdp-cpumap-tc.git
|
||||||
|
|||||||
763
v1.3/LibreQoS.py
Executable file
763
v1.3/LibreQoS.py
Executable file
@@ -0,0 +1,763 @@
|
|||||||
|
#!/usr/bin/python3
|
||||||
|
# v1.2.1
|
||||||
|
|
||||||
|
import csv
|
||||||
|
import io
|
||||||
|
import ipaddress
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import os.path
|
||||||
|
import subprocess
|
||||||
|
from subprocess import PIPE, STDOUT
|
||||||
|
from datetime import datetime, timedelta
|
||||||
|
import multiprocessing
|
||||||
|
import warnings
|
||||||
|
import psutil
|
||||||
|
import argparse
|
||||||
|
import logging
|
||||||
|
import shutil
|
||||||
|
import binpacking
|
||||||
|
|
||||||
|
from ispConfig import fqOrCAKE, upstreamBandwidthCapacityDownloadMbps, upstreamBandwidthCapacityUploadMbps, \
|
||||||
|
interfaceA, interfaceB, enableActualShellCommands, \
|
||||||
|
runShellCommandsAsSudo, generatedPNDownloadMbps, generatedPNUploadMbps, queuesAvailableOverride
|
||||||
|
|
||||||
|
def shell(command):
    """Run *command* as a subprocess, streaming its stdout into the log.

    Dry-run aware: when enableActualShellCommands is False the command is
    only logged, never executed. When runShellCommandsAsSudo is True the
    command is prefixed with 'sudo'. Output lines containing known
    netlink/kernel error markers raise a warning so failures are visible.
    """
    if enableActualShellCommands:
        if runShellCommandsAsSudo:
            command = 'sudo ' + command
        logging.info(command)
        commands = command.split(' ')
        proc = subprocess.Popen(commands, stdout=subprocess.PIPE)
        for line in io.TextIOWrapper(proc.stdout, encoding="utf-8"):  # or another encoding
            logging.info(line)
            if ("RTNETLINK answers" in line) or ("We have an error talking to the kernel" in line):
                warnings.warn("Command: '" + command + "' resulted in " + line, stacklevel=2)
        # BUGFIX: reap the child process; without wait() each invocation
        # leaves a zombie process behind for the lifetime of the run.
        proc.wait()
    else:
        logging.info(command)
|
||||||
|
|
||||||
|
def checkIfFirstRunSinceBoot():
    """Return True when LibreQoS has not yet run since the system booted.

    Compares the timestamp recorded in lastRun.txt against the system boot
    time (via psutil). A missing lastRun.txt — or one whose contents cannot
    be parsed (e.g. truncated by a crash mid-write) — is treated as a first
    run rather than crashing the refresh.
    """
    if not os.path.isfile("lastRun.txt"):
        print("First time run since system boot.")
        return True
    try:
        with open("lastRun.txt", 'r') as file:
            lastRun = datetime.strptime(file.read(), "%d-%b-%Y (%H:%M:%S.%f)")
    except ValueError:
        # BUGFIX: a corrupt/partial timestamp previously raised and aborted
        # the whole refresh; fall back to first-run behavior instead.
        print("First time run since system boot.")
        return True
    systemRunningSince = datetime.fromtimestamp(psutil.boot_time())
    if systemRunningSince > lastRun:
        print("First time run since system boot.")
        return True
    print("Not first time run since system boot.")
    return False
|
||||||
|
|
||||||
|
def clearPriorSettings(interfaceA, interfaceB):
    """Delete the existing root qdisc from both shaping interfaces.

    No-op when enableActualShellCommands is False (simulated run).
    """
    if enableActualShellCommands:
        # Clear tc filter on each interface so queues can be rebuilt cleanly.
        for interface in (interfaceA, interfaceB):
            shell('tc qdisc delete dev ' + interface + ' root')
|
||||||
|
|
||||||
|
def tearDown(interfaceA, interfaceB):
    """Full teardown of everything for exiting LibreQoS."""
    if enableActualShellCommands:
        # Clear IP hash filters, then detach the XDP program from each NIC.
        result = os.system('./xdp-cpumap-tc/src/xdp_iphash_to_cpu_cmdline --clear')
        for interface in (interfaceA, interfaceB):
            shell('ip link set dev ' + interface + ' xdp off')
    # clearPriorSettings() performs its own enableActualShellCommands check.
    clearPriorSettings(interfaceA, interfaceB)
|
||||||
|
|
||||||
|
def findQueuesAvailable():
    """Determine how many NIC queues / CPU cores are usable for shaping.

    Returns min(NIC tx queue count, CPU core count). A non-zero
    queuesAvailableOverride from ispConfig replaces the sysfs-derived queue
    count. When enableActualShellCommands is False a fixed value of 16 is
    used for the simulated run.
    """
    if enableActualShellCommands:
        if queuesAvailableOverride == 0:
            # Count the tx-* queue directories the NIC exposes in sysfs.
            queuesAvailable = 0
            path = '/sys/class/net/' + interfaceA + '/queues/'
            directory_contents = os.listdir(path)
            for item in directory_contents:
                if "tx-" in str(item):
                    queuesAvailable += 1
            print("NIC queues:\t\t\t" + str(queuesAvailable))
        else:
            queuesAvailable = queuesAvailableOverride
            print("NIC queues (Override):\t\t\t" + str(queuesAvailable))
        cpuCount = multiprocessing.cpu_count()
        print("CPU cores:\t\t\t" + str(cpuCount))
        # Never schedule more queues than there are CPU cores.
        queuesAvailable = min(queuesAvailable, cpuCount)
        print("queuesAvailable set to:\t" + str(queuesAvailable))
    else:
        # Simulated run: report and use a fixed count of 16.
        # BUGFIX: removed dead local (multiprocessing.cpu_count() was
        # computed here but never used — the branch always logs/uses 16).
        print("As enableActualShellCommands is False, CPU core / queue count has been set to 16")
        logging.info("NIC queues:\t\t\t" + str(16))
        logging.info("CPU cores:\t\t\t" + str(16))
        logging.info("queuesAvailable set to:\t" + str(16))
        queuesAvailable = 16
    return queuesAvailable
|
||||||
|
|
||||||
|
def _validateRate(value, fieldName, lowerBound, rowNum):
    """Return True if *value* parses as an int >= *lowerBound*; warn otherwise.

    Helper for validateNetworkAndDevices(); fieldName/rowNum are only used
    to build the human-readable warning message.
    """
    try:
        if int(value) < lowerBound:
            warnings.warn("Provided " + fieldName + " '" + value + "' in ShapedDevices.csv at row " + str(rowNum) + " is < " + str(lowerBound) + " Mbps.", stacklevel=2)
            return False
    except ValueError:
        warnings.warn("Provided " + fieldName + " '" + value + "' in ShapedDevices.csv at row " + str(rowNum) + " is not a valid integer.", stacklevel=2)
        return False
    return True


def _validateIPField(ip_input, versionLabel, networkType, addressType, seenTheseIPsAlready, rowNum):
    """Validate a comma-separated IP field from ShapedDevices.csv.

    Checks each entry for duplicates (across the whole file, via the shared
    seenTheseIPsAlready list, which is mutated in place) and for syntactic
    validity as a host address or subnet of the expected IP version.
    Returns (allEntriesValid, listOfValidEntries).
    """
    fieldValid = True
    parsedEntries = []
    try:
        ip_input = ip_input.replace(' ', '')
        if "," in ip_input:
            ipList = ip_input.split(',')
        else:
            ipList = [ip_input]
        for ipEntry in ipList:
            if ipEntry in seenTheseIPsAlready:
                warnings.warn("Provided " + versionLabel + " '" + ipEntry + "' in ShapedDevices.csv at row " + str(rowNum) + " is duplicate.", stacklevel=2)
                fieldValid = False
                seenTheseIPsAlready.append(ipEntry)
            else:
                # Accept either a subnet (ip_network) or a plain host address
                # of the expected version; anything else is flagged invalid.
                if (type(ipaddress.ip_network(ipEntry)) is networkType) or (type(ipaddress.ip_address(ipEntry)) is addressType):
                    # BUGFIX: was list.extend(ipEntry), which appended the
                    # string one CHARACTER at a time instead of whole entries.
                    parsedEntries.append(ipEntry)
                else:
                    warnings.warn("Provided " + versionLabel + " '" + ipEntry + "' in ShapedDevices.csv at row " + str(rowNum) + " is not valid.", stacklevel=2)
                    fieldValid = False
                seenTheseIPsAlready.append(ipEntry)
    except ValueError:
        # ipaddress raises ValueError for malformed entries (e.g. host bits
        # set on a subnet); report the whole field as invalid.
        warnings.warn("Provided " + versionLabel + " '" + ip_input + "' in ShapedDevices.csv at row " + str(rowNum) + " is not valid.", stacklevel=2)
        fieldValid = False
    return fieldValid, parsedEntries


def validateNetworkAndDevices():
    """Validate network.json and ShapedDevices.csv before applying queues.

    Every problem found emits a warning (validation continues so all issues
    are reported in one pass). Returns True only when BOTH files pass.
    """
    # Verify network.json is valid JSON
    networkValidatedOrNot = True
    with open('network.json') as file:
        try:
            temporaryVariable = json.load(file)  # parsed only to confirm validity
        except json.decoder.JSONDecodeError:
            warnings.warn("network.json is an invalid JSON file", stacklevel=2)  # in case json is invalid
            networkValidatedOrNot = False
    if networkValidatedOrNot == True:
        print("network.json passed validation")
    # Verify ShapedDevices.csv is valid
    devicesValidatedOrNot = True  # True by default, switches to False if ANY entry fails validation
    rowNum = 2  # first data row of the file (after header) — used in messages
    with open('ShapedDevices.csv') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        # Remove comments if any
        commentsRemoved = []
        for row in csv_reader:
            if not row[0].startswith('#'):
                commentsRemoved.append(row)
        # Remove header
        commentsRemoved.pop(0)
        seenTheseIPsAlready = []
        for row in commentsRemoved:
            circuitID, circuitName, deviceID, deviceName, ParentNode, mac, ipv4_input, ipv6_input, downloadMin, uploadMin, downloadMax, uploadMax, comment = row
            # Each entry can list multiple IPv4s/IPv6s separated by commas.
            # Split them up and parse each to ensure validity.
            if ipv4_input != "":
                fieldOK, ipv4_subnets_and_hosts = _validateIPField(ipv4_input, "IPv4", ipaddress.IPv4Network, ipaddress.IPv4Address, seenTheseIPsAlready, rowNum)
                if not fieldOK:
                    devicesValidatedOrNot = False
            if ipv6_input != "":
                fieldOK, ipv6_subnets_and_hosts = _validateIPField(ipv6_input, "IPv6", ipaddress.IPv6Network, ipaddress.IPv6Address, seenTheseIPsAlready, rowNum)
                if not fieldOK:
                    devicesValidatedOrNot = False
            # Bandwidth plan sanity: mins must be >= 1 Mbps, maxes >= 2 Mbps.
            if not _validateRate(downloadMin, "downloadMin", 1, rowNum):
                devicesValidatedOrNot = False
            if not _validateRate(uploadMin, "uploadMin", 1, rowNum):
                devicesValidatedOrNot = False
            if not _validateRate(downloadMax, "downloadMax", 2, rowNum):
                devicesValidatedOrNot = False
            if not _validateRate(uploadMax, "uploadMax", 2, rowNum):
                devicesValidatedOrNot = False
            try:
                if int(downloadMin) > int(downloadMax):
                    warnings.warn("Provided downloadMin '" + downloadMin + "' in ShapedDevices.csv at row " + str(rowNum) + " is greater than downloadMax", stacklevel=2)
                    devicesValidatedOrNot = False
                if int(uploadMin) > int(uploadMax):
                    # BUGFIX: message previously interpolated downloadMin.
                    warnings.warn("Provided uploadMin '" + uploadMin + "' in ShapedDevices.csv at row " + str(rowNum) + " is greater than uploadMax", stacklevel=2)
                    devicesValidatedOrNot = False
            except ValueError:
                devicesValidatedOrNot = False
            rowNum += 1
    if devicesValidatedOrNot == True:
        print("ShapedDevices.csv passed validation")
    else:
        print("ShapedDevices.csv failed validation")
    # BUGFIX: previously tested devicesValidatedOrNot twice, so an invalid
    # network.json never caused validation to fail.
    if networkValidatedOrNot and devicesValidatedOrNot:
        return True
    else:
        return False
|
||||||
|
|
||||||
|
def refreshShapers():
|
||||||
|
|
||||||
|
# Starting
|
||||||
|
print("refreshShapers starting at " + datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
|
||||||
|
|
||||||
|
|
||||||
|
# Warn user if enableActualShellCommands is False, because that would mean no actual commands are executing
|
||||||
|
if enableActualShellCommands == False:
|
||||||
|
warnings.warn("enableActualShellCommands is set to False. None of the commands below will actually be executed. Simulated run.", stacklevel=2)
|
||||||
|
|
||||||
|
|
||||||
|
# Check if first run since boot
|
||||||
|
isThisFirstRunSinceBoot = checkIfFirstRunSinceBoot()
|
||||||
|
|
||||||
|
|
||||||
|
# Automatically account for TCP overhead of plans. For example a 100Mbps plan needs to be set to 109Mbps for the user to ever see that result on a speed test
|
||||||
|
# Does not apply to nodes of any sort, just endpoint devices
|
||||||
|
tcpOverheadFactor = 1.09
|
||||||
|
|
||||||
|
|
||||||
|
# Files
|
||||||
|
shapedDevicesFile = 'ShapedDevices.csv'
|
||||||
|
networkJSONfile = 'network.json'
|
||||||
|
|
||||||
|
|
||||||
|
# Check validation
|
||||||
|
safeToRunRefresh = False
|
||||||
|
if (validateNetworkAndDevices() == True):
|
||||||
|
shutil.copyfile('ShapedDevices.csv', 'lastGoodConfig.csv')
|
||||||
|
shutil.copyfile('network.json', 'lastGoodConfig.json')
|
||||||
|
print("Backed up good config as lastGoodConfig.csv and lastGoodConfig.json")
|
||||||
|
safeToRunRefresh = True
|
||||||
|
else:
|
||||||
|
if (isThisFirstRunSinceBoot == False):
|
||||||
|
warnings.warn("Validation failed. Because this is not the first run since boot (queues already set up) - will now exit.", stacklevel=2)
|
||||||
|
safeToRunRefresh = False
|
||||||
|
else:
|
||||||
|
warnings.warn("Validation failed. However - because this is the first run since boot - will load queues from last good config", stacklevel=2)
|
||||||
|
shapedDevicesFile = 'lastGoodConfig.csv'
|
||||||
|
networkJSONfile = 'lastGoodConfig.json'
|
||||||
|
safeToRunRefresh = True
|
||||||
|
|
||||||
|
if safeToRunRefresh == True:
|
||||||
|
|
||||||
|
# Load Subscriber Circuits & Devices
|
||||||
|
subscriberCircuits = []
|
||||||
|
knownCircuitIDs = []
|
||||||
|
counterForCircuitsWithoutParentNodes = 0
|
||||||
|
dictForCircuitsWithoutParentNodes = {}
|
||||||
|
with open(shapedDevicesFile) as csv_file:
|
||||||
|
csv_reader = csv.reader(csv_file, delimiter=',')
|
||||||
|
# Remove comments if any
|
||||||
|
commentsRemoved = []
|
||||||
|
for row in csv_reader:
|
||||||
|
if not row[0].startswith('#'):
|
||||||
|
commentsRemoved.append(row)
|
||||||
|
# Remove header
|
||||||
|
commentsRemoved.pop(0)
|
||||||
|
for row in commentsRemoved:
|
||||||
|
circuitID, circuitName, deviceID, deviceName, ParentNode, mac, ipv4_input, ipv6_input, downloadMin, uploadMin, downloadMax, uploadMax, comment = row
|
||||||
|
ipv4_subnets_and_hosts = []
|
||||||
|
# Each entry in ShapedDevices.csv can have multiple IPv4s or IPv6s seperated by commas. Split them up and parse each
|
||||||
|
if ipv4_input != "":
|
||||||
|
ipv4_input = ipv4_input.replace(' ','')
|
||||||
|
if "," in ipv4_input:
|
||||||
|
ipv4_list = ipv4_input.split(',')
|
||||||
|
else:
|
||||||
|
ipv4_list = [ipv4_input]
|
||||||
|
for ipEntry in ipv4_list:
|
||||||
|
ipv4_subnets_and_hosts.append(ipEntry)
|
||||||
|
ipv6_subnets_and_hosts = []
|
||||||
|
if ipv6_input != "":
|
||||||
|
ipv6_input = ipv6_input.replace(' ','')
|
||||||
|
if "," in ipv6_input:
|
||||||
|
ipv6_list = ipv6_input.split(',')
|
||||||
|
else:
|
||||||
|
ipv6_list = [ipv6_input]
|
||||||
|
for ipEntry in ipv6_list:
|
||||||
|
ipv6_subnets_and_hosts.append(ipEntry)
|
||||||
|
# If there is something in the circuit ID field
|
||||||
|
if circuitID != "":
|
||||||
|
# Seen circuit before
|
||||||
|
if circuitID in knownCircuitIDs:
|
||||||
|
for circuit in subscriberCircuits:
|
||||||
|
if circuit['circuitID'] == circuitID:
|
||||||
|
if circuit['ParentNode'] != "none":
|
||||||
|
if circuit['ParentNode'] != ParentNode:
|
||||||
|
errorMessageString = "Device " + deviceName + " with deviceID " + deviceID + " had different Parent Node from other devices of circuit ID #" + circuitID
|
||||||
|
raise ValueError(errorMessageString)
|
||||||
|
if ((circuit['downloadMin'] != round(int(downloadMin)*tcpOverheadFactor))
|
||||||
|
or (circuit['uploadMin'] != round(int(uploadMin)*tcpOverheadFactor))
|
||||||
|
or (circuit['downloadMax'] != round(int(downloadMax)*tcpOverheadFactor))
|
||||||
|
or (circuit['uploadMax'] != round(int(uploadMax)*tcpOverheadFactor))):
|
||||||
|
warnings.warn("Device " + deviceName + " with ID " + deviceID + " had different bandwidth parameters than other devices on this circuit. Will instead use the bandwidth parameters defined by the first device added to its circuit.", stacklevel=2)
|
||||||
|
devicesListForCircuit = circuit['devices']
|
||||||
|
thisDevice = {
|
||||||
|
"deviceID": deviceID,
|
||||||
|
"deviceName": deviceName,
|
||||||
|
"mac": mac,
|
||||||
|
"ipv4s": ipv4_subnets_and_hosts,
|
||||||
|
"ipv6s": ipv6_subnets_and_hosts,
|
||||||
|
"comment": comment
|
||||||
|
}
|
||||||
|
devicesListForCircuit.append(thisDevice)
|
||||||
|
circuit['devices'] = devicesListForCircuit
|
||||||
|
# Have not seen circuit before
|
||||||
|
else:
|
||||||
|
knownCircuitIDs.append(circuitID)
|
||||||
|
if ParentNode == "":
|
||||||
|
ParentNode = "none"
|
||||||
|
ParentNode = ParentNode.strip()
|
||||||
|
deviceListForCircuit = []
|
||||||
|
thisDevice = {
|
||||||
|
"deviceID": deviceID,
|
||||||
|
"deviceName": deviceName,
|
||||||
|
"mac": mac,
|
||||||
|
"ipv4s": ipv4_subnets_and_hosts,
|
||||||
|
"ipv6s": ipv6_subnets_and_hosts,
|
||||||
|
"comment": comment
|
||||||
|
}
|
||||||
|
deviceListForCircuit.append(thisDevice)
|
||||||
|
thisCircuit = {
|
||||||
|
"circuitID": circuitID,
|
||||||
|
"circuitName": circuitName,
|
||||||
|
"ParentNode": ParentNode,
|
||||||
|
"devices": deviceListForCircuit,
|
||||||
|
"downloadMin": round(int(downloadMin)*tcpOverheadFactor),
|
||||||
|
"uploadMin": round(int(uploadMin)*tcpOverheadFactor),
|
||||||
|
"downloadMax": round(int(downloadMax)*tcpOverheadFactor),
|
||||||
|
"uploadMax": round(int(uploadMax)*tcpOverheadFactor),
|
||||||
|
"qdisc": '',
|
||||||
|
"comment": comment
|
||||||
|
}
|
||||||
|
if thisCircuit['ParentNode'] == 'none':
|
||||||
|
thisCircuit['idForCircuitsWithoutParentNodes'] = counterForCircuitsWithoutParentNodes
|
||||||
|
dictForCircuitsWithoutParentNodes[counterForCircuitsWithoutParentNodes] = ((round(int(downloadMax)*tcpOverheadFactor))+(round(int(uploadMax)*tcpOverheadFactor)))
|
||||||
|
counterForCircuitsWithoutParentNodes += 1
|
||||||
|
subscriberCircuits.append(thisCircuit)
|
||||||
|
# If there is nothing in the circuit ID field
|
||||||
|
else:
|
||||||
|
# Copy deviceName to circuitName if none defined already
|
||||||
|
if circuitName == "":
|
||||||
|
circuitName = deviceName
|
||||||
|
if ParentNode == "":
|
||||||
|
ParentNode = "none"
|
||||||
|
ParentNode = ParentNode.strip()
|
||||||
|
deviceListForCircuit = []
|
||||||
|
thisDevice = {
|
||||||
|
"deviceID": deviceID,
|
||||||
|
"deviceName": deviceName,
|
||||||
|
"mac": mac,
|
||||||
|
"ipv4s": ipv4_subnets_and_hosts,
|
||||||
|
"ipv6s": ipv6_subnets_and_hosts,
|
||||||
|
}
|
||||||
|
deviceListForCircuit.append(thisDevice)
|
||||||
|
thisCircuit = {
|
||||||
|
"circuitID": circuitID,
|
||||||
|
"circuitName": circuitName,
|
||||||
|
"ParentNode": ParentNode,
|
||||||
|
"devices": deviceListForCircuit,
|
||||||
|
"downloadMin": round(int(downloadMin)*tcpOverheadFactor),
|
||||||
|
"uploadMin": round(int(uploadMin)*tcpOverheadFactor),
|
||||||
|
"downloadMax": round(int(downloadMax)*tcpOverheadFactor),
|
||||||
|
"uploadMax": round(int(uploadMax)*tcpOverheadFactor),
|
||||||
|
"qdisc": '',
|
||||||
|
"comment": comment
|
||||||
|
}
|
||||||
|
if thisCircuit['ParentNode'] == 'none':
|
||||||
|
thisCircuit['idForCircuitsWithoutParentNodes'] = counterForCircuitsWithoutParentNodes
|
||||||
|
dictForCircuitsWithoutParentNodes[counterForCircuitsWithoutParentNodes] = ((round(int(downloadMax)*tcpOverheadFactor))+(round(int(uploadMax)*tcpOverheadFactor)))
|
||||||
|
counterForCircuitsWithoutParentNodes += 1
|
||||||
|
subscriberCircuits.append(thisCircuit)
|
||||||
|
|
||||||
|
|
||||||
|
# Load network heirarchy
|
||||||
|
with open(networkJSONfile, 'r') as j:
|
||||||
|
network = json.loads(j.read())
|
||||||
|
|
||||||
|
|
||||||
|
# Pull rx/tx queues / CPU cores available
|
||||||
|
queuesAvailable = findQueuesAvailable()
|
||||||
|
|
||||||
|
|
||||||
|
# Generate Parent Nodes. Spread ShapedDevices.csv which lack defined ParentNode across these (balance across CPUs)
|
||||||
|
generatedPNs = []
|
||||||
|
for x in range(queuesAvailable):
|
||||||
|
genPNname = "Generated_PN_" + str(x+1)
|
||||||
|
network[genPNname] = {
|
||||||
|
"downloadBandwidthMbps":generatedPNDownloadMbps,
|
||||||
|
"uploadBandwidthMbps":generatedPNUploadMbps
|
||||||
|
}
|
||||||
|
generatedPNs.append(genPNname)
|
||||||
|
bins = binpacking.to_constant_bin_number(dictForCircuitsWithoutParentNodes, queuesAvailable)
|
||||||
|
genPNcounter = 0
|
||||||
|
for binItem in bins:
|
||||||
|
sumItem = 0
|
||||||
|
logging.info(generatedPNs[genPNcounter] + " will contain " + str(len(binItem)) + " circuits")
|
||||||
|
for key in binItem.keys():
|
||||||
|
for circuit in subscriberCircuits:
|
||||||
|
if circuit['ParentNode'] == 'none':
|
||||||
|
if circuit['idForCircuitsWithoutParentNodes'] == key:
|
||||||
|
circuit['ParentNode'] = generatedPNs[genPNcounter]
|
||||||
|
genPNcounter += 1
|
||||||
|
if genPNcounter >= queuesAvailable:
|
||||||
|
genPNcounter = 0
|
||||||
|
|
||||||
|
|
||||||
|
# Find the bandwidth minimums for each node by combining mimimums of devices lower in that node's heirarchy
|
||||||
|
def findBandwidthMins(data, depth):
|
||||||
|
tabs = ' ' * depth
|
||||||
|
minDownload = 0
|
||||||
|
minUpload = 0
|
||||||
|
for elem in data:
|
||||||
|
for circuit in subscriberCircuits:
|
||||||
|
if elem == circuit['ParentNode']:
|
||||||
|
minDownload += circuit['downloadMin']
|
||||||
|
minUpload += circuit['uploadMin']
|
||||||
|
if 'children' in data[elem]:
|
||||||
|
minDL, minUL = findBandwidthMins(data[elem]['children'], depth+1)
|
||||||
|
minDownload += minDL
|
||||||
|
minUpload += minUL
|
||||||
|
data[elem]['downloadBandwidthMbpsMin'] = minDownload
|
||||||
|
data[elem]['uploadBandwidthMbpsMin'] = minUpload
|
||||||
|
return minDownload, minUpload
|
||||||
|
minDownload, minUpload = findBandwidthMins(network, 0)
|
||||||
|
|
||||||
|
|
||||||
|
# Parse network structure and add devices from ShapedDevices.csv
|
||||||
|
linuxTCcommands = []
|
||||||
|
xdpCPUmapCommands = []
|
||||||
|
parentNodes = []
|
||||||
|
def traverseNetwork(data, depth, major, minor, queue, parentClassID, parentMaxDL, parentMaxUL):
|
||||||
|
for node in data:
|
||||||
|
circuitsForThisNetworkNode = []
|
||||||
|
nodeClassID = hex(major) + ':' + hex(minor)
|
||||||
|
data[node]['classid'] = nodeClassID
|
||||||
|
data[node]['parentClassID'] = parentClassID
|
||||||
|
# Cap based on this node's max bandwidth, or parent node's max bandwidth, whichever is lower
|
||||||
|
data[node]['downloadBandwidthMbps'] = min(data[node]['downloadBandwidthMbps'],parentMaxDL)
|
||||||
|
data[node]['uploadBandwidthMbps'] = min(data[node]['uploadBandwidthMbps'],parentMaxUL)
|
||||||
|
# Calculations are done in findBandwidthMins(), determine optimal HTB rates (mins) and ceils (maxs)
|
||||||
|
# For some reason that doesn't always yield the expected result, so it's better to play with ceil more than rate
|
||||||
|
# Here we override the rate as 95% of ceil.
|
||||||
|
data[node]['downloadBandwidthMbpsMin'] = round(data[node]['downloadBandwidthMbps']*.95)
|
||||||
|
data[node]['uploadBandwidthMbpsMin'] = round(data[node]['uploadBandwidthMbps']*.95)
|
||||||
|
data[node]['classMajor'] = hex(major)
|
||||||
|
data[node]['classMinor'] = hex(minor)
|
||||||
|
data[node]['cpuNum'] = hex(queue-1)
|
||||||
|
thisParentNode = {
|
||||||
|
"parentNodeName": node,
|
||||||
|
"classID": nodeClassID,
|
||||||
|
"downloadMax": data[node]['downloadBandwidthMbps'],
|
||||||
|
"uploadMax": data[node]['uploadBandwidthMbps'],
|
||||||
|
}
|
||||||
|
parentNodes.append(thisParentNode)
|
||||||
|
minor += 1
|
||||||
|
for circuit in subscriberCircuits:
|
||||||
|
#If a device from ShapedDevices.csv lists this node as its Parent Node, attach it as a leaf to this node HTB
|
||||||
|
if node == circuit['ParentNode']:
|
||||||
|
if circuit['downloadMax'] > data[node]['downloadBandwidthMbps']:
|
||||||
|
warnings.warn("downloadMax of Circuit ID [" + circuit['circuitID'] + "] exceeded that of its parent node. Reducing to that of its parent node now.", stacklevel=2)
|
||||||
|
if circuit['uploadMax'] > data[node]['uploadBandwidthMbps']:
|
||||||
|
warnings.warn("uploadMax of Circuit ID [" + circuit['circuitID'] + "] exceeded that of its parent node. Reducing to that of its parent node now.", stacklevel=2)
|
||||||
|
parentString = hex(major) + ':'
|
||||||
|
flowIDstring = hex(major) + ':' + hex(minor)
|
||||||
|
circuit['qdisc'] = flowIDstring
|
||||||
|
# Create circuit dictionary to be added to network structure, eventually output as queuingStructure.json
|
||||||
|
maxDownload = min(circuit['downloadMax'],data[node]['downloadBandwidthMbps'])
|
||||||
|
maxUpload = min(circuit['uploadMax'],data[node]['uploadBandwidthMbps'])
|
||||||
|
minDownload = min(circuit['downloadMin'],maxDownload)
|
||||||
|
minUpload = min(circuit['uploadMin'],maxUpload)
|
||||||
|
thisNewCircuitItemForNetwork = {
|
||||||
|
'maxDownload' : maxDownload,
|
||||||
|
'maxUpload' : maxUpload,
|
||||||
|
'minDownload' : minDownload,
|
||||||
|
'minUpload' : minUpload,
|
||||||
|
"circuitID": circuit['circuitID'],
|
||||||
|
"circuitName": circuit['circuitName'],
|
||||||
|
"ParentNode": circuit['ParentNode'],
|
||||||
|
"devices": circuit['devices'],
|
||||||
|
"qdisc": flowIDstring,
|
||||||
|
"classMajor": hex(major),
|
||||||
|
"classMinor": hex(minor),
|
||||||
|
"comment": circuit['comment']
|
||||||
|
}
|
||||||
|
# Generate TC commands to be executed later
|
||||||
|
thisNewCircuitItemForNetwork['devices'] = circuit['devices']
|
||||||
|
circuitsForThisNetworkNode.append(thisNewCircuitItemForNetwork)
|
||||||
|
minor += 1
|
||||||
|
if len(circuitsForThisNetworkNode) > 0:
|
||||||
|
data[node]['circuits'] = circuitsForThisNetworkNode
|
||||||
|
# Recursive call this function for children nodes attached to this node
|
||||||
|
if 'children' in data[node]:
|
||||||
|
# We need to keep tabs on the minor counter, because we can't have repeating class IDs. Here, we bring back the minor counter from the recursive function
|
||||||
|
minor = traverseNetwork(data[node]['children'], depth+1, major, minor+1, queue, nodeClassID, data[node]['downloadBandwidthMbps'], data[node]['uploadBandwidthMbps'])
|
||||||
|
# If top level node, increment to next queue / cpu core
|
||||||
|
if depth == 0:
|
||||||
|
if queue >= queuesAvailable:
|
||||||
|
queue = 1
|
||||||
|
major = queue
|
||||||
|
else:
|
||||||
|
queue += 1
|
||||||
|
major += 1
|
||||||
|
return minor
|
||||||
|
# Here is the actual call to the recursive traverseNetwork() function. finalMinor is not used.
|
||||||
|
finalMinor = traverseNetwork(network, 0, major=1, minor=3, queue=1, parentClassID="1:1", parentMaxDL=upstreamBandwidthCapacityDownloadMbps, parentMaxUL=upstreamBandwidthCapacityUploadMbps)
|
||||||
|
|
||||||
|
|
||||||
|
linuxTCcommands = []
|
||||||
|
xdpCPUmapCommands = []
|
||||||
|
devicesShaped = []
|
||||||
|
# Root HTB Setup
|
||||||
|
# Create MQ qdisc for each CPU core / rx-tx queue (XDP method - requires IPv4)
|
||||||
|
thisInterface = interfaceA
|
||||||
|
logging.info("# MQ Setup for " + thisInterface)
|
||||||
|
command = 'qdisc replace dev ' + thisInterface + ' root handle 7FFF: mq'
|
||||||
|
linuxTCcommands.append(command)
|
||||||
|
for queue in range(queuesAvailable):
|
||||||
|
command = 'qdisc add dev ' + thisInterface + ' parent 7FFF:' + hex(queue+1) + ' handle ' + hex(queue+1) + ': htb default 2'
|
||||||
|
linuxTCcommands.append(command)
|
||||||
|
command = 'class add dev ' + thisInterface + ' parent ' + hex(queue+1) + ': classid ' + hex(queue+1) + ':1 htb rate '+ str(upstreamBandwidthCapacityDownloadMbps) + 'mbit ceil ' + str(upstreamBandwidthCapacityDownloadMbps) + 'mbit'
|
||||||
|
linuxTCcommands.append(command)
|
||||||
|
command = 'qdisc add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':1 ' + fqOrCAKE
|
||||||
|
linuxTCcommands.append(command)
|
||||||
|
# Default class - traffic gets passed through this limiter with lower priority if it enters the top HTB without a specific class.
|
||||||
|
# Technically, that should not even happen. So don't expect much if any traffic in this default class.
|
||||||
|
# Only 1/4 of defaultClassCapacity is guarenteed (to prevent hitting ceiling of upstream), for the most part it serves as an "up to" ceiling.
|
||||||
|
command = 'class add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':1 classid ' + hex(queue+1) + ':2 htb rate ' + str(round((upstreamBandwidthCapacityDownloadMbps-1)/4)) + 'mbit ceil ' + str(upstreamBandwidthCapacityDownloadMbps-1) + 'mbit prio 5'
|
||||||
|
linuxTCcommands.append(command)
|
||||||
|
command = 'qdisc add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':2 ' + fqOrCAKE
|
||||||
|
linuxTCcommands.append(command)
|
||||||
|
|
||||||
|
thisInterface = interfaceB
|
||||||
|
logging.info("# MQ Setup for " + thisInterface)
|
||||||
|
command = 'qdisc replace dev ' + thisInterface + ' root handle 7FFF: mq'
|
||||||
|
linuxTCcommands.append(command)
|
||||||
|
for queue in range(queuesAvailable):
|
||||||
|
command = 'qdisc add dev ' + thisInterface + ' parent 7FFF:' + hex(queue+1) + ' handle ' + hex(queue+1) + ': htb default 2'
|
||||||
|
linuxTCcommands.append(command)
|
||||||
|
command = 'class add dev ' + thisInterface + ' parent ' + hex(queue+1) + ': classid ' + hex(queue+1) + ':1 htb rate '+ str(upstreamBandwidthCapacityUploadMbps) + 'mbit ceil ' + str(upstreamBandwidthCapacityUploadMbps) + 'mbit'
|
||||||
|
linuxTCcommands.append(command)
|
||||||
|
command = 'qdisc add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':1 ' + fqOrCAKE
|
||||||
|
linuxTCcommands.append(command)
|
||||||
|
# Default class - traffic gets passed through this limiter with lower priority if it enters the top HTB without a specific class.
|
||||||
|
# Technically, that should not even happen. So don't expect much if any traffic in this default class.
|
||||||
|
# Only 1/4 of defaultClassCapacity is guarenteed (to prevent hitting ceiling of upstream), for the most part it serves as an "up to" ceiling.
|
||||||
|
command = 'class add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':1 classid ' + hex(queue+1) + ':2 htb rate ' + str(round((upstreamBandwidthCapacityUploadMbps-1)/4)) + 'mbit ceil ' + str(upstreamBandwidthCapacityUploadMbps-1) + 'mbit prio 5'
|
||||||
|
linuxTCcommands.append(command)
|
||||||
|
command = 'qdisc add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':2 ' + fqOrCAKE
|
||||||
|
linuxTCcommands.append(command)
|
||||||
|
|
||||||
|
|
||||||
|
# Parse network structure. For each tier, generate commands to create corresponding HTB and leaf classes. Prepare commands for execution later
|
||||||
|
# Define lists for hash filters
|
||||||
|
def traverseNetwork(data):
|
||||||
|
for node in data:
|
||||||
|
command = 'class add dev ' + interfaceA + ' parent ' + data[node]['parentClassID'] + ' classid ' + data[node]['classMinor'] + ' htb rate '+ str(data[node]['downloadBandwidthMbpsMin']) + 'mbit ceil '+ str(data[node]['downloadBandwidthMbps']) + 'mbit prio 3' + " # Node: " + node
|
||||||
|
linuxTCcommands.append(command)
|
||||||
|
command = 'class add dev ' + interfaceB + ' parent ' + data[node]['parentClassID'] + ' classid ' + data[node]['classMinor'] + ' htb rate '+ str(data[node]['uploadBandwidthMbpsMin']) + 'mbit ceil '+ str(data[node]['uploadBandwidthMbps']) + 'mbit prio 3'
|
||||||
|
linuxTCcommands.append(command)
|
||||||
|
if 'circuits' in data[node]:
|
||||||
|
for circuit in data[node]['circuits']:
|
||||||
|
# Generate TC commands to be executed later
|
||||||
|
comment = " # CircuitID: " + circuit['circuitID'] + " DeviceIDs: "
|
||||||
|
for device in circuit['devices']:
|
||||||
|
comment = comment + device['deviceID'] + ', '
|
||||||
|
if 'devices' in circuit:
|
||||||
|
if 'comment' in circuit['devices'][0]:
|
||||||
|
comment = comment + '| Comment: ' + circuit['devices'][0]['comment']
|
||||||
|
command = 'class add dev ' + interfaceA + ' parent ' + data[node]['classid'] + ' classid ' + circuit['classMinor'] + ' htb rate '+ str(circuit['minDownload']) + 'mbit ceil '+ str(circuit['maxDownload']) + 'mbit prio 3' + comment
|
||||||
|
linuxTCcommands.append(command)
|
||||||
|
command = 'qdisc add dev ' + interfaceA + ' parent ' + circuit['classMajor'] + ':' + circuit['classMinor'] + ' ' + fqOrCAKE
|
||||||
|
linuxTCcommands.append(command)
|
||||||
|
command = 'class add dev ' + interfaceB + ' parent ' + data[node]['classid'] + ' classid ' + circuit['classMinor'] + ' htb rate '+ str(circuit['minUpload']) + 'mbit ceil '+ str(circuit['maxUpload']) + 'mbit prio 3'
|
||||||
|
linuxTCcommands.append(command)
|
||||||
|
command = 'qdisc add dev ' + interfaceB + ' parent ' + circuit['classMajor'] + ':' + circuit['classMinor'] + ' ' + fqOrCAKE
|
||||||
|
linuxTCcommands.append(command)
|
||||||
|
for device in circuit['devices']:
|
||||||
|
if device['ipv4s']:
|
||||||
|
for ipv4 in device['ipv4s']:
|
||||||
|
if '/' in ipv4:
|
||||||
|
ipv4AddressOnly, prefixOnly = ipv4.split('/')
|
||||||
|
xdpCPUmapCommands.append('./xdp-cpumap-tc/src/xdp_iphash_to_cpu_cmdline --add --ip ' + str(ipv4AddressOnly) + ' --cpu ' + data[node]['cpuNum'] + ' --classid ' + circuit['qdisc'] + ' --prefix ' + prefixOnly)
|
||||||
|
else:
|
||||||
|
xdpCPUmapCommands.append('./xdp-cpumap-tc/src/xdp_iphash_to_cpu_cmdline --add --ip ' + str(ipv4) + ' --cpu ' + data[node]['cpuNum'] + ' --classid ' + circuit['qdisc'])
|
||||||
|
if device['ipv6s']:
|
||||||
|
for ipv6 in device['ipv6s']:
|
||||||
|
if '/' in ipv6:
|
||||||
|
ipv6AddressOnly, prefixOnly = ipv6.split('/')
|
||||||
|
xdpCPUmapCommands.append('./xdp-cpumap-tc/src/xdp_iphash_to_cpu_cmdline --add --ip ' + str(ipv6AddressOnly) + ' --cpu ' + data[node]['cpuNum'] + ' --classid ' + circuit['qdisc'] + ' --prefix ' + prefixOnly)
|
||||||
|
else:
|
||||||
|
xdpCPUmapCommands.append('./xdp-cpumap-tc/src/xdp_iphash_to_cpu_cmdline --add --ip ' + str(ipv6) + ' --cpu ' + data[node]['cpuNum'] + ' --classid ' + circuit['qdisc'])
|
||||||
|
if device['deviceName'] not in devicesShaped:
|
||||||
|
devicesShaped.append(device['deviceName'])
|
||||||
|
# Recursive call this function for children nodes attached to this node
|
||||||
|
if 'children' in data[node]:
|
||||||
|
traverseNetwork(data[node]['children'])
|
||||||
|
# Here is the actual call to the recursive traverseNetwork() function. finalResult is not used.
|
||||||
|
traverseNetwork(network)
|
||||||
|
|
||||||
|
|
||||||
|
# Save queuingStructure
|
||||||
|
with open('queuingStructure.json', 'w') as infile:
|
||||||
|
json.dump(network, infile, indent=4)
|
||||||
|
|
||||||
|
|
||||||
|
# Record start time of actual filter reload
|
||||||
|
reloadStartTime = datetime.now()
|
||||||
|
|
||||||
|
|
||||||
|
# Clear Prior Settings
|
||||||
|
clearPriorSettings(interfaceA, interfaceB)
|
||||||
|
|
||||||
|
|
||||||
|
# Setup XDP and disable XPS regardless of whether it is first run or not (necessary to handle cases where systemctl stop was used)
|
||||||
|
xdpStartTime = datetime.now()
|
||||||
|
if enableActualShellCommands:
|
||||||
|
# Here we use os.system for the command, because otherwise it sometimes gltiches out with Popen in shell()
|
||||||
|
result = os.system('./xdp-cpumap-tc/src/xdp_iphash_to_cpu_cmdline --clear')
|
||||||
|
# Set up XDP-CPUMAP-TC
|
||||||
|
logging.info("# XDP Setup")
|
||||||
|
shell('./xdp-cpumap-tc/bin/xps_setup.sh -d ' + interfaceA + ' --default --disable')
|
||||||
|
shell('./xdp-cpumap-tc/bin/xps_setup.sh -d ' + interfaceB + ' --default --disable')
|
||||||
|
shell('./xdp-cpumap-tc/src/xdp_iphash_to_cpu --dev ' + interfaceA + ' --lan')
|
||||||
|
shell('./xdp-cpumap-tc/src/xdp_iphash_to_cpu --dev ' + interfaceB + ' --wan')
|
||||||
|
shell('./xdp-cpumap-tc/src/tc_classify --dev-egress ' + interfaceA)
|
||||||
|
shell('./xdp-cpumap-tc/src/tc_classify --dev-egress ' + interfaceB)
|
||||||
|
xdpEndTime = datetime.now()
|
||||||
|
|
||||||
|
|
||||||
|
# Execute actual Linux TC commands
|
||||||
|
tcStartTime = datetime.now()
|
||||||
|
print("Executing linux TC class/qdisc commands")
|
||||||
|
with open('linux_tc.txt', 'w') as f:
|
||||||
|
for command in linuxTCcommands:
|
||||||
|
logging.info(command)
|
||||||
|
f.write(f"{command}\n")
|
||||||
|
if logging.DEBUG <= logging.root.level:
|
||||||
|
# Do not --force in debug mode, so we can see any errors
|
||||||
|
shell("/sbin/tc -b linux_tc.txt")
|
||||||
|
else:
|
||||||
|
shell("/sbin/tc -f -b linux_tc.txt")
|
||||||
|
tcEndTime = datetime.now()
|
||||||
|
print("Executed " + str(len(linuxTCcommands)) + " linux TC class/qdisc commands")
|
||||||
|
|
||||||
|
|
||||||
|
# Execute actual XDP-CPUMAP-TC filter commands
|
||||||
|
xdpFilterStartTime = datetime.now()
|
||||||
|
print("Executing XDP-CPUMAP-TC IP filter commands")
|
||||||
|
if enableActualShellCommands:
|
||||||
|
for command in xdpCPUmapCommands:
|
||||||
|
logging.info(command)
|
||||||
|
commands = command.split(' ')
|
||||||
|
proc = subprocess.Popen(commands, stdout=subprocess.DEVNULL)
|
||||||
|
else:
|
||||||
|
for command in xdpCPUmapCommands:
|
||||||
|
logging.info(command)
|
||||||
|
print("Executed " + str(len(xdpCPUmapCommands)) + " XDP-CPUMAP-TC IP filter commands")
|
||||||
|
xdpFilterEndTime = datetime.now()
|
||||||
|
|
||||||
|
|
||||||
|
# Record end time of all reload commands
|
||||||
|
reloadEndTime = datetime.now()
|
||||||
|
|
||||||
|
|
||||||
|
# Recap - warn operator if devices were skipped
|
||||||
|
devicesSkipped = []
|
||||||
|
for circuit in subscriberCircuits:
|
||||||
|
for device in circuit['devices']:
|
||||||
|
if device['deviceName'] not in devicesShaped:
|
||||||
|
devicesSkipped.append((device['deviceName'],device['deviceID']))
|
||||||
|
if len(devicesSkipped) > 0:
|
||||||
|
warnings.warn('Some devices were not shaped. Please check to ensure they have a valid ParentNode listed in ShapedDevices.csv:', stacklevel=2)
|
||||||
|
print("Devices not shaped:")
|
||||||
|
for entry in devicesSkipped:
|
||||||
|
name, idNum = entry
|
||||||
|
print('DeviceID: ' + idNum + '\t DeviceName: ' + name)
|
||||||
|
|
||||||
|
|
||||||
|
# Save for stats
|
||||||
|
with open('statsByCircuit.json', 'w') as f:
|
||||||
|
f.write(json.dumps(subscriberCircuits, indent=4))
|
||||||
|
with open('statsByParentNode.json', 'w') as f:
|
||||||
|
f.write(json.dumps(parentNodes, indent=4))
|
||||||
|
|
||||||
|
|
||||||
|
# Record time this run completed at
|
||||||
|
# filename = os.path.join(_here, 'lastRun.txt')
|
||||||
|
with open("lastRun.txt", 'w') as file:
|
||||||
|
file.write(datetime.now().strftime("%d-%b-%Y (%H:%M:%S.%f)"))
|
||||||
|
|
||||||
|
|
||||||
|
# Report reload time
|
||||||
|
reloadTimeSeconds = ((reloadEndTime - reloadStartTime).seconds) + (((reloadEndTime - reloadStartTime).microseconds) / 1000000)
|
||||||
|
tcTimeSeconds = ((tcEndTime - tcStartTime).seconds) + (((tcEndTime - tcStartTime).microseconds) / 1000000)
|
||||||
|
xdpSetupTimeSeconds = ((xdpEndTime - xdpStartTime).seconds) + (((xdpEndTime - xdpStartTime).microseconds) / 1000000)
|
||||||
|
xdpFilterTimeSeconds = ((xdpFilterEndTime - xdpFilterStartTime).seconds) + (((xdpFilterEndTime - xdpFilterStartTime).microseconds) / 1000000)
|
||||||
|
print("Queue and IP filter reload completed in " + "{:g}".format(round(reloadTimeSeconds,1)) + " seconds")
|
||||||
|
print("\tTC commands: \t" + "{:g}".format(round(tcTimeSeconds,1)) + " seconds")
|
||||||
|
print("\tXDP setup: \t " + "{:g}".format(round(xdpSetupTimeSeconds,1)) + " seconds")
|
||||||
|
print("\tXDP filters: \t " + "{:g}".format(round(xdpFilterTimeSeconds,1)) + " seconds")
|
||||||
|
|
||||||
|
|
||||||
|
# Done
|
||||||
|
print("refreshShapers completed on " + datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
parser = argparse.ArgumentParser()
|
||||||
|
parser.add_argument(
|
||||||
|
'-d', '--debug',
|
||||||
|
help="Print lots of debugging statements",
|
||||||
|
action="store_const", dest="loglevel", const=logging.DEBUG,
|
||||||
|
default=logging.WARNING,
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
'-v', '--verbose',
|
||||||
|
help="Be verbose",
|
||||||
|
action="store_const", dest="loglevel", const=logging.INFO,
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
'--validate',
|
||||||
|
help="Just validate network.json and ShapedDevices.csv",
|
||||||
|
action=argparse.BooleanOptionalAction,
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
'--clearrules',
|
||||||
|
help="Clear ip filters, qdiscs, and xdp setup if any",
|
||||||
|
action=argparse.BooleanOptionalAction,
|
||||||
|
)
|
||||||
|
args = parser.parse_args()
|
||||||
|
logging.basicConfig(level=args.loglevel)
|
||||||
|
|
||||||
|
if args.validate:
|
||||||
|
status = validateNetworkAndDevices()
|
||||||
|
elif args.clearrules:
|
||||||
|
tearDown(interfaceA, interfaceB)
|
||||||
|
else:
|
||||||
|
# Refresh and/or set up queues
|
||||||
|
refreshShapers()
|
||||||
53
v1.3/README.md
Normal file
53
v1.3/README.md
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
# v1.2 (IPv4 + IPv6) (Stable)
|
||||||
|
|
||||||
|
<img alt="LibreQoS" src="https://raw.githubusercontent.com/rchac/LibreQoS/main/docs/v1.1-alpha-preview.jpg"></a>
|
||||||
|
|
||||||
|
## Installation Guide
|
||||||
|
- 📄 [LibreQoS v1.2 Installation & Usage Guide Physical Server and Ubuntu 22.04](https://github.com/rchac/LibreQoS/wiki/LibreQoS-v1.2-Installation-&-Usage-Guide-Physical-Server-and-Ubuntu-22.04)
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- Support for multiple devices per subscriber circuit. This allows for multiple IPv4s to be filtered into the same queue, without necessarily being in the same subnet.
|
||||||
|
|
||||||
|
- Support for multiple IPv4s or IPv6s per device
|
||||||
|
|
||||||
|
- Reduced reload time by 80%. Actual packet loss is <25ms on reload of queues.
|
||||||
|
|
||||||
|
- Command line arguments ```--debug```, ```--verbose```, ```--clearrules``` and ```--validate```.
|
||||||
|
|
||||||
|
- lqTools.py - ```change-circuit-bandwidth```, ```change-circuit-bandwidth-using-ip```, ```show-active-plan-from-ip```, ```tc-statistics-from-ip```
|
||||||
|
|
||||||
|
- Validation of ShapedDevices.csv and network.json during load. If either fails validation, LibreQoS pulls from the last known good configuration (lastGoodConfig.csv and lastGoodConfig.json).
|
||||||
|
|
||||||
|
## ShapedDevices.csv
|
||||||
|
Shaper.csv is now ShapedDevices.csv
|
||||||
|
|
||||||
|
New minimums apply to upload and download parameters:
|
||||||
|
|
||||||
|
* Download minimum must be 1Mbps or more
|
||||||
|
* Upload minimum must be 1Mbps or more
|
||||||
|
* Download maximum must be 2Mbps or more
|
||||||
|
* Upload maximum must be 2Mbps or more
|
||||||
|
|
||||||
|
ShapedDevices.csv now has a field for Circuit ID. If the listed Circuit ID is the same between two or more devices, those devices will all be placed into the same queue. If a Circuit ID is not provided for a device, it gets its own circuit. Circuit Name is optional, but recommended. The client's service loction address might be good to use as the Circuit Name.
|
||||||
|
|
||||||
|
## IPv6 Support
|
||||||
|
Full, XDP accelerated made possible by [@thebracket](https://github.com/thebracket)
|
||||||
|
|
||||||
|
## UISP Integration
|
||||||
|
This integration fully maps out your entire UISP network.
|
||||||
|
Add UISP info under "Optional UISP integration" in ispConfig.py
|
||||||
|
|
||||||
|
To use:
|
||||||
|
1. Delete network.json and, if you have it, integrationUISPbandwidths.csv
|
||||||
|
2. run ```python3 integrationUISP.py```
|
||||||
|
|
||||||
|
It will create a network.json with approximated bandwidths for APs based on UISP's reported capacities, and fixed bandwidth of 1000/1000 for sites.
|
||||||
|
You can modify integrationUISPbandwidths.csv to correct bandwidth rates. It will load integrationUISPbandwidths.csv on each run and use those listed bandwidths to create network.json. It will always overwrite ShapedDevices.csv on each run by pulling devices from UISP.
|
||||||
|
|
||||||
|
### UISP Integration - IPv6 Support
|
||||||
|
This will match IPv4 MAC addresses in the DHCP server leases of your mikrotik to DHCPv6 bindings, and include those IPv6 addresses with their respective devices.
|
||||||
|
|
||||||
|
To enable:
|
||||||
|
* Edit mikrotikDHCPRouterList.csv to list of your mikrotik DHCPv6 servers
|
||||||
|
* Set findIPv6usingMikrotik in ispConfig.py to True
|
||||||
14
v1.3/ShapedDevices.example.csv
Normal file
14
v1.3/ShapedDevices.example.csv
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
#LibreQoS - autogenerated file - START
|
||||||
|
Circuit ID,Circuit Name,Device ID,Device Name,Parent Node,MAC,IPv4,IPv6,Download Min,Upload Min,Download Max,Upload Max,Comment
|
||||||
|
,"968 Circle St., Gurnee, IL 60031",1,Device 1,AP_A,,"100.64.0.1, 100.64.0.14",,25,5,155,20,
|
||||||
|
,"31 Marconi Street, Lake In The Hills, IL 60156",2,Device 2,AP_A,,100.64.0.2,,25,5,105,18,
|
||||||
|
,"255 NW. Newport Ave., Jamestown, NY 14701",3,Device 3,AP_9,,100.64.0.3,,25,5,105,18,
|
||||||
|
,"8493 Campfire Street, Peabody, MA 01960",4,Device 4,AP_9,,100.64.0.4,,25,5,105,18,
|
||||||
|
2794,"6 Littleton Drive, Ringgold, GA 30736",5,Device 5,AP_11,,100.64.0.5,,25,5,105,18,
|
||||||
|
2794,"6 Littleton Drive, Ringgold, GA 30736",6,Device 6,AP_11,,100.64.0.6,,25,5,105,18,
|
||||||
|
,"93 Oklahoma Ave., Parsippany, NJ 07054",7,Device 7,AP_1,,100.64.0.7,,25,5,155,20,
|
||||||
|
,"74 Bishop Ave., Bakersfield, CA 93306",8,Device 8,AP_1,,100.64.0.8,,25,5,105,18,
|
||||||
|
,"9598 Peg Shop Drive, Lutherville Timonium, MD 21093",9,Device 9,AP_7,,100.64.0.9,,25,5,105,18,
|
||||||
|
,"115 Gartner Rd., Gettysburg, PA 17325",10,Device 10,AP_7,,100.64.0.10,,25,5,105,18,
|
||||||
|
,"525 Birchpond St., Romulus, MI 48174",11,Device 11,Site_1,,100.64.0.11,,25,5,105,18,
|
||||||
|
#LibreQoS - autogenerated file - EOF
|
||||||
|
Can't render this file because it has a wrong number of fields in line 2.
|
418
v1.3/graphBandwidth.py
Normal file
418
v1.3/graphBandwidth.py
Normal file
@@ -0,0 +1,418 @@
|
|||||||
|
import subprocess
|
||||||
|
import json
|
||||||
|
import subprocess
|
||||||
|
from datetime import datetime
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
from influxdb_client import InfluxDBClient, Point
|
||||||
|
from influxdb_client.client.write_api import SYNCHRONOUS
|
||||||
|
|
||||||
|
from ispConfig import interfaceA, interfaceB, influxDBBucket, influxDBOrg, influxDBtoken, influxDBurl, fqOrCAKE
|
||||||
|
|
||||||
|
|
||||||
|
def getInterfaceStats(interface):
|
||||||
|
command = 'tc -j -s qdisc show dev ' + interface
|
||||||
|
jsonAr = json.loads(subprocess.run(command.split(' '), stdout=subprocess.PIPE).stdout.decode('utf-8'))
|
||||||
|
jsonDict = {}
|
||||||
|
for element in filter(lambda e: 'parent' in e, jsonAr):
|
||||||
|
flowID = ':'.join(map(lambda p: f'0x{p}', element['parent'].split(':')[0:2]))
|
||||||
|
jsonDict[flowID] = element
|
||||||
|
del jsonAr
|
||||||
|
return jsonDict
|
||||||
|
|
||||||
|
|
||||||
|
def chunk_list(l, n):
|
||||||
|
for i in range(0, len(l), n):
|
||||||
|
yield l[i:i + n]
|
||||||
|
|
||||||
|
def getsubscriberCircuitstats(subscriberCircuits, tinsStats):
|
||||||
|
interfaces = [interfaceA, interfaceB]
|
||||||
|
ifaceStats = list(map(getInterfaceStats, interfaces))
|
||||||
|
|
||||||
|
for circuit in subscriberCircuits:
|
||||||
|
if 'stats' not in circuit:
|
||||||
|
circuit['stats'] = {}
|
||||||
|
if 'currentQuery' in circuit['stats']:
|
||||||
|
circuit['stats']['priorQuery'] = circuit['stats']['currentQuery']
|
||||||
|
circuit['stats']['currentQuery'] = {}
|
||||||
|
circuit['stats']['sinceLastQuery'] = {}
|
||||||
|
else:
|
||||||
|
#circuit['stats']['priorQuery'] = {}
|
||||||
|
#circuit['stats']['priorQuery']['time'] = datetime.now().isoformat()
|
||||||
|
circuit['stats']['currentQuery'] = {}
|
||||||
|
circuit['stats']['sinceLastQuery'] = {}
|
||||||
|
|
||||||
|
#for entry in tinsStats:
|
||||||
|
if 'currentQuery' in tinsStats:
|
||||||
|
tinsStats['priorQuery'] = tinsStats['currentQuery']
|
||||||
|
tinsStats['currentQuery'] = {}
|
||||||
|
tinsStats['sinceLastQuery'] = {}
|
||||||
|
else:
|
||||||
|
tinsStats['currentQuery'] = {}
|
||||||
|
tinsStats['sinceLastQuery'] = {}
|
||||||
|
|
||||||
|
tinsStats['currentQuery'] = { 'Bulk': {'Download': {'sent_packets': 0.0, 'drops': 0.0}, 'Upload': {'sent_packets': 0.0, 'drops': 0.0}},
|
||||||
|
'BestEffort': {'Download': {'sent_packets': 0.0, 'drops': 0.0}, 'Upload': {'sent_packets': 0.0, 'drops': 0.0}},
|
||||||
|
'Video': {'Download': {'sent_packets': 0.0, 'drops': 0.0}, 'Upload': {'sent_packets': 0.0, 'drops': 0.0}},
|
||||||
|
'Voice': {'Download': {'sent_packets': 0.0, 'drops': 0.0}, 'Upload': {'sent_packets': 0.0, 'drops': 0.0}},
|
||||||
|
}
|
||||||
|
tinsStats['sinceLastQuery'] = { 'Bulk': {'Download': {'sent_packets': 0.0, 'drops': 0.0}, 'Upload': {'sent_packets': 0.0, 'drops': 0.0}},
|
||||||
|
'BestEffort': {'Download': {'sent_packets': 0.0, 'drops': 0.0}, 'Upload': {'sent_packets': 0.0, 'drops': 0.0}},
|
||||||
|
'Video': {'Download': {'sent_packets': 0.0, 'drops': 0.0}, 'Upload': {'sent_packets': 0.0, 'drops': 0.0}},
|
||||||
|
'Voice': {'Download': {'sent_packets': 0.0, 'drops': 0.0}, 'Upload': {'sent_packets': 0.0, 'drops': 0.0}},
|
||||||
|
}
|
||||||
|
|
||||||
|
for circuit in subscriberCircuits:
|
||||||
|
for (interface, stats, dirSuffix) in zip(interfaces, ifaceStats, ['Download', 'Upload']):
|
||||||
|
|
||||||
|
element = stats[circuit['classid']] if circuit['classid'] in stats else False
|
||||||
|
|
||||||
|
if element:
|
||||||
|
bytesSent = float(element['bytes'])
|
||||||
|
drops = float(element['drops'])
|
||||||
|
packets = float(element['packets'])
|
||||||
|
if (element['drops'] > 0) and (element['packets'] > 0):
|
||||||
|
overloadFactor = float(round(element['drops']/element['packets'],3))
|
||||||
|
else:
|
||||||
|
overloadFactor = 0.0
|
||||||
|
|
||||||
|
if 'cake diffserv4' in fqOrCAKE:
|
||||||
|
tinCounter = 1
|
||||||
|
for tin in element['tins']:
|
||||||
|
sent_packets = float(tin['sent_packets'])
|
||||||
|
ack_drops = float(tin['ack_drops'])
|
||||||
|
ecn_mark = float(tin['ecn_mark'])
|
||||||
|
tinDrops = float(tin['drops'])
|
||||||
|
trueDrops = ecn_mark + tinDrops - ack_drops
|
||||||
|
if tinCounter == 1:
|
||||||
|
tinsStats['currentQuery']['Bulk'][dirSuffix]['sent_packets'] += sent_packets
|
||||||
|
tinsStats['currentQuery']['Bulk'][dirSuffix]['drops'] += trueDrops
|
||||||
|
elif tinCounter == 2:
|
||||||
|
tinsStats['currentQuery']['BestEffort'][dirSuffix]['sent_packets'] += sent_packets
|
||||||
|
tinsStats['currentQuery']['BestEffort'][dirSuffix]['drops'] += trueDrops
|
||||||
|
elif tinCounter == 3:
|
||||||
|
tinsStats['currentQuery']['Video'][dirSuffix]['sent_packets'] += sent_packets
|
||||||
|
tinsStats['currentQuery']['Video'][dirSuffix]['drops'] += trueDrops
|
||||||
|
elif tinCounter == 4:
|
||||||
|
tinsStats['currentQuery']['Voice'][dirSuffix]['sent_packets'] += sent_packets
|
||||||
|
tinsStats['currentQuery']['Voice'][dirSuffix]['drops'] += trueDrops
|
||||||
|
tinCounter += 1
|
||||||
|
|
||||||
|
circuit['stats']['currentQuery']['bytesSent' + dirSuffix] = bytesSent
|
||||||
|
circuit['stats']['currentQuery']['packetDrops' + dirSuffix] = drops
|
||||||
|
circuit['stats']['currentQuery']['packetsSent' + dirSuffix] = packets
|
||||||
|
circuit['stats']['currentQuery']['overloadFactor' + dirSuffix] = overloadFactor
|
||||||
|
|
||||||
|
#if 'cake diffserv4' in fqOrCAKE:
|
||||||
|
# circuit['stats']['currentQuery']['tins'] = theseTins
|
||||||
|
|
||||||
|
circuit['stats']['currentQuery']['time'] = datetime.now().isoformat()
|
||||||
|
|
||||||
|
allPacketsDownload = 0.0
|
||||||
|
allPacketsUpload = 0.0
|
||||||
|
for circuit in subscriberCircuits:
|
||||||
|
circuit['stats']['sinceLastQuery']['bitsDownload'] = circuit['stats']['sinceLastQuery']['bitsUpload'] = 0.0
|
||||||
|
circuit['stats']['sinceLastQuery']['bytesSentDownload'] = circuit['stats']['sinceLastQuery']['bytesSentUpload'] = 0.0
|
||||||
|
circuit['stats']['sinceLastQuery']['packetDropsDownload'] = circuit['stats']['sinceLastQuery']['packetDropsUpload'] = 0.0
|
||||||
|
circuit['stats']['sinceLastQuery']['packetsSentDownload'] = circuit['stats']['sinceLastQuery']['packetsSentUpload'] = 0.0
|
||||||
|
|
||||||
|
try:
|
||||||
|
circuit['stats']['sinceLastQuery']['bytesSentDownload'] = circuit['stats']['currentQuery']['bytesSentDownload'] - circuit['stats']['priorQuery']['bytesSentDownload']
|
||||||
|
circuit['stats']['sinceLastQuery']['bytesSentUpload'] = circuit['stats']['currentQuery']['bytesSentUpload'] - circuit['stats']['priorQuery']['bytesSentUpload']
|
||||||
|
except:
|
||||||
|
circuit['stats']['sinceLastQuery']['bytesSentDownload'] = 0.0
|
||||||
|
circuit['stats']['sinceLastQuery']['bytesSentUpload'] = 0.0
|
||||||
|
try:
|
||||||
|
circuit['stats']['sinceLastQuery']['packetDropsDownload'] = circuit['stats']['currentQuery']['packetDropsDownload'] - circuit['stats']['priorQuery']['packetDropsDownload']
|
||||||
|
circuit['stats']['sinceLastQuery']['packetDropsUpload'] = circuit['stats']['currentQuery']['packetDropsUpload'] - circuit['stats']['priorQuery']['packetDropsUpload']
|
||||||
|
except:
|
||||||
|
circuit['stats']['sinceLastQuery']['packetDropsDownload'] = 0.0
|
||||||
|
circuit['stats']['sinceLastQuery']['packetDropsUpload'] = 0.0
|
||||||
|
try:
|
||||||
|
circuit['stats']['sinceLastQuery']['packetsSentDownload'] = circuit['stats']['currentQuery']['packetsSentDownload'] - circuit['stats']['priorQuery']['packetsSentDownload']
|
||||||
|
circuit['stats']['sinceLastQuery']['packetsSentUpload'] = circuit['stats']['currentQuery']['packetsSentUpload'] - circuit['stats']['priorQuery']['packetsSentUpload']
|
||||||
|
except:
|
||||||
|
circuit['stats']['sinceLastQuery']['packetsSentDownload'] = 0.0
|
||||||
|
circuit['stats']['sinceLastQuery']['packetsSentUpload'] = 0.0
|
||||||
|
|
||||||
|
allPacketsDownload += circuit['stats']['sinceLastQuery']['packetsSentDownload']
|
||||||
|
allPacketsUpload += circuit['stats']['sinceLastQuery']['packetsSentUpload']
|
||||||
|
|
||||||
|
if 'priorQuery' in circuit['stats']:
|
||||||
|
if 'time' in circuit['stats']['priorQuery']:
|
||||||
|
currentQueryTime = datetime.fromisoformat(circuit['stats']['currentQuery']['time'])
|
||||||
|
priorQueryTime = datetime.fromisoformat(circuit['stats']['priorQuery']['time'])
|
||||||
|
deltaSeconds = (currentQueryTime - priorQueryTime).total_seconds()
|
||||||
|
circuit['stats']['sinceLastQuery']['bitsDownload'] = round(
|
||||||
|
((circuit['stats']['sinceLastQuery']['bytesSentDownload'] * 8) / deltaSeconds)) if deltaSeconds > 0 else 0
|
||||||
|
circuit['stats']['sinceLastQuery']['bitsUpload'] = round(
|
||||||
|
((circuit['stats']['sinceLastQuery']['bytesSentUpload'] * 8) / deltaSeconds)) if deltaSeconds > 0 else 0
|
||||||
|
else:
|
||||||
|
circuit['stats']['sinceLastQuery']['bitsDownload'] = (circuit['stats']['sinceLastQuery']['bytesSentDownload'] * 8)
|
||||||
|
circuit['stats']['sinceLastQuery']['bitsUpload'] = (circuit['stats']['sinceLastQuery']['bytesSentUpload'] * 8)
|
||||||
|
|
||||||
|
tinsStats['sinceLastQuery']['Bulk']['Download']['dropPercentage'] = tinsStats['sinceLastQuery']['Bulk']['Upload']['dropPercentage'] = 0.0
|
||||||
|
tinsStats['sinceLastQuery']['BestEffort']['Download']['dropPercentage'] = tinsStats['sinceLastQuery']['BestEffort']['Upload']['dropPercentage'] = 0.0
|
||||||
|
tinsStats['sinceLastQuery']['Video']['Download']['dropPercentage'] = tinsStats['sinceLastQuery']['Video']['Upload']['dropPercentage'] = 0.0
|
||||||
|
tinsStats['sinceLastQuery']['Voice']['Download']['dropPercentage'] = tinsStats['sinceLastQuery']['Voice']['Upload']['dropPercentage'] = 0.0
|
||||||
|
|
||||||
|
tinsStats['sinceLastQuery']['Bulk']['Download']['percentage'] = tinsStats['sinceLastQuery']['Bulk']['Upload']['percentage'] = 0.0
|
||||||
|
tinsStats['sinceLastQuery']['BestEffort']['Download']['percentage'] = tinsStats['sinceLastQuery']['BestEffort']['Upload']['percentage'] = 0.0
|
||||||
|
tinsStats['sinceLastQuery']['Video']['Download']['percentage'] = tinsStats['sinceLastQuery']['Video']['Upload']['percentage'] = 0.0
|
||||||
|
tinsStats['sinceLastQuery']['Voice']['Download']['percentage'] = tinsStats['sinceLastQuery']['Voice']['Upload']['percentage'] = 0.0
|
||||||
|
|
||||||
|
try:
|
||||||
|
tinsStats['sinceLastQuery']['Bulk']['Download']['sent_packets'] = tinsStats['currentQuery']['Bulk']['Download']['sent_packets'] - tinsStats['priorQuery']['Bulk']['Download']['sent_packets']
|
||||||
|
tinsStats['sinceLastQuery']['BestEffort']['Download']['sent_packets'] = tinsStats['currentQuery']['BestEffort']['Download']['sent_packets'] - tinsStats['priorQuery']['BestEffort']['Download']['sent_packets']
|
||||||
|
tinsStats['sinceLastQuery']['Video']['Download']['sent_packets'] = tinsStats['currentQuery']['Video']['Download']['sent_packets'] - tinsStats['priorQuery']['Video']['Download']['sent_packets']
|
||||||
|
tinsStats['sinceLastQuery']['Voice']['Download']['sent_packets'] = tinsStats['currentQuery']['Voice']['Download']['sent_packets'] - tinsStats['priorQuery']['Voice']['Download']['sent_packets']
|
||||||
|
tinsStats['sinceLastQuery']['Bulk']['Upload']['sent_packets'] = tinsStats['currentQuery']['Bulk']['Upload']['sent_packets'] - tinsStats['priorQuery']['Bulk']['Upload']['sent_packets']
|
||||||
|
tinsStats['sinceLastQuery']['BestEffort']['Upload']['sent_packets'] = tinsStats['currentQuery']['BestEffort']['Upload']['sent_packets'] - tinsStats['priorQuery']['BestEffort']['Upload']['sent_packets']
|
||||||
|
tinsStats['sinceLastQuery']['Video']['Upload']['sent_packets'] = tinsStats['currentQuery']['Video']['Upload']['sent_packets'] - tinsStats['priorQuery']['Video']['Upload']['sent_packets']
|
||||||
|
tinsStats['sinceLastQuery']['Voice']['Upload']['sent_packets'] = tinsStats['currentQuery']['Voice']['Upload']['sent_packets'] - tinsStats['priorQuery']['Voice']['Upload']['sent_packets']
|
||||||
|
except:
|
||||||
|
tinsStats['sinceLastQuery']['Bulk']['Download']['sent_packets'] = tinsStats['sinceLastQuery']['BestEffort']['Download']['sent_packets'] = 0.0
|
||||||
|
tinsStats['sinceLastQuery']['Video']['Download']['sent_packets'] = tinsStats['sinceLastQuery']['Voice']['Download']['sent_packets'] = 0.0
|
||||||
|
tinsStats['sinceLastQuery']['Bulk']['Upload']['sent_packets'] = tinsStats['sinceLastQuery']['BestEffort']['Upload']['sent_packets'] = 0.0
|
||||||
|
tinsStats['sinceLastQuery']['Video']['Upload']['sent_packets'] = tinsStats['sinceLastQuery']['Voice']['Upload']['sent_packets'] = 0.0
|
||||||
|
|
||||||
|
try:
|
||||||
|
tinsStats['sinceLastQuery']['Bulk']['Download']['drops'] = tinsStats['currentQuery']['Bulk']['Download']['drops'] - tinsStats['priorQuery']['Bulk']['Download']['drops']
|
||||||
|
tinsStats['sinceLastQuery']['BestEffort']['Download']['drops'] = tinsStats['currentQuery']['BestEffort']['Download']['drops'] - tinsStats['priorQuery']['BestEffort']['Download']['drops']
|
||||||
|
tinsStats['sinceLastQuery']['Video']['Download']['drops'] = tinsStats['currentQuery']['Video']['Download']['drops'] - tinsStats['priorQuery']['Video']['Download']['drops']
|
||||||
|
tinsStats['sinceLastQuery']['Voice']['Download']['drops'] = tinsStats['currentQuery']['Voice']['Download']['drops'] - tinsStats['priorQuery']['Voice']['Download']['drops']
|
||||||
|
tinsStats['sinceLastQuery']['Bulk']['Upload']['drops'] = tinsStats['currentQuery']['Bulk']['Upload']['drops'] - tinsStats['priorQuery']['Bulk']['Upload']['drops']
|
||||||
|
tinsStats['sinceLastQuery']['BestEffort']['Upload']['drops'] = tinsStats['currentQuery']['BestEffort']['Upload']['drops'] - tinsStats['priorQuery']['BestEffort']['Upload']['drops']
|
||||||
|
tinsStats['sinceLastQuery']['Video']['Upload']['drops'] = tinsStats['currentQuery']['Video']['Upload']['drops'] - tinsStats['priorQuery']['Video']['Upload']['drops']
|
||||||
|
tinsStats['sinceLastQuery']['Voice']['Upload']['drops'] = tinsStats['currentQuery']['Voice']['Upload']['drops'] - tinsStats['priorQuery']['Voice']['Upload']['drops']
|
||||||
|
except:
|
||||||
|
tinsStats['sinceLastQuery']['Bulk']['Download']['drops'] = tinsStats['sinceLastQuery']['BestEffort']['Download']['drops'] = 0.0
|
||||||
|
tinsStats['sinceLastQuery']['Video']['Download']['drops'] = tinsStats['sinceLastQuery']['Voice']['Download']['drops'] = 0.0
|
||||||
|
tinsStats['sinceLastQuery']['Bulk']['Upload']['drops'] = tinsStats['sinceLastQuery']['BestEffort']['Upload']['drops'] = 0.0
|
||||||
|
tinsStats['sinceLastQuery']['Video']['Upload']['drops'] = tinsStats['sinceLastQuery']['Voice']['Upload']['drops'] = 0.0
|
||||||
|
|
||||||
|
try:
|
||||||
|
dlPerc = tinsStats['sinceLastQuery']['Bulk']['Download']['drops'] / tinsStats['sinceLastQuery']['Bulk']['Download']['sent_packets']
|
||||||
|
ulPerc = tinsStats['sinceLastQuery']['Bulk']['Upload']['drops'] / tinsStats['sinceLastQuery']['Bulk']['Upload']['sent_packets']
|
||||||
|
tinsStats['sinceLastQuery']['Bulk']['Download']['dropPercentage'] = max(round(dlPerc * 100.0, 3),0.0)
|
||||||
|
tinsStats['sinceLastQuery']['Bulk']['Upload']['dropPercentage'] = max(round(ulPerc * 100.0, 3),0.0)
|
||||||
|
|
||||||
|
dlPerc = tinsStats['sinceLastQuery']['BestEffort']['Download']['drops'] / tinsStats['sinceLastQuery']['BestEffort']['Download']['sent_packets']
|
||||||
|
ulPerc = tinsStats['sinceLastQuery']['BestEffort']['Upload']['drops'] / tinsStats['sinceLastQuery']['BestEffort']['Upload']['sent_packets']
|
||||||
|
tinsStats['sinceLastQuery']['BestEffort']['Download']['dropPercentage'] = max(round(dlPerc * 100.0, 3),0.0)
|
||||||
|
tinsStats['sinceLastQuery']['BestEffort']['Upload']['dropPercentage'] = max(round(ulPerc * 100.0, 3),0.0)
|
||||||
|
|
||||||
|
dlPerc = tinsStats['sinceLastQuery']['Video']['Download']['drops'] / tinsStats['sinceLastQuery']['Video']['Download']['sent_packets']
|
||||||
|
ulPerc = tinsStats['sinceLastQuery']['Video']['Upload']['drops'] / tinsStats['sinceLastQuery']['Video']['Upload']['sent_packets']
|
||||||
|
tinsStats['sinceLastQuery']['Video']['Download']['dropPercentage'] = max(round(dlPerc * 100.0, 3),0.0)
|
||||||
|
tinsStats['sinceLastQuery']['Video']['Upload']['dropPercentage'] = max(round(ulPerc * 100.0, 3),0.0)
|
||||||
|
|
||||||
|
dlPerc = tinsStats['sinceLastQuery']['Voice']['Download']['drops'] / tinsStats['sinceLastQuery']['Voice']['Download']['sent_packets']
|
||||||
|
ulPerc = tinsStats['sinceLastQuery']['Voice']['Upload']['drops'] / tinsStats['sinceLastQuery']['Voice']['Upload']['sent_packets']
|
||||||
|
tinsStats['sinceLastQuery']['Voice']['Download']['dropPercentage'] = max(round(dlPerc * 100.0, 3),0.0)
|
||||||
|
tinsStats['sinceLastQuery']['Voice']['Upload']['dropPercentage'] = max(round(ulPerc * 100.0, 3),0.0)
|
||||||
|
except:
|
||||||
|
tinsStats['sinceLastQuery']['Bulk']['Download']['dropPercentage'] = 0.0
|
||||||
|
tinsStats['sinceLastQuery']['Bulk']['Upload']['dropPercentage'] = 0.0
|
||||||
|
tinsStats['sinceLastQuery']['BestEffort']['Download']['dropPercentage'] = 0.0
|
||||||
|
tinsStats['sinceLastQuery']['BestEffort']['Upload']['dropPercentage'] = 0.0
|
||||||
|
tinsStats['sinceLastQuery']['Video']['Download']['dropPercentage'] = 0.0
|
||||||
|
tinsStats['sinceLastQuery']['Video']['Upload']['dropPercentage'] = 0.0
|
||||||
|
tinsStats['sinceLastQuery']['Voice']['Download']['dropPercentage'] = 0.0
|
||||||
|
tinsStats['sinceLastQuery']['Voice']['Upload']['dropPercentage'] = 0.0
|
||||||
|
|
||||||
|
try:
|
||||||
|
tinsStats['sinceLastQuery']['Bulk']['Download']['percentage'] = min(round((tinsStats['sinceLastQuery']['Bulk']['Download']['sent_packets']/allPacketsUpload)*100.0, 3),100.0)
|
||||||
|
tinsStats['sinceLastQuery']['Bulk']['Upload']['percentage'] = min(round((tinsStats['sinceLastQuery']['Bulk']['Upload']['sent_packets']/allPacketsUpload)*100.0, 3),100.0)
|
||||||
|
tinsStats['sinceLastQuery']['BestEffort']['Download']['percentage'] = min(round((tinsStats['sinceLastQuery']['BestEffort']['Download']['sent_packets']/allPacketsDownload)*100.0, 3),100.0)
|
||||||
|
tinsStats['sinceLastQuery']['BestEffort']['Upload']['percentage'] = min(round((tinsStats['sinceLastQuery']['BestEffort']['Upload']['sent_packets']/allPacketsUpload)*100.0, 3),100.0)
|
||||||
|
tinsStats['sinceLastQuery']['Video']['Download']['percentage'] = min(round((tinsStats['sinceLastQuery']['Video']['Download']['sent_packets']/allPacketsDownload)*100.0, 3),100.0)
|
||||||
|
tinsStats['sinceLastQuery']['Video']['Upload']['percentage'] = min(round((tinsStats['sinceLastQuery']['Video']['Upload']['sent_packets']/allPacketsUpload)*100.0, 3),100.0)
|
||||||
|
tinsStats['sinceLastQuery']['Voice']['Download']['percentage'] = min(round((tinsStats['sinceLastQuery']['Voice']['Download']['sent_packets']/allPacketsDownload)*100.0, 3),100.0)
|
||||||
|
tinsStats['sinceLastQuery']['Voice']['Upload']['percentage'] = min(round((tinsStats['sinceLastQuery']['Voice']['Upload']['sent_packets']/allPacketsUpload)*100.0, 3),100.0)
|
||||||
|
except:
|
||||||
|
tinsStats['sinceLastQuery']['Bulk']['Download']['percentage'] = tinsStats['sinceLastQuery']['Bulk']['Upload']['percentage'] = 0.0
|
||||||
|
tinsStats['sinceLastQuery']['BestEffort']['Download']['percentage'] = tinsStats['sinceLastQuery']['BestEffort']['Upload']['percentage'] = 0.0
|
||||||
|
tinsStats['sinceLastQuery']['Video']['Download']['percentage'] = tinsStats['sinceLastQuery']['Video']['Upload']['percentage'] = 0.0
|
||||||
|
tinsStats['sinceLastQuery']['Voice']['Download']['percentage'] = tinsStats['sinceLastQuery']['Voice']['Upload']['percentage'] = 0.0
|
||||||
|
|
||||||
|
return subscriberCircuits, tinsStats
|
||||||
|
|
||||||
|
|
||||||
|
def getParentNodeStats(parentNodes, subscriberCircuits):
	"""Aggregate per-circuit counters up to each parent node.

	For every parent node, sums the bits moved and the packets dropped/sent
	by all circuits whose ParentNode matches it, then attaches the totals —
	plus a drop percentage ('overloadFactorTotal') — under
	parentNode['stats']['sinceLastQuery']. Returns the mutated parentNodes.
	"""
	for node in parentNodes:
		nodeName = node['parentNodeName']
		bitsDown = 0
		bitsUp = 0
		dropsTotal = 0
		sentDown = 0.0
		sentUp = 0.0
		sentTotal = 0.0
		for circuit in subscriberCircuits:
			if circuit['ParentNode'] != nodeName:
				continue
			sample = circuit['stats']['sinceLastQuery']
			bitsDown += sample['bitsDownload']
			bitsUp += sample['bitsUpload']
			dropsTotal += sample['packetDropsDownload'] + sample['packetDropsUpload']
			sentDown += sample['packetsSentDownload']
			sentUp += sample['packetsSentUpload']
			sentTotal += sample['packetsSentDownload'] + sample['packetsSentUpload']
		# Drop percentage is only meaningful when traffic flowed both ways.
		if (sentDown > 0) and (sentUp > 0):
			overloadFactor = float(round((dropsTotal / sentTotal) * 100.0, 1))
		else:
			overloadFactor = 0.0
		node['stats'] = {
			'sinceLastQuery': {
				'bitsDownload': bitsDown,
				'bitsUpload': bitsUp,
				'packetDropsTotal': dropsTotal,
				'overloadFactorTotal': overloadFactor,
			}
		}
	return parentNodes
|
||||||
|
|
||||||
|
|
||||||
|
def getParentNodeDict(data, depth, parentNodeNameDict):
	"""Recursively flatten a network hierarchy into a child->parent map.

	Args:
		data: nested dict of the network.json form, where each node may
			hold a 'children' dict of sub-nodes.
		depth: current recursion depth (carried for symmetry; not used to
			alter behavior).
		parentNodeNameDict: accumulator mapping child name -> parent name;
			pass None on the first call.

	Returns:
		Dict mapping every descendant node name to its immediate parent's name.
	"""
	# Idiom fix: compare to None with 'is', not '=='.
	if parentNodeNameDict is None:
		parentNodeNameDict = {}
	for elem in data:
		if 'children' in data[elem]:
			for child in data[elem]['children']:
				parentNodeNameDict[child] = elem
			# The accumulator is mutated in place, so the original's
			# dict(parentNodeNameDict, **tempDict) re-merge was redundant.
			getParentNodeDict(data[elem]['children'], depth + 1, parentNodeNameDict)
	return parentNodeNameDict
|
||||||
|
|
||||||
|
|
||||||
|
def parentNodeNameDictPull():
	"""Load network.json and return a child->parent name map for the whole tree."""
	# Read the network hierarchy from disk, then flatten it.
	with open('network.json', 'r') as networkFile:
		networkHierarchy = json.load(networkFile)
	return getParentNodeDict(networkHierarchy, 0, None)
|
||||||
|
|
||||||
|
def refreshBandwidthGraphs():
	"""Push the latest per-circuit and per-node bandwidth stats to InfluxDB.

	Reads the JSON state files produced by earlier polling runs, refreshes
	them via getsubscriberCircuitstats()/getParentNodeStats(), writes
	Bandwidth/Utilization/Overload (and, under CAKE, per-tin) points to
	InfluxDB, then persists the updated state back to disk.

	NOTE(review): relies on names defined earlier in this file / its imports
	(Path, InfluxDBClient, Point, SYNCHRONOUS, influxDBurl, influxDBtoken,
	influxDBOrg, influxDBBucket, chunk_list, getsubscriberCircuitstats) —
	none are visible in this chunk; confirm against the file header.
	"""
	startTime = datetime.now()
	# State persisted by the previous polling cycle.
	with open('statsByParentNode.json', 'r') as j:
		parentNodes = json.loads(j.read())

	with open('statsByCircuit.json', 'r') as j:
		subscriberCircuits = json.loads(j.read())

	# CAKE tin stats are optional — start empty on the first run.
	fileLoc = Path("tinsStats.json")
	if fileLoc.is_file():
		with open(fileLoc, 'r') as j:
			tinsStats = json.loads(j.read())
	else:
		tinsStats = {}

	# Long-term counters (all-time dropped packets) are also optional.
	fileLoc = Path("longTermStats.json")
	if fileLoc.is_file():
		with open(fileLoc, 'r') as j:
			longTermStats = json.loads(j.read())
		droppedPacketsAllTime = longTermStats['droppedPacketsTotal']
	else:
		longTermStats = {}
		longTermStats['droppedPacketsTotal'] = 0.0
		droppedPacketsAllTime = 0.0

	# NOTE(review): the result is never used below — presumably leftover.
	parentNodeNameDict = parentNodeNameDictPull()

	print("Retrieving circuit statistics")
	subscriberCircuits, tinsStats = getsubscriberCircuitstats(subscriberCircuits, tinsStats)
	print("Computing parent node statistics")
	parentNodes = getParentNodeStats(parentNodes, subscriberCircuits)
	print("Writing data to InfluxDB")
	client = InfluxDBClient(
		url=influxDBurl,
		token=influxDBtoken,
		org=influxDBOrg
	)
	write_api = client.write_api(write_options=SYNCHRONOUS)

	# Batch circuit points 200 at a time to keep write payloads small.
	chunkedsubscriberCircuits = list(chunk_list(subscriberCircuits, 200))

	queriesToSendCount = 0
	for chunk in chunkedsubscriberCircuits:
		queriesToSend = []
		for circuit in chunk:
			bitsDownload = float(circuit['stats']['sinceLastQuery']['bitsDownload'])
			bitsUpload = float(circuit['stats']['sinceLastQuery']['bitsUpload'])
			# Only report circuits that moved traffic in both directions.
			if (bitsDownload > 0) and (bitsUpload > 0):
				# Utilization as percent of the circuit's plan rate (Mbps -> bps).
				percentUtilizationDownload = round((bitsDownload / round(circuit['maxDownload'] * 1000000))*100.0, 1)
				percentUtilizationUpload = round((bitsUpload / round(circuit['maxUpload'] * 1000000))*100.0, 1)
				p = Point('Bandwidth').tag("Circuit", circuit['circuitName']).tag("ParentNode", circuit['ParentNode']).tag("Type", "Circuit").field("Download", bitsDownload).field("Upload", bitsUpload)
				queriesToSend.append(p)
				p = Point('Utilization').tag("Circuit", circuit['circuitName']).tag("ParentNode", circuit['ParentNode']).tag("Type", "Circuit").field("Download", percentUtilizationDownload).field("Upload", percentUtilizationUpload)
				queriesToSend.append(p)

		write_api.write(bucket=influxDBBucket, record=queriesToSend)
		# print("Added " + str(len(queriesToSend)) + " points to InfluxDB.")
		queriesToSendCount += len(queriesToSend)

	# One batch for all parent nodes.
	queriesToSend = []
	for parentNode in parentNodes:
		bitsDownload = float(parentNode['stats']['sinceLastQuery']['bitsDownload'])
		bitsUpload = float(parentNode['stats']['sinceLastQuery']['bitsUpload'])
		dropsTotal = float(parentNode['stats']['sinceLastQuery']['packetDropsTotal'])
		overloadFactor = float(parentNode['stats']['sinceLastQuery']['overloadFactorTotal'])
		# All-time drop counter accumulates every cycle's drops.
		droppedPacketsAllTime += dropsTotal
		if (bitsDownload > 0) and (bitsUpload > 0):
			percentUtilizationDownload = round((bitsDownload / round(parentNode['maxDownload'] * 1000000))*100.0, 1)
			percentUtilizationUpload = round((bitsUpload / round(parentNode['maxUpload'] * 1000000))*100.0, 1)
			p = Point('Bandwidth').tag("Device", parentNode['parentNodeName']).tag("ParentNode", parentNode['parentNodeName']).tag("Type", "Parent Node").field("Download", bitsDownload).field("Upload", bitsUpload)
			queriesToSend.append(p)
			p = Point('Utilization').tag("Device", parentNode['parentNodeName']).tag("ParentNode", parentNode['parentNodeName']).tag("Type", "Parent Node").field("Download", percentUtilizationDownload).field("Upload", percentUtilizationUpload)
			queriesToSend.append(p)
			p = Point('Overload').tag("Device", parentNode['parentNodeName']).tag("ParentNode", parentNode['parentNodeName']).tag("Type", "Parent Node").field("Overload", overloadFactor)
			queriesToSend.append(p)

	write_api.write(bucket=influxDBBucket, record=queriesToSend)
	# print("Added " + str(len(queriesToSend)) + " points to InfluxDB.")
	queriesToSendCount += len(queriesToSend)

	# Per-tin stats only exist when shaping with CAKE diffserv4.
	if 'cake diffserv4' in fqOrCAKE:
		queriesToSend = []
		listOfTins = ['Bulk', 'BestEffort', 'Video', 'Voice']
		for tin in listOfTins:
			p = Point('Tin Drop Percentage').tag("Type", "Tin").tag("Tin", tin).field("Download", tinsStats['sinceLastQuery'][tin]['Download']['dropPercentage']).field("Upload", tinsStats['sinceLastQuery'][tin]['Upload']['dropPercentage'])
			queriesToSend.append(p)
			p = Point('Tins Assigned').tag("Type", "Tin").tag("Tin", tin).field("Download", tinsStats['sinceLastQuery'][tin]['Download']['percentage']).field("Upload", tinsStats['sinceLastQuery'][tin]['Upload']['percentage'])
			queriesToSend.append(p)

		write_api.write(bucket=influxDBBucket, record=queriesToSend)
		# print("Added " + str(len(queriesToSend)) + " points to InfluxDB.")
		queriesToSendCount += len(queriesToSend)

	print("Added " + str(queriesToSendCount) + " points to InfluxDB.")

	client.close()

	# Persist refreshed state for the next polling cycle.
	with open('statsByParentNode.json', 'w') as f:
		f.write(json.dumps(parentNodes, indent=4))

	with open('statsByCircuit.json', 'w') as f:
		f.write(json.dumps(subscriberCircuits, indent=4))

	longTermStats['droppedPacketsTotal'] = droppedPacketsAllTime
	with open('longTermStats.json', 'w') as f:
		f.write(json.dumps(longTermStats, indent=4))

	with open('tinsStats.json', 'w') as f:
		f.write(json.dumps(tinsStats, indent=4))

	endTime = datetime.now()
	durationSeconds = round((endTime - startTime).total_seconds(), 2)
	print("Graphs updated within " + str(durationSeconds) + " seconds.")
||||||
|
|
||||||
|
if __name__ == '__main__':
	# Push one graphing update when this module is run directly.
	refreshBandwidthGraphs()
|
||||||
137
v1.3/graphLatency.py
Normal file
137
v1.3/graphLatency.py
Normal file
@@ -0,0 +1,137 @@
|
|||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
from subprocess import PIPE
|
||||||
|
import io
|
||||||
|
import decimal
|
||||||
|
import json
|
||||||
|
from ispConfig import fqOrCAKE, interfaceA, interfaceB, influxDBBucket, influxDBOrg, influxDBtoken, influxDBurl, ppingLocation
|
||||||
|
from datetime import date, datetime, timedelta
|
||||||
|
import decimal
|
||||||
|
from influxdb_client import InfluxDBClient, Point, Dialect
|
||||||
|
from influxdb_client.client.write_api import SYNCHRONOUS
|
||||||
|
import dateutil.parser
|
||||||
|
|
||||||
|
def getLatencies(subscriberCircuits, secondsToRun):
	"""Sample TCP RTT/jitter with pping and attach results to each device.

	Runs pping on interfaceA for secondsToRun seconds, parses its
	machine-readable (-m) output, and records the lowest observed RTT (ms,
	clamped to maxLatencyRecordable) plus the matching jitter per device
	IPv4. Devices with no samples get tcpLatency/tcpJitter = None.
	Returns the mutated subscriberCircuits list.
	"""
	# RTT ceiling in ms; samples above it are clamped (was a duplicated
	# magic literal 200 in the clamp branch).
	maxLatencyRecordable = 200
	# Every shapable IPv4, so pping flow lines can be matched to a device.
	matchableIPs = []
	for circuit in subscriberCircuits:
		for device in circuit['devices']:
			matchableIPs.append(device['ipv4'])

	rttDict = {}
	jitterDict = {}
	command = "./pping -i " + interfaceA + " -s " + str(secondsToRun) + " -m"
	tcShowResults = subprocess.run(command, shell=True, cwd=ppingLocation, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL).stdout.decode('utf-8').splitlines()
	for line in tcShowResults:
		if len(line) > 59:
			# pping -m output is fixed-width: two RTT columns in seconds,
			# then a "src:port+dst:port" flow descriptor.
			rtt1 = float(line[18:27])*1000
			rtt2 = float(line[27:36]) *1000
			toAndFrom = line[38:].split(' ')[3]
			fromIP = toAndFrom.split('+')[0].split(':')[0]
			toIP = toAndFrom.split('+')[1].split(':')[0]
			if fromIP in matchableIPs:
				matchedIP = fromIP
			elif toIP in matchableIPs:
				matchedIP = toIP
			else:
				# Bug fix: unmatched samples were previously stored under
				# the '' key, polluting the dicts; skip them instead.
				continue
			jitter = rtt1 - rtt2
			# Clamp to the recordable ceiling.
			if rtt1 >= maxLatencyRecordable:
				rtt1 = maxLatencyRecordable
			# Keep the lowest observed RTT per IP (and its jitter).
			if matchedIP in rttDict:
				if rtt1 < rttDict[matchedIP]:
					rttDict[matchedIP] = rtt1
					jitterDict[matchedIP] = jitter
			else:
				rttDict[matchedIP] = rtt1
				jitterDict[matchedIP] = jitter
	# Attach results (or None) to every device.
	for circuit in subscriberCircuits:
		for device in circuit['devices']:
			device['tcpLatency'] = rttDict.get(device['ipv4'])
			device['tcpJitter'] = jitterDict.get(device['ipv4'])
	return subscriberCircuits
|
||||||
|
|
||||||
|
def getParentNodeStats(parentNodes, subscriberCircuits):
	"""Attach the mean device TCP latency to each parent node.

	Averages tcpLatency across all devices whose ParentNode matches the
	node; nodes with no measured devices get tcpLatency = None.
	Returns the mutated parentNodes list.
	"""
	for node in parentNodes:
		samples = [
			device['tcpLatency']
			for circuit in subscriberCircuits
			for device in circuit['devices']
			if device['ParentNode'] == node['parentNodeName']
			and device['tcpLatency'] is not None
		]
		node['tcpLatency'] = (sum(samples) / len(samples)) if samples else None
	return parentNodes
|
||||||
|
|
||||||
|
def refreshLatencyGraphs(secondsToRun):
	"""Measure TCP latency for secondsToRun seconds and push it to InfluxDB.

	Loads the circuit/parent-node state files, samples latency with
	getLatencies(), aggregates per parent node, and writes a 'Latency'
	point for every device and parent node that has a measurement.
	"""
	startTime = datetime.now()
	with open('statsByParentNode.json', 'r') as j:
		parentNodes = json.loads(j.read())

	with open('statsByCircuit.json', 'r') as j:
		subscriberCircuits = json.loads(j.read())

	print("Retrieving circuit statistics")
	subscriberCircuits = getLatencies(subscriberCircuits, secondsToRun)

	print("Computing parent node statistics")
	# Bug fix: previously called getParentNodeStats(parentNodes, devices),
	# but 'devices' is undefined in this scope and raised NameError at
	# runtime; the latency samples live on subscriberCircuits.
	parentNodes = getParentNodeStats(parentNodes, subscriberCircuits)

	print("Writing data to InfluxDB")
	client = InfluxDBClient(
		url=influxDBurl,
		token=influxDBtoken,
		org=influxDBOrg
	)
	write_api = client.write_api(write_options=SYNCHRONOUS)

	queriesToSend = []

	# One point per device with a measurement.
	for circuit in subscriberCircuits:
		for device in circuit['devices']:
			if device['tcpLatency'] is not None:
				p = Point('Latency').tag("Device", device['deviceName']).tag("ParentNode", device['ParentNode']).tag("Type", "Device").field("TCP Latency", device['tcpLatency'])
				queriesToSend.append(p)

	# One point per parent node with an aggregated measurement.
	for parentNode in parentNodes:
		if parentNode['tcpLatency'] is not None:
			p = Point('Latency').tag("Device", parentNode['parentNodeName']).tag("ParentNode", parentNode['parentNodeName']).tag("Type", "Parent Node").field("TCP Latency", parentNode['tcpLatency'])
			queriesToSend.append(p)

	write_api.write(bucket=influxDBBucket, record=queriesToSend)
	print("Added " + str(len(queriesToSend)) + " points to InfluxDB.")
	client.close()

	endTime = datetime.now()
	durationSeconds = round((endTime - startTime).total_seconds())
	print("Graphs updated within " + str(durationSeconds) + " seconds.")
|
||||||
|
|
||||||
|
if __name__ == '__main__':
	# Sample latency for 10 seconds when this module is run directly.
	refreshLatencyGraphs(10)
|
||||||
1633
v1.3/influxDBdashboardTemplate.json
Normal file
1633
v1.3/influxDBdashboardTemplate.json
Normal file
File diff suppressed because it is too large
Load Diff
262
v1.3/integrationUISP.py
Normal file
262
v1.3/integrationUISP.py
Normal file
@@ -0,0 +1,262 @@
|
|||||||
|
import requests
|
||||||
|
import os
|
||||||
|
import csv
|
||||||
|
import ipaddress
|
||||||
|
from ispConfig import UISPbaseURL, uispAuthToken, shapeRouterOrStation, allowedSubnets, ignoreSubnets, excludeSites, findIPv6usingMikrotik, bandwidthOverheadFactor, exceptionCPEs
|
||||||
|
import shutil
|
||||||
|
import json
|
||||||
|
if findIPv6usingMikrotik == True:
|
||||||
|
from mikrotikFindIPv6 import pullMikrotikIPv6
|
||||||
|
|
||||||
|
# UISP device models treated as customer-premises routers (shaped as CPE).
knownRouterModels = ['ACB-AC', 'ACB-ISP']
# UISP device models recognized as access points (become parent nodes).
knownAPmodels = ['LTU-Rocket', 'RP-5AC', 'RP-5AC-Gen2', 'LAP-GPS', 'Wave-AP']
|
||||||
|
|
||||||
|
def isInAllowedSubnets(inputIP, subnets=None):
	"""Return True if inputIP falls within any of the given subnets.

	Args:
		inputIP: IP address string, optionally with a /prefix suffix
			(the suffix is stripped before testing).
		subnets: iterable of CIDR strings; defaults to the module-level
			allowedSubnets imported from ispConfig (backward compatible
			with the original single-argument call).
	"""
	if subnets is None:
		subnets = allowedSubnets
	# Strip any CIDR suffix so we test the host address itself.
	if '/' in inputIP:
		inputIP = inputIP.split('/')[0]
	addr = ipaddress.ip_address(inputIP)
	return any(addr in ipaddress.ip_network(subnet) for subnet in subnets)
|
||||||
|
|
||||||
|
def createTree(sites,accessPoints,bandwidthDL,bandwidthUL,siteParentDict,siteIDtoName,sitesWithParents,currentNode):
	"""Recursively build the network.json hierarchy below currentNode.

	currentNode is a single-entry dict {name: {bandwidth fields}}. Child
	sites (those whose parent id maps to this node's name) and APs located
	at this site are attached under currentNode[name]['children'], each
	capped at min(own bandwidth, this node's bandwidth). Returns
	currentNode with its children filled in.

	NOTE(review): siteParentDict and sitesWithParents are accepted but
	never read in this body — confirm whether they can be dropped.
	"""
	# The node's name is the sole top-level key of currentNode.
	currentNodeName = list(currentNode.items())[0][0]
	# Collect ids of sites whose parent is this node.
	childrenList = []
	for site in sites:
		try:
			thisOnesParent = siteIDtoName[site['identification']['parent']['id']]
			if thisOnesParent == currentNodeName:
				childrenList.append(site['id'])
		except:
			# Site without a parent entry (top-level) — nothing to attach.
			thisOnesParent = None
	# Collect APs located at this site (known AP models only).
	aps = []
	for ap in accessPoints:
		thisOnesParent = ap['device']['site']['name']
		if thisOnesParent == currentNodeName:
			if ap['device']['model'] in knownAPmodels:
				aps.append(ap['device']['name'])
	# Build the AP children, capping each AP at this node's bandwidth.
	apDict = {}
	for ap in aps:
		maxDL = min(bandwidthDL[ap],bandwidthDL[currentNodeName])
		maxUL = min(bandwidthUL[ap],bandwidthUL[currentNodeName])
		apStruct = {
			ap :
			{
				"downloadBandwidthMbps": maxDL,
				"uploadBandwidthMbps": maxUL,
			}
		}
		apDictNew = apDict | apStruct
		apDict = apDictNew
	if bool(apDict):
		currentNode[currentNodeName]['children'] = apDict
	# Recurse into child sites and merge their subtrees.
	counter = 0
	tempChildren = {}
	for child in childrenList:
		name = siteIDtoName[child]
		maxDL = min(bandwidthDL[name],bandwidthDL[currentNodeName])
		maxUL = min(bandwidthUL[name],bandwidthUL[currentNodeName])
		childStruct = {
			name :
			{
				"downloadBandwidthMbps": maxDL,
				"uploadBandwidthMbps": maxUL,
			}
		}
		childStruct = createTree(sites,accessPoints,bandwidthDL,bandwidthUL,siteParentDict,siteIDtoName,sitesWithParents,childStruct)
		tempChildren = tempChildren | childStruct
		counter += 1
	if tempChildren != {}:
		# Merge site children with any AP children already attached above.
		if 'children' in currentNode[currentNodeName]:
			currentNode[currentNodeName]['children'] = currentNode[currentNodeName]['children'] | tempChildren
		else:
			currentNode[currentNodeName]['children'] = tempChildren
	return currentNode
|
||||||
|
|
||||||
|
def createNetworkJSON():
	"""Generate network.json from the UISP API, unless one already exists.

	Pulls sites and AP profiles from UISP, derives per-node bandwidth caps
	(operator overrides from integrationUISPbandwidths.csv take priority),
	rewrites that CSV with the merged values, then builds the topology tree
	via createTree() and dumps it to network.json.
	"""
	if os.path.isfile("network.json"):
		print("network.json already exists. Leaving in place.")
	else:
		print("Generating network.json")
		bandwidthDL = {}
		bandwidthUL = {}
		# All infrastructure sites.
		url = UISPbaseURL + "/nms/api/v2.1/sites?type=site"
		headers = {'accept':'application/json', 'x-auth-token': uispAuthToken}
		r = requests.get(url, headers=headers)
		sites = r.json()
		# AP profiles (device + site association).
		url = UISPbaseURL + "/nms/api/v2.1/devices/aps/profiles"
		headers = {'accept':'application/json', 'x-auth-token': uispAuthToken}
		r = requests.get(url, headers=headers)
		apProfiles = r.json()
		listOfTopLevelParentNodes = []
		# Operator-defined bandwidth overrides, if present, load first so
		# they win over API-derived values below.
		if os.path.isfile("integrationUISPbandwidths.csv"):
			with open('integrationUISPbandwidths.csv') as csv_file:
				csv_reader = csv.reader(csv_file, delimiter=',')
				next(csv_reader)
				for row in csv_reader:
					name, download, upload = row
					download = int(download)
					upload = int(upload)
					listOfTopLevelParentNodes.append(name)
					bandwidthDL[name] = download
					bandwidthUL[name] = upload
		# Derive AP capacity (Mbps) from each known AP's airmax overview.
		for ap in apProfiles:
			name = ap['device']['name']
			model = ap['device']['model']
			apID = ap['device']['id']
			if model in knownAPmodels:
				url = UISPbaseURL + "/nms/api/v2.1/devices/airmaxes/" + apID + '?withStations=false'
				headers = {'accept':'application/json', 'x-auth-token': uispAuthToken}
				r = requests.get(url, headers=headers)
				thisAPairmax = r.json()
				downloadCap = int(round(thisAPairmax['overview']['downlinkCapacity']/1000000))
				uploadCap = int(round(thisAPairmax['overview']['uplinkCapacity']/1000000))
				# If operator already included bandwidth definitions for this ParentNode, do not overwrite what they set
				if name not in listOfTopLevelParentNodes:
					print("Found " + name)
					listOfTopLevelParentNodes.append(name)
					bandwidthDL[name] = downloadCap
					bandwidthUL[name] = uploadCap
		# Sites without overrides default to 1000/1000 Mbps.
		for site in sites:
			name = site['identification']['name']
			if name not in excludeSites:
				# If operator already included bandwidth definitions for this ParentNode, do not overwrite what they set
				if name not in listOfTopLevelParentNodes:
					print("Found " + name)
					listOfTopLevelParentNodes.append(name)
					bandwidthDL[name] = 1000
					bandwidthUL[name] = 1000
		# Rewrite the CSV with the merged (override + derived) values.
		with open('integrationUISPbandwidths.csv', 'w') as csvfile:
			wr = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
			wr.writerow(['ParentNode', 'Download Mbps', 'Upload Mbps'])
			for device in listOfTopLevelParentNodes:
				entry = (device, bandwidthDL[device], bandwidthUL[device])
				wr.writerow(entry)
		# NOTE(review): 'accessPoints' is fetched but never used below —
		# createTree() is called with apProfiles instead; confirm intent.
		url = UISPbaseURL + "/nms/api/v2.1/devices?role=ap"
		headers = {'accept':'application/json', 'x-auth-token': uispAuthToken}
		r = requests.get(url, headers=headers)
		accessPoints = r.json()
		# Map site ids to names, record parent links, and collect
		# parentless (top-level) sites.
		siteIDtoName = {}
		siteParentDict = {}
		sitesWithParents = []
		topLevelSites = []
		for site in sites:
			siteIDtoName[site['id']] = site['identification']['name']
			try:
				siteParentDict[site['id']] = site['identification']['parent']['id']
				sitesWithParents.append(site['id'])
			except:
				siteParentDict[site['id']] = None
				if site['identification']['name'] not in excludeSites:
					topLevelSites.append(site['id'])
		# NOTE(review): only ONE top-level site is used as the tree root
		# (topLevelSites.pop()); any other parentless sites are silently
		# dropped — confirm this is the intended behavior.
		tLname = siteIDtoName[topLevelSites.pop()]
		topLevelNode = {
			tLname :
			{
				"downloadBandwidthMbps": bandwidthDL[tLname],
				"uploadBandwidthMbps": bandwidthUL[tLname],
			}
		}
		tree = createTree(sites,apProfiles, bandwidthDL, bandwidthUL, siteParentDict,siteIDtoName,sitesWithParents,topLevelNode)
		with open('network.json', 'w') as f:
			json.dump(tree, f, indent=4)
|
||||||
|
|
||||||
|
def createShaper():
	"""Generate ShapedDevices.csv from UISP client sites and devices.

	Pulls sites, client sites, and devices from the UISP API; for each
	non-suspended client site with QoS defined, finds its router-role CPE
	devices in allowed subnets and writes one CSV row per device with
	min/max shaping rates derived from the site's QoS plan.
	"""
	print("Creating ShapedDevices.csv")
	devicesToImport = []
	# Infrastructure sites, for resolving AP/parent names by id.
	url = UISPbaseURL + "/nms/api/v2.1/sites?type=site"
	headers = {'accept':'application/json', 'x-auth-token': uispAuthToken}
	r = requests.get(url, headers=headers)
	sites = r.json()
	siteIDtoName = {}
	for site in sites:
		siteIDtoName[site['id']] = site['identification']['name']
	# Client (subscriber) sites with CRM details, including QoS plans.
	url = UISPbaseURL + "/nms/api/v2.1/sites?type=client&ucrm=true&ucrmDetails=true"
	headers = {'accept':'application/json', 'x-auth-token': uispAuthToken}
	r = requests.get(url, headers=headers)
	clientSites = r.json()
	# Every device, so stations/routers can be matched to client sites.
	url = UISPbaseURL + "/nms/api/v2.1/devices"
	headers = {'accept':'application/json', 'x-auth-token': uispAuthToken}
	r = requests.get(url, headers=headers)
	allDevices = r.json()
	# Optional IPv4 -> IPv6 prefix mapping pulled from Mikrotik routers.
	ipv4ToIPv6 = {}
	if findIPv6usingMikrotik:
		ipv4ToIPv6 = pullMikrotikIPv6()
	for uispClientSite in clientSites:
		#if (uispClientSite['identification']['status'] == 'active') and (uispClientSite['identification']['suspended'] == False):
		if (uispClientSite['identification']['suspended'] == False):
			foundCPEforThisClientSite = False
			if (uispClientSite['qos']['downloadSpeed']) and (uispClientSite['qos']['uploadSpeed']):
				# Plan rates come from UISP in bps; convert to Mbps.
				downloadSpeedMbps = int(round(uispClientSite['qos']['downloadSpeed']/1000000))
				uploadSpeedMbps = int(round(uispClientSite['qos']['uploadSpeed']/1000000))
				address = uispClientSite['description']['address']
				uispClientSiteID = uispClientSite['id']

				UCRMclientID = uispClientSite['ucrm']['client']['id']
				siteName = uispClientSite['identification']['name']
				AP = 'none'
				thisSiteDevices = []
				#Look for station devices, use those to find AP name
				for device in allDevices:
					if device['identification']['site'] != None:
						if device['identification']['site']['id'] == uispClientSite['id']:
							deviceName = device['identification']['name']
							deviceRole = device['identification']['role']
							deviceModel = device['identification']['model']
							deviceModelName = device['identification']['modelName']
							if (deviceRole == 'station'):
								if device['attributes']['apDevice']:
									AP = device['attributes']['apDevice']['name']
				#Look for router devices, use those as shaped CPE
				for device in allDevices:
					if device['identification']['site'] != None:
						if device['identification']['site']['id'] == uispClientSite['id']:
							deviceModel = device['identification']['model']
							deviceName = device['identification']['name']
							deviceRole = device['identification']['role']
							if device['identification']['mac']:
								deviceMAC = device['identification']['mac'].upper()
							else:
								deviceMAC = ''
							if (deviceRole == 'router') or (deviceModel in knownRouterModels):
								ipv4 = device['ipAddress']
								# Strip any CIDR suffix from the WAN IP.
								if '/' in ipv4:
									ipv4 = ipv4.split("/")[0]
								ipv6 = ''
								if ipv4 in ipv4ToIPv6.keys():
									ipv6 = ipv4ToIPv6[ipv4]
								# Skip devices behind NAT / outside shapable ranges.
								if isInAllowedSubnets(ipv4):
									deviceModel = device['identification']['model']
									deviceModelName = device['identification']['modelName']
									# Max = plan rate scaled by the configured
									# overhead cushion; min = 98% of max.
									maxSpeedDown = round(bandwidthOverheadFactor*downloadSpeedMbps)
									maxSpeedUp = round(bandwidthOverheadFactor*uploadSpeedMbps)
									minSpeedDown = min(round(maxSpeedDown*.98),maxSpeedDown)
									minSpeedUp = min(round(maxSpeedUp*.98),maxSpeedUp)
									#Customers directly connected to Sites
									if deviceName in exceptionCPEs.keys():
										AP = exceptionCPEs[deviceName]
									if AP == 'none':
										try:
											AP = siteIDtoName[uispClientSite['identification']['parent']['id']]
										except:
											AP = 'none'
									devicesToImport.append((uispClientSiteID, address, '', deviceName, AP, deviceMAC, ipv4, ipv6, str(minSpeedDown), str(minSpeedUp), str(maxSpeedDown),str(maxSpeedUp),''))
									foundCPEforThisClientSite = True
			else:
				print("Failed to import devices from " + uispClientSite['description']['address'] + ". Missing QoS.")
			if foundCPEforThisClientSite != True:
				print("Failed to import devices for " + uispClientSite['description']['address'])

	# Write the collected rows in LibreQoS ShapedDevices.csv format.
	with open('ShapedDevices.csv', 'w') as csvfile:
		wr = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
		wr.writerow(['Circuit ID', 'Circuit Name', 'Device ID', 'Device Name', 'Parent Node', 'MAC', 'IPv4', 'IPv6', 'Download Min', 'Upload Min', 'Download Max', 'Upload Max', 'Comment'])
		for device in devicesToImport:
			wr.writerow(device)
|
||||||
|
|
||||||
|
def importFromUISP():
	"""Run the full UISP import: topology first, then shaped devices."""
	createNetworkJSON()
	createShaper()
|
||||||
|
|
||||||
|
if __name__ == '__main__':
	# Run the full UISP import when executed directly.
	importFromUISP()
|
||||||
74
v1.3/ispConfig.example.py
Normal file
74
v1.3/ispConfig.example.py
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
# 'fq_codel' or 'cake diffserv4'
# 'cake diffserv4' is recommended

# fqOrCAKE = 'fq_codel'
fqOrCAKE = 'cake diffserv4'

# How many Mbps are available to the edge of this network
upstreamBandwidthCapacityDownloadMbps = 1000
upstreamBandwidthCapacityUploadMbps = 1000

# Devices in ShapedDevices.csv without a defined ParentNode will be placed under a generated
# parent node, evenly spread out across CPU cores. Here, define the bandwidth limit for each
# of those generated parent nodes.
generatedPNDownloadMbps = 1000
generatedPNUploadMbps = 1000

# Interface connected to core router
interfaceA = 'eth1'

# Interface connected to edge router
interfaceB = 'eth2'

# Allow shell commands. False causes commands print to console only without being executed.
# MUST BE ENABLED FOR PROGRAM TO FUNCTION
enableActualShellCommands = True

# Add 'sudo' before execution of any shell commands. May be required depending on distribution and environment.
runShellCommandsAsSudo = False

# Allows overriding queues / CPU cores used. When set to 0, the max possible queues / CPU cores are utilized. Please leave as 0.
queuesAvailableOverride = 0

# Bandwidth Graphing
bandwidthGraphingEnabled = True
influxDBurl = "http://localhost:8086"
influxDBBucket = "libreqos"
influxDBOrg = "Your ISP Name Here"
influxDBtoken = ""

# Latency Graphing
latencyGraphingEnabled = False
ppingLocation = "pping"

# NMS/CRM Integration
# If a device shows a WAN IP within these subnets, assume they are behind NAT / un-shapable, and ignore them
ignoreSubnets = ['192.168.0.0/16']
allowedSubnets = ['100.64.0.0/10']
# Optional UISP integration
automaticImportUISP = False
# Everything before /nms/ on your UISP instance
# Bug fix: was 'uispBaseURL' — integrationUISP.py imports 'UISPbaseURL'
# from this module, so the lowercase spelling caused an ImportError.
UISPbaseURL = 'https://examplesite.com'
# UISP Auth Token
uispAuthToken = ''
# UISP | Whether to shape router at customer premises, or instead shape the station radio. When station radio is in
# router mode, use 'station'. Otherwise, use 'router'.
shapeRouterOrStation = 'router'
# List any sites that should not be included, with each site name surrounded by '' and separated by commas
excludeSites = []
# If you use IPv6, this can be used to find associated IPv6 prefixes for your clients' IPv4 addresses, and match them to those devices
findIPv6usingMikrotik = False
# If you want to provide a safe cushion for speed test results to prevent customer complaints, you can set this to 1.15 (15% above plan rate).
# If not, you can leave as 1.0
bandwidthOverheadFactor = 1.0
# For edge cases, set the respective ParentNode for these CPEs
exceptionCPEs = {}
#  'CPE-SomeLocation1': 'AP-SomeLocation1',
#  'CPE-SomeLocation2': 'AP-SomeLocation2',
#}

# API Auth
apiUsername = "testUser"
apiPassword = "changeme8343486806"
apiHostIP = "127.0.0.1"
# NOTE(review): likely a typo for 'apiHostPort' — kept as-is because other
# modules may import this exact name; confirm before renaming.
apiHostPost = 5000
|
||||||
213
v1.3/lqTools.py
Executable file
213
v1.3/lqTools.py
Executable file
@@ -0,0 +1,213 @@
|
|||||||
|
#!/usr/bin/python3

import argparse
import csv
import io
import ipaddress
import json
import logging
import os
import os.path
import subprocess
import warnings

from ispConfig import interfaceA, interfaceB, enableActualShellCommands
|
def shell(command):
    """Run *command* as a subprocess and echo its output line by line.

    When enableActualShellCommands is False this is a dry run: the command
    is only printed and nothing is executed.
    """
    if not enableActualShellCommands:
        print(command)
        return
    logging.info(command)
    proc = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE)
    for outputLine in io.TextIOWrapper(proc.stdout, encoding="utf-8"):
        print(outputLine)
||||||
|
|
||||||
|
def safeShell(command):
    """Run *command*, print its output, and report whether it succeeded.

    Returns False when the output contains a known netlink/kernel error
    marker, True otherwise. In dry-run mode the command is only printed
    and True is returned.
    """
    if not enableActualShellCommands:
        print(command)
        return True
    ranCleanly = True
    proc = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE)
    for outputLine in io.TextIOWrapper(proc.stdout, encoding="utf-8"):
        print(outputLine)
        sawKernelError = ("RTNETLINK answers" in outputLine) or ("We have an error talking to the kernel" in outputLine)
        if sawKernelError:
            ranCleanly = False
    return ranCleanly
||||||
|
|
||||||
|
def getQdiscForIPaddress(ipAddress):
    """Look up the tc class ('qdisc') ID for a subscriber device IP.

    Scans statsByCircuit.json for a device whose IPv4 or IPv6 list
    contains *ipAddress*. Returns that circuit's 'qdisc' value, or None
    when nothing matches. If several circuits list the same IP, the last
    match in file order wins (same as the original implementation).
    """
    with open('statsByCircuit.json', 'r') as statsFile:
        subscriberCircuits = json.load(statsFile)
    matchedQdisc = None
    for circuit in subscriberCircuits:
        for device in circuit['devices']:
            if (ipAddress in device['ipv4s']) or (ipAddress in device['ipv6s']):
                matchedQdisc = circuit['qdisc']
    return matchedQdisc
||||||
|
|
||||||
|
def printStatsFromIP(ipAddress):
    """Print `tc -s qdisc` statistics for the circuit matching *ipAddress*.

    Resolves the circuit's class ID via statsByCircuit.json and dumps the
    qdisc statistics for that class on both shaper interfaces. Prints an
    error message when the IP is not found.
    """
    classID = getQdiscForIPaddress(ipAddress)
    if classID is None:
        print("Invalid IP address provided")
        return
    for interface in (interfaceA, interfaceB):
        command = 'tc -s qdisc show dev ' + interface + ' parent ' + classID
        proc = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE)
        for outputLine in io.TextIOWrapper(proc.stdout, encoding="utf-8"):
            print(outputLine.replace('\n', ''))
||||||
|
|
||||||
|
def printCircuitClassInfo(ipAddress):
    """Print the active shaping plan (HTB rate/ceil and burst values) for an IP.

    Resolves the circuit's tc class ID via statsByCircuit.json, then parses
    `tc class show` output on both interfaces: interfaceA supplies the
    download rate/ceil plus burst/cburst, interfaceB the upload rate/ceil.
    Prints an error message when the IP is not found.
    """
    qDiscID = getQdiscForIPaddress(ipAddress)
    if qDiscID is None:
        print("Invalid IP address provided")
        return
    print("IP: " + ipAddress + " | Class ID: " + qDiscID)
    print()
    downloadMin = ''
    downloadMax = ''
    uploadMin = ''
    uploadMax = ''
    cburst = ''
    burst = ''
    for interface in [interfaceA, interfaceB]:
        command = 'tc class show dev ' + interface + ' classid ' + qDiscID
        proc = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE)
        for line in io.TextIOWrapper(proc.stdout, encoding="utf-8"):
            # Only the HTB class line carries the rate/ceil/burst fields
            if "htb" in line:
                if interface == interfaceA:
                    downloadMin = line.split(' rate ')[1].split(' ')[0]
                    downloadMax = line.split(' ceil ')[1].split(' ')[0]
                    burst = line.split(' burst ')[1].split(' ')[0]
                    # cburst is the final field on the line
                    cburst = line.split(' cburst ')[1].replace('\n', '')
                else:
                    uploadMin = line.split(' rate ')[1].split(' ')[0]
                    uploadMax = line.split(' ceil ')[1].split(' ')[0]
    print("Download rate/ceil: " + downloadMin + "/" + downloadMax)
    print("Upload rate/ceil: " + uploadMin + "/" + uploadMax)
    print("burst/cburst: " + burst + "/" + cburst)
||||||
|
|
||||||
|
def findClassIDForCircuitByIP(data, inputIP, classID):
    """Recursively search the queuing structure for *inputIP*.

    Walks every node (and its children) examining each circuit's device
    IPv4/IPv6 lists; when a device carries *inputIP*, that circuit's
    'qdisc' class ID replaces *classID*. Returns the last match found, or
    the *classID* passed in when nothing matches.
    """
    for nodeName in data:
        node = data[nodeName]
        for circuit in node.get('circuits', []):
            for device in circuit['devices']:
                ipv4Hit = bool(device['ipv4s']) and inputIP in device['ipv4s']
                ipv6Hit = bool(device['ipv6s']) and inputIP in device['ipv6s']
                if ipv4Hit or ipv6Hit:
                    classID = circuit['qdisc']
        # Descend into child nodes, threading the running result through
        if 'children' in node:
            classID = findClassIDForCircuitByIP(node['children'], inputIP, classID)
    return classID
||||||
|
|
||||||
|
def changeQueuingStructureCircuitBandwidth(data, classid, minDownload, minUpload, maxDownload, maxUpload):
    """Overwrite a circuit's bandwidth figures everywhere it appears.

    Recursively walks the queuing structure; every circuit whose 'qdisc'
    equals *classid* has its min/max download/upload fields replaced with
    the supplied values. Returns the (mutated) structure.
    """
    for nodeName in data:
        node = data[nodeName]
        for circuit in node.get('circuits', []):
            if circuit['qdisc'] != classid:
                continue
            circuit['minDownload'] = minDownload
            circuit['minUpload'] = minUpload
            circuit['maxDownload'] = maxDownload
            circuit['maxUpload'] = maxUpload
        # Apply the same change to any child nodes
        if 'children' in node:
            node['children'] = changeQueuingStructureCircuitBandwidth(node['children'], classid, minDownload, minUpload, maxDownload, maxUpload)
    return data
||||||
|
|
||||||
|
def findClassIDForCircuitByID(data, inputID, classID):
    """Recursively resolve a circuit ID to its tc class ('qdisc') ID.

    Returns the 'qdisc' of the last circuit whose 'circuitID' equals
    *inputID*, or the *classID* passed in when no circuit matches.
    """
    for nodeName in data:
        node = data[nodeName]
        for circuit in node.get('circuits', []):
            if circuit['circuitID'] == inputID:
                classID = circuit['qdisc']
        # Descend into child nodes, threading the running result through
        if 'children' in node:
            classID = findClassIDForCircuitByID(node['children'], inputID, classID)
    return classID
||||||
|
|
||||||
|
def _applyCircuitBandwidthChange(queuingStructure, classID, minDownload, minUpload, maxDownload, maxUpload):
    """Apply new HTB rate/ceil for *classID* via tc and persist the change.

    Runs `tc class change` on both interfaces, raises ValueError if either
    command reports a kernel/netlink error, then writes the updated
    structure back to queuingStructure.json.
    """
    didThisCommandRunSafely_1 = safeShell("tc class change dev " + interfaceA + " classid " + classID + " htb rate " + str(minDownload) + "Mbit ceil " + str(maxDownload) + "Mbit")
    didThisCommandRunSafely_2 = safeShell("tc class change dev " + interfaceB + " classid " + classID + " htb rate " + str(minUpload) + "Mbit ceil " + str(maxUpload) + "Mbit")
    if (didThisCommandRunSafely_1 == False) or (didThisCommandRunSafely_2 == False):
        raise ValueError('Execution had errors. Halting now.')
    queuingStructure = changeQueuingStructureCircuitBandwidth(queuingStructure, classID, minDownload, minUpload, maxDownload, maxUpload)
    with open('queuingStructure.json', 'w') as outfile:
        json.dump(queuingStructure, outfile, indent=4)


def changeCircuitBandwidthGivenID(circuitID, minDownload, minUpload, maxDownload, maxUpload):
    """Change the live and persisted bandwidth plan for a circuit by circuit ID.

    Rates are in Mbps. Prints an error when the circuit ID is unknown.
    Raises ValueError when the underlying tc commands fail.
    """
    with open('queuingStructure.json') as file:
        queuingStructure = json.load(file)
    classID = findClassIDForCircuitByID(queuingStructure, circuitID, None)
    if classID:
        _applyCircuitBandwidthChange(queuingStructure, classID, minDownload, minUpload, maxDownload, maxUpload)
    else:
        print("Unable to find associated Class ID")


def changeCircuitBandwidthGivenIP(ipAddress, minDownload, minUpload, maxDownload, maxUpload):
    """Change the live and persisted bandwidth plan for a circuit by device IP.

    Rates are in Mbps. Prints an error when the IP is unknown.
    Raises ValueError when the underlying tc commands fail.
    """
    with open('queuingStructure.json') as file:
        queuingStructure = json.load(file)
    classID = findClassIDForCircuitByIP(queuingStructure, ipAddress, None)
    if classID:
        _applyCircuitBandwidthChange(queuingStructure, classID, minDownload, minUpload, maxDownload, maxUpload)
    else:
        print("Unable to find associated Class ID")
||||||
|
|
||||||
|
if __name__ == '__main__':
    # CLI entry point: each subcommand maps to one of the functions above.
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest='command')

    # Change a circuit's HTB rates, addressed by its circuit ID
    changeBW = subparsers.add_parser('change-circuit-bandwidth', help='Change bandwidth rates of a given circuit using circuit ID')
    changeBW.add_argument('min-download', type=int, )
    changeBW.add_argument('min-upload', type=int,)
    changeBW.add_argument('max-download', type=int,)
    changeBW.add_argument('max-upload', type=int,)
    changeBW.add_argument('circuit-id', type=str,)

    # Same operation, but addressed by a device IP on the circuit
    changeBWip = subparsers.add_parser('change-circuit-bandwidth-using-ip', help='Change bandwidth rates of a given circuit using IP')
    changeBWip.add_argument('min-download', type=int,)
    changeBWip.add_argument('min-upload', type=int,)
    changeBWip.add_argument('max-download', type=int,)
    changeBWip.add_argument('max-upload', type=int,)
    changeBWip.add_argument('ip-address', type=str,)

    planFromIP = subparsers.add_parser('show-active-plan-from-ip', help="Provide tc class info by IP",)
    planFromIP.add_argument('ip', type=str,)
    statsFromIP = subparsers.add_parser('tc-statistics-from-ip', help="Provide tc qdisc stats by IP",)
    statsFromIP.add_argument('ip', type=str,)

    args = parser.parse_args()

    # The dashed positional names are not valid identifiers, so getattr is used.
    if (args.command == 'change-circuit-bandwidth'):
        changeCircuitBandwidthGivenID(getattr(args, 'circuit-id'), getattr(args, 'min-download'), getattr(args, 'min-upload'), getattr(args, 'max-download'), getattr(args, 'max-upload'))
    elif(args.command == 'change-circuit-bandwidth-using-ip'):
        # Bug fix: this subcommand's positional is named 'ip-address', not 'ip';
        # the original getattr(args, 'ip') raised AttributeError at runtime.
        changeCircuitBandwidthGivenIP(getattr(args, 'ip-address'), getattr(args, 'min-download'), getattr(args, 'min-upload'), getattr(args, 'max-download'), getattr(args, 'max-upload'))
    elif (args.command == 'tc-statistics-from-ip'):
        printStatsFromIP(args.ip)
    elif (args.command == 'show-active-plan-from-ip'):
        printCircuitClassInfo(args.ip)
    else:
        print("Invalid parameters. Use --help to learn more.")
||||||
2
v1.3/mikrotikDHCPRouterList.csv
Normal file
2
v1.3/mikrotikDHCPRouterList.csv
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
Router Name / ID,IP,API Username,API Password, API Port
|
||||||
|
main,100.64.0.1,admin,password,8728
|
||||||
|
52
v1.3/mikrotikFindIPv6.py
Normal file
52
v1.3/mikrotikFindIPv6.py
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
#!/usr/bin/python3
|
||||||
|
import routeros_api
|
||||||
|
import csv
|
||||||
|
|
||||||
|
def pullMikrotikIPv6():
    """Build an IPv4 -> IPv6 mapping for clients of the Mikrotik routers
    listed in mikrotikDHCPRouterList.csv.

    For each router, correlates DHCPv4 leases (MAC -> IPv4), DHCPv6
    bindings (link-local client-address -> routed IPv6 address) and the
    IPv6 neighbor table (MAC -> link-local address) so that each client's
    IPv4 can be paired with its routed IPv6 address.

    Returns a dict mapping IPv4 address strings to IPv6 address strings.
    """
    ipv4ToIPv6 = {}
    routerList = []
    with open('mikrotikDHCPRouterList.csv') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        next(csv_reader)  # skip the header row
        for row in csv_reader:
            RouterName, IP, Username, Password, apiPort = row
            routerList.append((RouterName, IP, Username, Password, apiPort))
    for router in routerList:
        # Bug fix: each router tuple has five fields; the original code
        # unpacked only four, which raised ValueError on every run, and
        # then used the apiPort left over from the CSV loop.
        RouterName, IP, inputUsername, inputPassword, apiPort = router
        # apiPort comes from the CSV as a string; the API expects a number.
        connection = routeros_api.RouterOsApiPool(IP, username=inputUsername, password=inputPassword, port=int(apiPort), use_ssl=False, ssl_verify=False, ssl_verify_hostname=False, plaintext_login=True)
        api = connection.get_api()
        macToIPv4 = {}
        macToIPv6 = {}
        clientAddressToIPv6 = {}
        # MAC -> IPv4 from the DHCPv4 lease table
        list_dhcp = api.get_resource('/ip/dhcp-server/lease')
        entries = list_dhcp.get()
        for entry in entries:
            try:
                macToIPv4[entry['mac-address']] = entry['address']
            except KeyError:
                pass  # lease entry missing MAC or address
        # link-local client-address -> routed IPv6 from DHCPv6 bindings
        list_dhcp = api.get_resource('/ipv6/dhcp-server/binding')
        entries = list_dhcp.get()
        for entry in entries:
            try:
                clientAddressToIPv6[entry['client-address']] = entry['address']
            except KeyError:
                pass  # binding entry missing client-address or address
        # MAC -> routed IPv6 via the IPv6 neighbor table
        list_dhcp = api.get_resource('/ipv6/neighbor')
        entries = list_dhcp.get()
        for entry in entries:
            try:
                realIPv6 = clientAddressToIPv6[entry['address']]
                macToIPv6[entry['mac-address']] = realIPv6
            except KeyError:
                pass  # neighbor is not a known DHCPv6 client
        # Join the two maps on MAC address
        for mac, ipv6 in macToIPv6.items():
            try:
                ipv4 = macToIPv4[mac]
                ipv4ToIPv6[ipv4] = ipv6
            except KeyError:
                print('Failed to find associated IPv4 for ' + ipv6)
    return ipv4ToIPv6
||||||
|
|
||||||
|
if __name__ == '__main__':
    # When run standalone, print the discovered IPv4 -> IPv6 mapping.
    print(pullMikrotikIPv6())
||||||
75
v1.3/network.example.json
Normal file
75
v1.3/network.example.json
Normal file
@@ -0,0 +1,75 @@
|
|||||||
|
{
|
||||||
|
"Site_1":
|
||||||
|
{
|
||||||
|
"downloadBandwidthMbps":1000,
|
||||||
|
"uploadBandwidthMbps":1000,
|
||||||
|
"children":
|
||||||
|
{
|
||||||
|
"AP_A":
|
||||||
|
{
|
||||||
|
"downloadBandwidthMbps":500,
|
||||||
|
"uploadBandwidthMbps":500
|
||||||
|
},
|
||||||
|
"Site_3":
|
||||||
|
{
|
||||||
|
"downloadBandwidthMbps":500,
|
||||||
|
"uploadBandwidthMbps":500,
|
||||||
|
"children":
|
||||||
|
{
|
||||||
|
"PoP_5":
|
||||||
|
{
|
||||||
|
"downloadBandwidthMbps":200,
|
||||||
|
"uploadBandwidthMbps":200,
|
||||||
|
"children":
|
||||||
|
{
|
||||||
|
"AP_9":
|
||||||
|
{
|
||||||
|
"downloadBandwidthMbps":120,
|
||||||
|
"uploadBandwidthMbps":120
|
||||||
|
},
|
||||||
|
"PoP_6":
|
||||||
|
{
|
||||||
|
"downloadBandwidthMbps":60,
|
||||||
|
"uploadBandwidthMbps":60,
|
||||||
|
"children":
|
||||||
|
{
|
||||||
|
"AP_11":
|
||||||
|
{
|
||||||
|
"downloadBandwidthMbps":30,
|
||||||
|
"uploadBandwidthMbps":30
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Site_2":
|
||||||
|
{
|
||||||
|
"downloadBandwidthMbps":500,
|
||||||
|
"uploadBandwidthMbps":500,
|
||||||
|
"children":
|
||||||
|
{
|
||||||
|
"PoP_1":
|
||||||
|
{
|
||||||
|
"downloadBandwidthMbps":200,
|
||||||
|
"uploadBandwidthMbps":200,
|
||||||
|
"children":
|
||||||
|
{
|
||||||
|
"AP_7":
|
||||||
|
{
|
||||||
|
"downloadBandwidthMbps":100,
|
||||||
|
"uploadBandwidthMbps":100
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"AP_1":
|
||||||
|
{
|
||||||
|
"downloadBandwidthMbps":150,
|
||||||
|
"uploadBandwidthMbps":150
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
43
v1.3/scheduler.py
Normal file
43
v1.3/scheduler.py
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
import time
|
||||||
|
import schedule
|
||||||
|
from LibreQoS import refreshShapers, refreshShapersUpdateOnly
|
||||||
|
from graphBandwidth import refreshBandwidthGraphs
|
||||||
|
from graphLatency import refreshLatencyGraphs
|
||||||
|
from ispConfig import bandwidthGraphingEnabled, latencyGraphingEnabled, automaticImportUISP
|
||||||
|
if automaticImportUISP:
|
||||||
|
from integrationUISP import importFromUISP
|
||||||
|
|
||||||
|
def importAndShapeFullReload():
    """Optionally re-import the network from UISP, then fully rebuild all shapers.

    The import is best-effort: a failure is reported but does not stop the
    shaper rebuild.
    """
    if automaticImportUISP:
        try:
            importFromUISP()
        except Exception:
            # Bug fix: a bare `except:` also swallowed KeyboardInterrupt
            # and SystemExit, making the scheduler hard to stop.
            print("Failed to import from UISP")
    refreshShapers()
||||||
|
|
||||||
|
def importAndShapePartialReload():
    """Optionally re-import the network from UISP, then apply an incremental shaper update.

    The import is best-effort: a failure is reported but does not stop the
    incremental update.
    """
    if automaticImportUISP:
        try:
            importFromUISP()
        except Exception:
            # Bug fix: a bare `except:` also swallowed KeyboardInterrupt
            # and SystemExit, making the scheduler hard to stop.
            print("Failed to import from UISP")
    refreshShapersUpdateOnly()
||||||
|
|
||||||
|
if __name__ == '__main__':
    # Build the full shaper hierarchy once at startup.
    importAndShapeFullReload()
    # Full reload nightly at 04:00; lighter incremental update every 30 minutes.
    schedule.every().day.at("04:00").do(importAndShapeFullReload)
    schedule.every(30).minutes.do(importAndShapePartialReload)
    while True:
        schedule.run_pending()
        if bandwidthGraphingEnabled:
            try:
                refreshBandwidthGraphs()
            except:
                print("Failed to update bandwidth graphs")
        if latencyGraphingEnabled:
            try:
                refreshLatencyGraphs(10)
            except:
                print("Failed to update latency graphs")
        else:
            # NOTE(review): the sleep only happens when latency graphing is
            # off; when it is on, refreshLatencyGraphs(10) presumably supplies
            # the inter-iteration delay — confirm it blocks for ~10 seconds,
            # otherwise this loop spins tightly.
            time.sleep(10)
||||||
1
v1.3/xdp-cpumap-tc
Submodule
1
v1.3/xdp-cpumap-tc
Submodule
Submodule v1.3/xdp-cpumap-tc added at 12ea5973ba
Reference in New Issue
Block a user