add v1.2 alpha content

This commit is contained in:
lemnonheads 2022-09-05 07:54:46 -06:00
parent a6aa909000
commit d7c8ff4530
9 changed files with 1928 additions and 1 deletion

299
v1.2/LibreQoS.py Normal file

@@ -0,0 +1,299 @@
# v1.2 alpha
import csv
import io
import ipaddress
import json
import os
import subprocess
from datetime import datetime
import multiprocessing
from ispConfig import fqOrCAKE, upstreamBandwidthCapacityDownloadMbps, upstreamBandwidthCapacityUploadMbps, \
defaultClassCapacityDownloadMbps, defaultClassCapacityUploadMbps, interfaceA, interfaceB, enableActualShellCommands, \
runShellCommandsAsSudo
def shell(command):
if enableActualShellCommands:
if runShellCommandsAsSudo:
command = 'sudo ' + command
commands = command.split(' ')
print(command)
proc = subprocess.Popen(commands, stdout=subprocess.PIPE)
for line in io.TextIOWrapper(proc.stdout, encoding="utf-8"): # or another encoding
print(line)
else:
print(command)
def clearPriorSettings(interfaceA, interfaceB):
if enableActualShellCommands:
shell('tc filter delete dev ' + interfaceA)
shell('tc filter delete dev ' + interfaceA + ' root')
shell('tc qdisc delete dev ' + interfaceA + ' root')
shell('tc qdisc delete dev ' + interfaceA)
shell('tc filter delete dev ' + interfaceB)
shell('tc filter delete dev ' + interfaceB + ' root')
shell('tc qdisc delete dev ' + interfaceB + ' root')
shell('tc qdisc delete dev ' + interfaceB)
def refreshShapers():
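# Pad plan rates by ~9%; presumably this compensates for TCP/IP header overhead so shaped rates line up with speed-test goodput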
tcpOverheadFactor = 1.09
# Load Subscriber Circuits & Devices
subscriberCircuits = []
knownCircuitIDs = []
with open('ShapedDevices.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
next(csv_reader)
for row in csv_reader:
circuitID, circuitName, deviceID, deviceName, ParentNode, mac, ipv4, ipv6, downloadMin, uploadMin, downloadMax, uploadMax = row
if circuitID != "":
if circuitID in knownCircuitIDs:
for circuit in subscriberCircuits:
if circuit['circuitID'] == circuitID:
if circuit['ParentNode'] != ParentNode:
errorMessageString = "Device " + deviceName + " with deviceID " + deviceID + " had different Parent Node from other devices of circuit ID #" + circuitID
raise ValueError(errorMessageString)
devicesListForCircuit = circuit['devices']
thisDevice = {
"deviceID": deviceID,
"deviceName": deviceName,
"mac": mac,
"ipv4": ipv4,
"ipv6": ipv6,
}
devicesListForCircuit.append(thisDevice)
circuit['devices'] = devicesListForCircuit
else:
knownCircuitIDs.append(circuitID)
ipv4 = ipv4.strip()
ipv6 = ipv6.strip()
if ParentNode == "":
ParentNode = "none"
ParentNode = ParentNode.strip()
deviceListForCircuit = []
thisDevice = {
"deviceID": deviceID,
"deviceName": deviceName,
"mac": mac,
"ipv4": ipv4,
"ipv6": ipv6,
}
deviceListForCircuit.append(thisDevice)
thisCircuit = {
"circuitID": circuitID,
"circuitName": circuitName,
"ParentNode": ParentNode,
"devices" = deviceListForCircuit
"downloadMin": round(int(downloadMin)*tcpOverheadFactor),
"uploadMin": round(int(uploadMin)*tcpOverheadFactor),
"downloadMax": round(int(downloadMax)*tcpOverheadFactor),
"uploadMax": round(int(uploadMax)*tcpOverheadFactor),
"qdisc": '',
}
subscriberCircuits.append(thisCircuit)
else:
ipv4 = ipv4.strip()
ipv6 = ipv6.strip()
if ParentNode == "":
ParentNode = "none"
ParentNode = ParentNode.strip()
deviceListForCircuit = []
thisDevice = {
"deviceID": deviceID,
"deviceName": deviceName,
"mac": mac,
"ipv4": ipv4,
"ipv6": ipv6,
}
deviceListForCircuit.append(thisDevice)
thisCircuit = {
"circuitID": circuitID,
"circuitName": circuitName,
"ParentNode": ParentNode,
"devices" = deviceListForCircuit
"downloadMin": round(int(downloadMin)*tcpOverheadFactor),
"uploadMin": round(int(uploadMin)*tcpOverheadFactor),
"downloadMax": round(int(downloadMax)*tcpOverheadFactor),
"uploadMax": round(int(uploadMax)*tcpOverheadFactor),
"qdisc": '',
}
subscriberCircuits.append(thisCircuit)
#Load network hierarchy
with open('network.json', 'r') as j:
network = json.loads(j.read())
#Find the bandwidth minimums for each node by combining the minimums of circuits lower in that node's hierarchy
def findBandwidthMins(data, depth):
tabs = ' ' * depth
minDownload = 0
minUpload = 0
for elem in data:
for circuit in subscriberCircuits:
if elem == circuit['ParentNode']:
minDownload += circuit['downloadMin']
minUpload += circuit['uploadMin']
if 'children' in data[elem]:
minDL, minUL = findBandwidthMins(data[elem]['children'], depth+1)
minDownload += minDL
minUpload += minUL
data[elem]['downloadBandwidthMbpsMin'] = minDownload
data[elem]['uploadBandwidthMbpsMin'] = minUpload
return minDownload, minUpload
minDownload, minUpload = findBandwidthMins(network, 0)
#Clear Prior Settings
clearPriorSettings(interfaceA, interfaceB)
# Find queues and CPU cores available. Use min between those two as queuesAvailable
queuesAvailable = 0
path = '/sys/class/net/' + interfaceA + '/queues/'
directory_contents = os.listdir(path)
for item in directory_contents:
if "tx-" in str(item):
queuesAvailable += 1
print("NIC queues:\t" + str(queuesAvailable))
cpuCount = multiprocessing.cpu_count()
print("CPU cores:\t" + str(cpuCount))
queuesAvailable = min(queuesAvailable,cpuCount)
# XDP-CPUMAP-TC
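# xdp-cpumap-tc steers each subscriber IP to a CPU (and matching tc major handle) in XDP, so per-host
# classification happens before tc rather than through long tc filter chains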
shell('./xdp-cpumap-tc/bin/xps_setup.sh -d ' + interfaceA + ' --default --disable')
shell('./xdp-cpumap-tc/bin/xps_setup.sh -d ' + interfaceB + ' --default --disable')
shell('./xdp-cpumap-tc/src/xdp_iphash_to_cpu --dev ' + interfaceA + ' --lan')
shell('./xdp-cpumap-tc/src/xdp_iphash_to_cpu --dev ' + interfaceB + ' --wan')
shell('./xdp-cpumap-tc/src/xdp_iphash_to_cpu_cmdline --clear')
shell('./xdp-cpumap-tc/src/tc_classify --dev-egress ' + interfaceA)
shell('./xdp-cpumap-tc/src/tc_classify --dev-egress ' + interfaceB)
# Create MQ qdisc for each interface
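# mq exposes one class per hardware queue; each gets its own HTB tree (tc handles are hexadecimal, hence hex(queue+1)),
# which spreads shaping work across CPU cores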
thisInterface = interfaceA
shell('tc qdisc replace dev ' + thisInterface + ' root handle 7FFF: mq')
for queue in range(queuesAvailable):
shell('tc qdisc add dev ' + thisInterface + ' parent 7FFF:' + hex(queue+1) + ' handle ' + hex(queue+1) + ': htb default 2')
shell('tc class add dev ' + thisInterface + ' parent ' + hex(queue+1) + ': classid ' + hex(queue+1) + ':1 htb rate '+ str(upstreamBandwidthCapacityDownloadMbps) + 'mbit ceil ' + str(upstreamBandwidthCapacityDownloadMbps) + 'mbit')
shell('tc qdisc add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':1 ' + fqOrCAKE)
# Default class - traffic gets passed through this limiter with lower priority if it is not otherwise classified by ShapedDevices.csv.
# Only 1/4 of defaultClassCapacity is guaranteed (to prevent hitting the ceiling of the upstream); for the most part it serves as an "up to" ceiling.
# The default class can use up to defaultClassCapacityDownloadMbps when that bandwidth isn't being used by known hosts.
shell('tc class add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':1 classid ' + hex(queue+1) + ':2 htb rate ' + str(defaultClassCapacityDownloadMbps/4) + 'mbit ceil ' + str(defaultClassCapacityDownloadMbps) + 'mbit prio 5')
shell('tc qdisc add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':2 ' + fqOrCAKE)
thisInterface = interfaceB
shell('tc qdisc replace dev ' + thisInterface + ' root handle 7FFF: mq')
for queue in range(queuesAvailable):
shell('tc qdisc add dev ' + thisInterface + ' parent 7FFF:' + hex(queue+1) + ' handle ' + hex(queue+1) + ': htb default 2')
shell('tc class add dev ' + thisInterface + ' parent ' + hex(queue+1) + ': classid ' + hex(queue+1) + ':1 htb rate '+ str(upstreamBandwidthCapacityUploadMbps) + 'mbit ceil ' + str(upstreamBandwidthCapacityUploadMbps) + 'mbit')
shell('tc qdisc add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':1 ' + fqOrCAKE)
# Default class - traffic gets passed through this limiter with lower priority if it is not otherwise classified by ShapedDevices.csv.
# Only 1/4 of defaultClassCapacity is guaranteed (to prevent hitting the ceiling of the upstream); for the most part it serves as an "up to" ceiling.
# The default class can use up to defaultClassCapacityUploadMbps when that bandwidth isn't being used by known hosts.
shell('tc class add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':1 classid ' + hex(queue+1) + ':2 htb rate ' + str(defaultClassCapacityUploadMbps/4) + 'mbit ceil ' + str(defaultClassCapacityUploadMbps) + 'mbit prio 5')
shell('tc qdisc add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':2 ' + fqOrCAKE)
print()
#Parse network.json. For each tier, create corresponding HTB and leaf classes
devicesShaped = []
parentNodes = []
def traverseNetwork(data, depth, major, minor, queue, parentClassID, parentMaxDL, parentMaxUL):
tabs = ' ' * depth
for elem in data:
print(tabs + elem)
elemClassID = hex(major) + ':' + hex(minor)
#Cap based on this node's max bandwidth, or parent node's max bandwidth, whichever is lower
elemDownloadMax = min(data[elem]['downloadBandwidthMbps'],parentMaxDL)
elemUploadMax = min(data[elem]['uploadBandwidthMbps'],parentMaxUL)
#Based on calculations done in findBandwidthMins(), determine optimal HTB rates (mins) and ceils (maxs)
#The max calculation is to avoid 0 values, and the min calculation is to ensure rate is not higher than ceil
elemDownloadMin = round(elemDownloadMax*.95)
elemUploadMin = round(elemUploadMax*.95)
print(tabs + "Download: " + str(elemDownloadMin) + " to " + str(elemDownloadMax) + " Mbps")
print(tabs + "Upload: " + str(elemUploadMin) + " to " + str(elemUploadMax) + " Mbps")
print(tabs, end='')
shell('tc class add dev ' + interfaceA + ' parent ' + parentClassID + ' classid ' + hex(minor) + ' htb rate '+ str(round(elemDownloadMin)) + 'mbit ceil '+ str(round(elemDownloadMax)) + 'mbit prio 3')
print(tabs, end='')
shell('tc class add dev ' + interfaceB + ' parent ' + parentClassID + ' classid ' + hex(minor) + ' htb rate '+ str(round(elemUploadMin)) + 'mbit ceil '+ str(round(elemUploadMax)) + 'mbit prio 3')
print()
thisParentNode = {
"parentNodeName": elem,
"classID": elemClassID,
"downloadMax": elemDownloadMax,
"uploadMax": elemUploadMax,
}
parentNodes.append(thisParentNode)
minor += 1
for circuit in subscriberCircuits:
#If a circuit from ShapedDevices.csv lists this elem as its Parent Node, attach it as a leaf to this elem's HTB class
if elem == circuit['ParentNode']:
maxDownload = min(circuit['downloadMax'],elemDownloadMax)
maxUpload = min(circuit['uploadMax'],elemUploadMax)
minDownload = min(circuit['downloadMin'],maxDownload)
minUpload = min(circuit['uploadMin'],maxUpload)
print(tabs + ' ' + circuit['circuitName'])
print(tabs + ' ' + "Download: " + str(minDownload) + " to " + str(maxDownload) + " Mbps")
print(tabs + ' ' + "Upload: " + str(minUpload) + " to " + str(maxUpload) + " Mbps")
print(tabs + ' ', end='')
shell('tc class add dev ' + interfaceA + ' parent ' + elemClassID + ' classid ' + hex(minor) + ' htb rate '+ str(minDownload) + 'mbit ceil '+ str(maxDownload) + 'mbit prio 3')
print(tabs + ' ', end='')
shell('tc qdisc add dev ' + interfaceA + ' parent ' + hex(major) + ':' + hex(minor) + ' ' + fqOrCAKE)
print(tabs + ' ', end='')
shell('tc class add dev ' + interfaceB + ' parent ' + elemClassID + ' classid ' + hex(minor) + ' htb rate '+ str(minUpload) + 'mbit ceil '+ str(maxUpload) + 'mbit prio 3')
print(tabs + ' ', end='')
shell('tc qdisc add dev ' + interfaceB + ' parent ' + hex(major) + ':' + hex(minor) + ' ' + fqOrCAKE)
for device in circuit['devices']:
if device['ipv4']:
parentString = hex(major) + ':'
flowIDstring = hex(major) + ':' + hex(minor)
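# flowIDstring (e.g. '0x1:0x3') is the leaf class this circuit's IPs map to; graphBandwidth.py later matches
# tc's reported parent handle against the 'qdisc' value saved below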
if '/' in device['ipv4']:
hosts = list(ipaddress.ip_network(device['ipv4']).hosts())
for host in hosts:
print(tabs + ' ', end='')
shell('./xdp-cpumap-tc/src/xdp_iphash_to_cpu_cmdline --add --ip ' + str(host) + ' --cpu ' + hex(queue-1) + ' --classid ' + flowIDstring)
else:
print(tabs + ' ', end='')
shell('./xdp-cpumap-tc/src/xdp_iphash_to_cpu_cmdline --add --ip ' + device['ipv4'] + ' --cpu ' + hex(queue-1) + ' --classid ' + flowIDstring)
circuit['qdisc'] = flowIDstring
if device['deviceName'] not in devicesShaped:
devicesShaped.append(device['deviceName'])
print()
minor += 1
#Recursive call this function for children nodes attached to this node
if 'children' in data[elem]:
#We need to keep tabs on the minor counter, because we can't have repeating class IDs. Here, we bring back the minor counter from the recursive function
minor = traverseNetwork(data[elem]['children'], depth+1, major, minor+1, queue, elemClassID, elemDownloadMax, elemUploadMax)
#If top level node, increment to next queue / cpu core
if depth == 0:
if queue >= queuesAvailable:
queue = 1
major = queue
else:
queue += 1
major += 1
return minor
#Here is the actual call to the recursive traverseNetwork() function. finalMinor is not used.
finalMinor = traverseNetwork(network, 0, major=1, minor=3, queue=1, parentClassID="1:1", parentMaxDL=upstreamBandwidthCapacityDownloadMbps, parentMaxUL=upstreamBandwidthCapacityUploadMbps)
#Recap
for circuit in subscriberCircuits:
for device in circuit['devices']:
if device['deviceName'] not in devicesShaped:
print('Device ' + device['deviceName'] + ' with device ID of ' + device['deviceID'] + ' was not shaped. Please check to ensure its Parent Node is listed in network.json.')
#Save for stats
with open('statsByCircuit.json', 'w') as infile:
json.dump(subscriberCircuits, infile)
with open('statsByParentNode.json', 'w') as infile:
json.dump(parentNodes, infile)
# Done
currentTimeString = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
print("Successful run completed on " + currentTimeString)
if __name__ == '__main__':
refreshShapers()
print("Program complete")

12
v1.2/ShapedDevices.csv Normal file

@@ -0,0 +1,12 @@
Circuit ID,Circuit Name,Device ID,Device Name,Parent Node,MAC,IPv4,IPv6,Download Min,Upload Min,Download Max,Upload Max
,"968 Circle St., Gurnee, IL 60031",1,Device 1,AP_A,,100.64.0.1,,25,5,155,20
,"31 Marconi Street, Lake In The Hills, IL 60156",2,Device 2,AP_A,,100.64.0.2,,25,5,105,18
,"255 NW. Newport Ave., Jamestown, NY 14701",3,Device 3,AP_9,,100.64.0.3,,25,5,105,18
,"8493 Campfire Street, Peabody, MA 01960",4,Device 4,AP_9,,100.64.0.4,,25,5,105,18
2794,"6 Littleton Drive, Ringgold, GA 30736",5,Device 5,AP_11,,100.64.0.5,,25,5,105,18
2794,"6 Littleton Drive, Ringgold, GA 30736",6,Device 6,AP_11,,100.64.0.6,,25,5,105,18
,"93 Oklahoma Ave., Parsippany, NJ 07054",7,Device 7,AP_1,,100.64.0.7,,25,5,155,20
,"74 Bishop Ave., Bakersfield, CA 93306",8,Device 8,AP_1,,100.64.0.8,,25,5,105,18
,"9598 Peg Shop Drive, Lutherville Timonium, MD 21093",9,Device 9,AP_7,,100.64.0.9,,25,5,105,18
,"115 Gartner Rd., Gettysburg, PA 17325",10,Device 10,AP_7,,100.64.0.10,,25,5,105,18
,"525 Birchpond St., Romulus, MI 48174",11,Device 11,Site_1,,100.64.0.11,,25,5,105,18

178
v1.2/graphBandwidth.py Normal file

@@ -0,0 +1,178 @@
import os
import subprocess
from subprocess import PIPE
import io
import decimal
import json
from ispConfig import fqOrCAKE, interfaceA, interfaceB, influxDBBucket, influxDBOrg, influxDBtoken, influxDBurl
from datetime import date, datetime, timedelta
from influxdb_client import InfluxDBClient, Point, Dialect
from influxdb_client.client.write_api import SYNCHRONOUS
import dateutil.parser
def getCircuitStats(subscriberCircuits):
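# Pull per-class byte counters from 'tc -j -s qdisc show' on both interfaces and attach them to each circuit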
interfaces = [interfaceA, interfaceB]
for interface in interfaces:
command = 'tc -j -s qdisc show dev ' + interface
commands = command.split(' ')
tcShowResults = subprocess.run(commands, stdout=subprocess.PIPE).stdout.decode('utf-8')
if interface == interfaceA:
interfaceAjson = json.loads(tcShowResults)
else:
interfaceBjson = json.loads(tcShowResults)
for circuit in subscriberCircuits:
if 'timeQueried' in circuit:
circuit['priorQueryTime'] = circuit['timeQueried']
for interface in interfaces:
if interface == interfaceA:
jsonVersion = interfaceAjson
else:
jsonVersion = interfaceBjson
for element in jsonVersion:
if "parent" in element:
parentFixed = '0x' + element['parent'].split(':')[0] + ':' + '0x' + element['parent'].split(':')[1]
if parentFixed == circuit['qdisc']:
drops = int(element['drops'])
packets = int(element['packets'])
bytesSent = int(element['bytes'])
if interface == interfaceA:
if 'bytesSentDownload' in circuit:
circuit['priorQueryBytesDownload'] = circuit['bytesSentDownload']
circuit['bytesSentDownload'] = bytesSent
else:
if 'bytesSentUpload' in circuit:
circuit['priorQueryBytesUpload'] = circuit['bytesSentUpload']
circuit['bytesSentUpload'] = bytesSent
circuit['timeQueried'] = datetime.now().isoformat()
for circuit in subscriberCircuits:
if 'priorQueryTime' in circuit:
try:
bytesDLSinceLastQuery = circuit['bytesSentDownload'] - circuit['priorQueryBytesDownload']
bytesULSinceLastQuery = circuit['bytesSentUpload'] - circuit['priorQueryBytesUpload']
except:
bytesDLSinceLastQuery = 0
bytesULSinceLastQuery = 0
currentQueryTime = datetime.fromisoformat(circuit['timeQueried'])
priorQueryTime = datetime.fromisoformat(circuit['priorQueryTime'])
delta = currentQueryTime - priorQueryTime
deltaSeconds = delta.total_seconds()
if deltaSeconds > 0:
bitsDownload = round((((bytesDLSinceLastQuery*8))/deltaSeconds))
bitsUpload = round((((bytesULSinceLastQuery*8))/deltaSeconds))
else:
bitsDownload = 0
bitsUpload = 0
circuit['bitsDownloadSinceLastQuery'] = bitsDownload
circuit['bitsUploadSinceLastQuery'] = bitsUpload
else:
circuit['bitsDownloadSinceLastQuery'] = 0
circuit['bitsUploadSinceLastQuery'] = 0
return (subscriberCircuits)
def getParentNodeStats(parentNodes, subscriberCircuits):
for parentNode in parentNodes:
thisNodeBitsDownload = 0
thisNodeBitsUpload = 0
for circuit in subscriberCircuits:
if circuit['ParentNode'] == parentNode['parentNodeName']:
thisNodeBitsDownload += circuit['bitsDownloadSinceLastQuery']
thisNodeBitsUpload += circuit['bitsUploadSinceLastQuery']
parentNode['bitsDownloadSinceLastQuery'] = thisNodeBitsDownload
parentNode['bitsUploadSinceLastQuery'] = thisNodeBitsUpload
return parentNodes
def getParentNodeDict(data, depth, parentNodeNameDict):
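# Recursively walk network.json and build a child-name -> parent-name lookup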
if parentNodeNameDict == None:
parentNodeNameDict = {}
for elem in data:
if 'children' in data[elem]:
for child in data[elem]['children']:
parentNodeNameDict[child] = elem
tempDict = getParentNodeDict(data[elem]['children'], depth+1, parentNodeNameDict)
parentNodeNameDict = dict(parentNodeNameDict, **tempDict)
return parentNodeNameDict
def parentNodeNameDictPull():
#Load network hierarchy
with open('network.json', 'r') as j:
network = json.loads(j.read())
parentNodeNameDict = getParentNodeDict(network, 0, None)
return parentNodeNameDict
def refreshBandwidthGraphs():
startTime = datetime.now()
with open('statsByParentNode.json', 'r') as j:
parentNodes = json.loads(j.read())
with open('statsByCircuit.json', 'r') as j:
subscriberCircuits = json.loads(j.read())
parentNodeNameDict = parentNodeNameDictPull()
print("Retrieving circuit statistics")
subscriberCircuits = getCircuitStats(subscriberCircuits)
print("Computing parent node statistics")
parentNodes = getParentNodeStats(parentNodes, subscriberCircuits)
print("Writing data to InfluxDB")
bucket = influxDBBucket
org = influxDBOrg
token = influxDBtoken
url=influxDBurl
client = InfluxDBClient(
url=url,
token=token,
org=org
)
write_api = client.write_api(write_options=SYNCHRONOUS)
queriesToSend = []
for circuit in subscriberCircuits:
bitsDownload = int(circuit['bitsDownloadSinceLastQuery'])
bitsUpload = int(circuit['bitsUploadSinceLastQuery'])
if (bitsDownload > 0) and (bitsUpload > 0):
percentUtilizationDownload = round((bitsDownload / round(circuit['downloadMax']*1000000)),4)
percentUtilizationUpload = round((bitsUpload / round(circuit['uploadMax']*1000000)),4)
p = Point('Bandwidth').tag("Circuit", circuit['circuitName']).tag("ParentNode", circuit['ParentNode']).tag("Type", "Circuit").field("Download", bitsDownload)
queriesToSend.append(p)
p = Point('Bandwidth').tag("Circuit", circuit['circuitName']).tag("ParentNode", circuit['ParentNode']).tag("Type", "Circuit").field("Upload", bitsUpload)
queriesToSend.append(p)
p = Point('Utilization').tag("Circuit", circuit['circuitName']).tag("ParentNode", circuit['ParentNode']).tag("Type", "Circuit").field("Download", percentUtilizationDownload)
queriesToSend.append(p)
p = Point('Utilization').tag("Circuit", circuit['circuitName']).tag("ParentNode", circuit['ParentNode']).tag("Type", "Circuit").field("Upload", percentUtilizationUpload)
queriesToSend.append(p)
for parentNode in parentNodes:
bitsDownload = int(parentNode['bitsDownloadSinceLastQuery'])
bitsUpload = int(parentNode['bitsUploadSinceLastQuery'])
if (bitsDownload > 0) and (bitsUpload > 0):
percentUtilizationDownload = round((bitsDownload / round(parentNode['downloadMax']*1000000)),4)
percentUtilizationUpload = round((bitsUpload / round(parentNode['uploadMax']*1000000)),4)
p = Point('Bandwidth').tag("Device", parentNode['parentNodeName']).tag("ParentNode", parentNode['parentNodeName']).tag("Type", "Parent Node").field("Download", bitsDownload)
queriesToSend.append(p)
p = Point('Bandwidth').tag("Device", parentNode['parentNodeName']).tag("ParentNode", parentNode['parentNodeName']).tag("Type", "Parent Node").field("Upload", bitsUpload)
queriesToSend.append(p)
p = Point('Utilization').tag("Device", parentNode['parentNodeName']).tag("ParentNode", parentNode['parentNodeName']).tag("Type", "Parent Node").field("Download", percentUtilizationDownload)
queriesToSend.append(p)
p = Point('Utilization').tag("Device", parentNode['parentNodeName']).tag("ParentNode", parentNode['parentNodeName']).tag("Type", "Parent Node").field("Upload", percentUtilizationUpload)
queriesToSend.append(p)
write_api.write(bucket=bucket, record=queriesToSend)
print("Added " + str(len(queriesToSend)) + " points to InfluxDB.")
client.close()
with open('statsByParentNode.json', 'w') as infile:
json.dump(parentNodes, infile)
with open('statsByCircuit.json', 'w') as infile:
json.dump(subscriberCircuits, infile)
endTime = datetime.now()
durationSeconds = round((endTime - startTime).total_seconds(),2)
print("Graphs updated within " + str(durationSeconds) + " seconds.")
if __name__ == '__main__':
refreshBandwidthGraphs()

137
v1.2/graphLatency.py Normal file

@@ -0,0 +1,137 @@
import os
import subprocess
from subprocess import PIPE
import io
import decimal
import json
from ispConfig import fqOrCAKE, interfaceA, interfaceB, influxDBBucket, influxDBOrg, influxDBtoken, influxDBurl, ppingLocation
from datetime import date, datetime, timedelta
from influxdb_client import InfluxDBClient, Point, Dialect
from influxdb_client.client.write_api import SYNCHRONOUS
import dateutil.parser
def getLatencies(subscriberCircuits, secondsToRun):
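# Run pping for secondsToRun seconds and record, per matched device IP, the lowest observed TCP RTT and its jitter sample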
interfaces = [interfaceA, interfaceB]
tcpLatency = 0
listOfAllDiffs = []
maxLatencyRecordable = 200
matchableIPs = []
for circuit in subscriberCircuits:
for device in circuit['devices']:
matchableIPs.append(device['ipv4'])
rttDict = {}
jitterDict = {}
#for interface in interfaces:
command = "./pping -i " + interfaceA + " -s " + str(secondsToRun) + " -m"
commands = command.split(' ')
wd = ppingLocation
tcShowResults = subprocess.run(command, shell=True, cwd=wd,stdout=subprocess.PIPE, stderr=subprocess.DEVNULL).stdout.decode('utf-8').splitlines()
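# pping's '-m' (machine-readable) output is parsed below by fixed column offsets: two RTT fields, then a
# 'srcIP:port+dstIP:port' flow tuple; this assumes that layout stays fixed-width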
for line in tcShowResults:
if len(line) > 59:
rtt1 = float(line[18:27])*1000
rtt2 = float(line[27:36]) *1000
toAndFrom = line[38:].split(' ')[3]
fromIP = toAndFrom.split('+')[0].split(':')[0]
toIP = toAndFrom.split('+')[1].split(':')[0]
matchedIP = ''
if fromIP in matchableIPs:
matchedIP = fromIP
elif toIP in matchableIPs:
matchedIP = toIP
jitter = rtt1 - rtt2
#Cap ceil
if rtt1 >= maxLatencyRecordable:
rtt1 = 200
#Lowest observed rtt
if matchedIP in rttDict:
if rtt1 < rttDict[matchedIP]:
rttDict[matchedIP] = rtt1
jitterDict[matchedIP] = jitter
else:
rttDict[matchedIP] = rtt1
jitterDict[matchedIP] = jitter
for circuit in subscriberCircuits:
for device in circuit['devices']:
diffsForThisDevice = []
if device['ipv4'] in rttDict:
device['tcpLatency'] = rttDict[device['ipv4']]
else:
device['tcpLatency'] = None
if device['ipv4'] in jitterDict:
device['tcpJitter'] = jitterDict[device['ipv4']]
else:
device['tcpJitter'] = None
return subscriberCircuits
def getParentNodeStats(parentNodes, subscriberCircuits):
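# A parent node's latency is the mean of the measured TCP latencies of devices on circuits attached to it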
for parentNode in parentNodes:
acceptableLatencies = []
for circuit in subscriberCircuits:
for device in circuit['devices']:
if circuit['ParentNode'] == parentNode['parentNodeName']:
if device['tcpLatency'] != None:
acceptableLatencies.append(device['tcpLatency'])
if len(acceptableLatencies) > 0:
parentNode['tcpLatency'] = sum(acceptableLatencies)/len(acceptableLatencies)
else:
parentNode['tcpLatency'] = None
return parentNodes
def refreshLatencyGraphs(secondsToRun):
startTime = datetime.now()
with open('statsByParentNode.json', 'r') as j:
parentNodes = json.loads(j.read())
with open('statsByCircuit.json', 'r') as j:
subscriberCircuits = json.loads(j.read())
print("Retrieving circuit statistics")
subscriberCircuits = getLatencies(subscriberCircuits, secondsToRun)
print("Computing parent node statistics")
parentNodes = getParentNodeStats(parentNodes, subscriberCircuits)
print("Writing data to InfluxDB")
bucket = influxDBBucket
org = influxDBOrg
token = influxDBtoken
url = influxDBurl
client = InfluxDBClient(
url=url,
token=token,
org=org
)
write_api = client.write_api(write_options=SYNCHRONOUS)
queriesToSend = []
for circuit in subscriberCircuits:
for device in circuit['devices']:
if device['tcpLatency'] != None:
p = Point('Latency').tag("Device", device['deviceName']).tag("ParentNode", circuit['ParentNode']).tag("Type", "Device").field("TCP Latency", device['tcpLatency'])
queriesToSend.append(p)
for parentNode in parentNodes:
if parentNode['tcpLatency'] != None:
p = Point('Latency').tag("Device", parentNode['parentNodeName']).tag("ParentNode", parentNode['parentNodeName']).tag("Type", "Parent Node").field("TCP Latency", parentNode['tcpLatency'])
queriesToSend.append(p)
write_api.write(bucket=bucket, record=queriesToSend)
print("Added " + str(len(queriesToSend)) + " points to InfluxDB.")
client.close()
#with open('statsByParentNode.json', 'w') as infile:
# json.dump(parentNodes, infile)
#with open('statsByDevice.json', 'w') as infile:
# json.dump(devices, infile)
endTime = datetime.now()
durationSeconds = round((endTime - startTime).total_seconds())
print("Graphs updated within " + str(durationSeconds) + " seconds.")
if __name__ == '__main__':
refreshLatencyGraphs(10)

File diff suppressed because it is too large

61
v1.2/ispConfig.py Normal file

@@ -0,0 +1,61 @@
# 'fq_codel' or 'cake diffserv4'
# 'cake diffserv4' is recommended
# fqOrCAKE = 'fq_codel'
fqOrCAKE = 'cake diffserv4'
# How many Mbps are available to the edge of this network
upstreamBandwidthCapacityDownloadMbps = 1000
upstreamBandwidthCapacityUploadMbps = 1000
# Traffic from devices not specified in ShapedDevices.csv will be rate limited by an HTB class of this many Mbps
defaultClassCapacityDownloadMbps = 500
defaultClassCapacityUploadMbps = 500
# Interface connected to core router
interfaceA = 'eth1'
# Interface connected to edge router
interfaceB = 'eth2'
# Shape by Site in addition to by AP and Client
# Now deprecated, was only used prior to v1.1
# shapeBySite = True
# Allow shell commands. When False, commands are only printed to the console and not executed. MUST BE ENABLED FOR
# THE PROGRAM TO FUNCTION
enableActualShellCommands = True
# Add 'sudo' before execution of any shell commands. May be required depending on distribution and environment.
runShellCommandsAsSudo = False
# Bandwidth Graphing
bandwidthGraphingEnabled = True
influxDBurl = "http://localhost:8086"
influxDBBucket = "libreqos"
influxDBOrg = "Your ISP Name Here"
influxDBtoken = ""
# Latency Graphing
latencyGraphingEnabled = False
ppingLocation = "pping"
# NMS/CRM Integration
# If a device shows a WAN IP within these subnets, assume it is behind NAT / unshapable, and ignore it
ignoreSubnets = ['192.168.0.0/16']
# Optional UISP integration
automaticImportUISP = False
# Everything before /nms/ on your UISP instance
uispBaseURL = 'https://examplesite.com'
# UISP Auth Token
uispAuthToken = ''
# UISP | Whether to shape the router at the customer premises, or instead the station radio. When the station radio is in
# router mode, use 'station'. Otherwise, use 'router'.
shapeRouterOrStation = 'router'
# API Auth
apiUsername = "testUser"
apiPassword = "changeme8343486806"
apiHostIP = "127.0.0.1"
apiHostPost = 5000

78
v1.2/network.json Normal file

@@ -0,0 +1,78 @@
{
"Site_1":
{
"downloadBandwidthMbps":1000,
"uploadBandwidthMbps":1000,
"children":
{
"AP_A":
{
"downloadBandwidthMbps":500,
"uploadBandwidthMbps":500
},
"Site_3":
{
"downloadBandwidthMbps":500,
"uploadBandwidthMbps":500,
"children":
{
"PoP_5":
{
"downloadBandwidthMbps":200,
"uploadBandwidthMbps":200,
"children":
{
"AP_9":
{
"downloadBandwidthMbps":120,
"uploadBandwidthMbps":120
},
"PoP_6":
{
"downloadBandwidthMbps":60,
"uploadBandwidthMbps":60,
"children":
{
"AP_11":
{
"downloadBandwidthMbps":30,
"uploadBandwidthMbps":30
}
}
}
}
}
}
}
}
},
"Site_2":
{
"downloadBandwidthMbps":500,
"uploadBandwidthMbps":500,
"children":
{
"PoP_1":
{
"downloadBandwidthMbps":200,
"uploadBandwidthMbps":200,
"children":
{
"AP_7":
{
"downloadBandwidthMbps":100,
"uploadBandwidthMbps":100
}
}
},
"AP_1":
{
"downloadBandwidthMbps":150,
"uploadBandwidthMbps":150
}
}
}
}


@@ -1 +0,0 @@
placeholder

27
v1.2/scheduled.py Normal file

@@ -0,0 +1,27 @@
import time
import schedule
from LibreQoS import refreshShapers
from graphBandwidth import refreshBandwidthGraphs
from graphLatency import refreshLatencyGraphs
from ispConfig import bandwidthGraphingEnabled, latencyGraphingEnabled
def importandshape():
refreshShapers()
if __name__ == '__main__':
importandshape()
schedule.every().day.at("04:00").do(importandshape)
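# Main loop: run any due scheduled job, then refresh bandwidth/latency graphs when enabled; the 10-second
# latency capture (or the time.sleep when latency graphing is disabled) appears to pace each pass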
while True:
schedule.run_pending()
if bandwidthGraphingEnabled:
try:
refreshBandwidthGraphs()
except:
print("Failed to update bandwidth graphs")
if latencyGraphingEnabled:
try:
refreshLatencyGraphs(10)
except:
print("Failed to update latency graphs")
else:
time.sleep(10)