Delete old/v1.1 directory

Robert Chacón
2023-03-22 15:45:13 -06:00
committed by GitHub
parent fe7b7aa312
commit 2bf244e0d8
13 changed files with 0 additions and 2532 deletions


@@ -1,244 +0,0 @@
# v1.1 beta
import csv
import io
import ipaddress
import json
import os
import subprocess
from datetime import datetime
import multiprocessing
from ispConfig import fqOrCAKE, upstreamBandwidthCapacityDownloadMbps, upstreamBandwidthCapacityUploadMbps, \
defaultClassCapacityDownloadMbps, defaultClassCapacityUploadMbps, interfaceA, interfaceB, enableActualShellCommands, \
runShellCommandsAsSudo
def shell(command):
if enableActualShellCommands:
if runShellCommandsAsSudo:
command = 'sudo ' + command
commands = command.split(' ')
print(command)
proc = subprocess.Popen(commands, stdout=subprocess.PIPE)
for line in io.TextIOWrapper(proc.stdout, encoding="utf-8"): # or another encoding
print(line)
else:
print(command)
def clearPriorSettings(interfaceA, interfaceB):
if enableActualShellCommands:
shell('tc filter delete dev ' + interfaceA)
shell('tc filter delete dev ' + interfaceA + ' root')
shell('tc qdisc delete dev ' + interfaceA + ' root')
shell('tc qdisc delete dev ' + interfaceA)
shell('tc filter delete dev ' + interfaceB)
shell('tc filter delete dev ' + interfaceB + ' root')
shell('tc qdisc delete dev ' + interfaceB + ' root')
shell('tc qdisc delete dev ' + interfaceB)
def refreshShapers():
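# tcpOverheadFactor inflates plan rates by ~9%, presumably so shaped TCP goodput can reach the advertised speed despite IP/TCP header overhead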
tcpOverheadFactor = 1.09
# Load Devices
devices = []
with open('Shaper.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
next(csv_reader)
for row in csv_reader:
deviceID, ParentNode, mac, hostname,ipv4, ipv6, downloadMin, uploadMin, downloadMax, uploadMax = row
ipv4 = ipv4.strip()
ipv6 = ipv6.strip()
if ParentNode == "":
ParentNode = "none"
ParentNode = ParentNode.strip()
thisDevice = {
"id": deviceID,
"mac": mac,
"ParentNode": ParentNode,
"hostname": hostname,
"ipv4": ipv4,
"ipv6": ipv6,
"downloadMin": round(int(downloadMin)*tcpOverheadFactor),
"uploadMin": round(int(uploadMin)*tcpOverheadFactor),
"downloadMax": round(int(downloadMax)*tcpOverheadFactor),
"uploadMax": round(int(uploadMax)*tcpOverheadFactor),
"qdisc": '',
}
devices.append(thisDevice)
#Load network hierarchy
with open('network.json', 'r') as j:
network = json.loads(j.read())
#Find the bandwidth minimums for each node by combining minimums of devices lower in that node's hierarchy
def findBandwidthMins(data, depth):
tabs = ' ' * depth
minDownload = 0
minUpload = 0
for elem in data:
for device in devices:
if elem == device['ParentNode']:
minDownload += device['downloadMin']
minUpload += device['uploadMin']
if 'children' in data[elem]:
minDL, minUL = findBandwidthMins(data[elem]['children'], depth+1)
minDownload += minDL
minUpload += minUL
data[elem]['downloadBandwidthMbpsMin'] = minDownload
data[elem]['uploadBandwidthMbpsMin'] = minUpload
return minDownload, minUpload
minDownload, minUpload = findBandwidthMins(network, 0)
#Clear Prior Settings
clearPriorSettings(interfaceA, interfaceB)
# Find queues and CPU cores available. Use min between those two as queuesAvailable
queuesAvailable = 0
path = '/sys/class/net/' + interfaceA + '/queues/'
directory_contents = os.listdir(path)
for item in directory_contents:
if "tx-" in str(item):
queuesAvailable += 1
print("NIC queues:\t" + str(queuesAvailable))
cpuCount = multiprocessing.cpu_count()
print("CPU cores:\t" + str(cpuCount))
queuesAvailable = min(queuesAvailable,cpuCount)
# XDP-CPUMAP-TC
shell('./xdp-cpumap-tc/bin/xps_setup.sh -d ' + interfaceA + ' --default --disable')
shell('./xdp-cpumap-tc/bin/xps_setup.sh -d ' + interfaceB + ' --default --disable')
shell('./xdp-cpumap-tc/src/xdp_iphash_to_cpu --dev ' + interfaceA + ' --lan')
shell('./xdp-cpumap-tc/src/xdp_iphash_to_cpu --dev ' + interfaceB + ' --wan')
shell('./xdp-cpumap-tc/src/xdp_iphash_to_cpu_cmdline --clear')
shell('./xdp-cpumap-tc/src/tc_classify --dev-egress ' + interfaceA)
shell('./xdp-cpumap-tc/src/tc_classify --dev-egress ' + interfaceB)
# Create MQ qdisc for each interface
thisInterface = interfaceA
shell('tc qdisc replace dev ' + thisInterface + ' root handle 7FFF: mq')
for queue in range(queuesAvailable):
shell('tc qdisc add dev ' + thisInterface + ' parent 7FFF:' + hex(queue+1) + ' handle ' + hex(queue+1) + ': htb default 2')
shell('tc class add dev ' + thisInterface + ' parent ' + hex(queue+1) + ': classid ' + hex(queue+1) + ':1 htb rate '+ str(upstreamBandwidthCapacityDownloadMbps) + 'mbit ceil ' + str(upstreamBandwidthCapacityDownloadMbps) + 'mbit')
shell('tc qdisc add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':1 ' + fqOrCAKE)
# Default class - traffic gets passed through this limiter with lower priority if not otherwise classified by the Shaper.csv
# Only 1/4 of defaultClassCapacity is guaranteed (to prevent hitting ceiling of upstream), for the most part it serves as an "up to" ceiling.
# Default class can use up to defaultClassCapacityDownloadMbps when that bandwidth isn't used by known hosts.
shell('tc class add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':1 classid ' + hex(queue+1) + ':2 htb rate ' + str(defaultClassCapacityDownloadMbps/4) + 'mbit ceil ' + str(defaultClassCapacityDownloadMbps) + 'mbit prio 5')
shell('tc qdisc add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':2 ' + fqOrCAKE)
thisInterface = interfaceB
shell('tc qdisc replace dev ' + thisInterface + ' root handle 7FFF: mq')
for queue in range(queuesAvailable):
shell('tc qdisc add dev ' + thisInterface + ' parent 7FFF:' + hex(queue+1) + ' handle ' + hex(queue+1) + ': htb default 2')
shell('tc class add dev ' + thisInterface + ' parent ' + hex(queue+1) + ': classid ' + hex(queue+1) + ':1 htb rate '+ str(upstreamBandwidthCapacityUploadMbps) + 'mbit ceil ' + str(upstreamBandwidthCapacityUploadMbps) + 'mbit')
shell('tc qdisc add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':1 ' + fqOrCAKE)
# Default class - traffic gets passed through this limiter with lower priority if not otherwise classified by the Shaper.csv.
# Only 1/4 of defaultClassCapacity is guaranteed (to prevent hitting ceiling of upstream), for the most part it serves as an "up to" ceiling.
# Default class can use up to defaultClassCapacityUploadMbps when that bandwidth isn't used by known hosts.
shell('tc class add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':1 classid ' + hex(queue+1) + ':2 htb rate ' + str(defaultClassCapacityUploadMbps/4) + 'mbit ceil ' + str(defaultClassCapacityUploadMbps) + 'mbit prio 5')
shell('tc qdisc add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':2 ' + fqOrCAKE)
print()
#Parse network.json. For each tier, create corresponding HTB and leaf classes
devicesShaped = []
parentNodes = []
def traverseNetwork(data, depth, major, minor, queue, parentClassID, parentMaxDL, parentMaxUL):
tabs = ' ' * depth
for elem in data:
print(tabs + elem)
elemClassID = hex(major) + ':' + hex(minor)
#Cap based on this node's max bandwidth, or parent node's max bandwidth, whichever is lower
elemDownloadMax = min(data[elem]['downloadBandwidthMbps'],parentMaxDL)
elemUploadMax = min(data[elem]['uploadBandwidthMbps'],parentMaxUL)
#Based on calculations done in findBandwidthMins(), determine optimal HTB rates (mins) and ceils (maxs)
#The max calculation is to avoid 0 values, and the min calculation is to ensure rate is not higher than ceil
elemDownloadMin = round(elemDownloadMax*.95)
elemUploadMin = round(elemUploadMax*.95)
print(tabs + "Download: " + str(elemDownloadMin) + " to " + str(elemDownloadMax) + " Mbps")
print(tabs + "Upload: " + str(elemUploadMin) + " to " + str(elemUploadMax) + " Mbps")
print(tabs, end='')
shell('tc class add dev ' + interfaceA + ' parent ' + parentClassID + ' classid ' + hex(minor) + ' htb rate '+ str(round(elemDownloadMin)) + 'mbit ceil '+ str(round(elemDownloadMax)) + 'mbit prio 3')
print(tabs, end='')
shell('tc class add dev ' + interfaceB + ' parent ' + parentClassID + ' classid ' + hex(minor) + ' htb rate '+ str(round(elemUploadMin)) + 'mbit ceil '+ str(round(elemUploadMax)) + 'mbit prio 3')
print()
thisParentNode = {
"parentNodeName": elem,
"classID": elemClassID,
"downloadMax": elemDownloadMax,
"uploadMax": elemUploadMax,
}
parentNodes.append(thisParentNode)
minor += 1
for device in devices:
#If a device from Shaper.csv lists this elem as its Parent Node, attach it as a leaf to this elem HTB
if elem == device['ParentNode']:
maxDownload = min(device['downloadMax'],elemDownloadMax)
maxUpload = min(device['uploadMax'],elemUploadMax)
minDownload = min(device['downloadMin'],maxDownload)
minUpload = min(device['uploadMin'],maxUpload)
print(tabs + ' ' + device['hostname'])
print(tabs + ' ' + "Download: " + str(minDownload) + " to " + str(maxDownload) + " Mbps")
print(tabs + ' ' + "Upload: " + str(minUpload) + " to " + str(maxUpload) + " Mbps")
print(tabs + ' ', end='')
shell('tc class add dev ' + interfaceA + ' parent ' + elemClassID + ' classid ' + hex(minor) + ' htb rate '+ str(minDownload) + 'mbit ceil '+ str(maxDownload) + 'mbit prio 3')
print(tabs + ' ', end='')
shell('tc qdisc add dev ' + interfaceA + ' parent ' + hex(major) + ':' + hex(minor) + ' ' + fqOrCAKE)
print(tabs + ' ', end='')
shell('tc class add dev ' + interfaceB + ' parent ' + elemClassID + ' classid ' + hex(minor) + ' htb rate '+ str(minUpload) + 'mbit ceil '+ str(maxUpload) + 'mbit prio 3')
print(tabs + ' ', end='')
shell('tc qdisc add dev ' + interfaceB + ' parent ' + hex(major) + ':' + hex(minor) + ' ' + fqOrCAKE)
if device['ipv4']:
parentString = hex(major) + ':'
flowIDstring = hex(major) + ':' + hex(minor)
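# CPU indices are 0-based while queue numbering starts at 1, hence the queue-1 below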
if '/' in device['ipv4']:
hosts = list(ipaddress.ip_network(device['ipv4']).hosts())
for host in hosts:
print(tabs + ' ', end='')
shell('./xdp-cpumap-tc/src/xdp_iphash_to_cpu_cmdline --add --ip ' + str(host) + ' --cpu ' + hex(queue-1) + ' --classid ' + flowIDstring)
else:
print(tabs + ' ', end='')
shell('./xdp-cpumap-tc/src/xdp_iphash_to_cpu_cmdline --add --ip ' + device['ipv4'] + ' --cpu ' + hex(queue-1) + ' --classid ' + flowIDstring)
device['qdisc'] = flowIDstring
if device['hostname'] not in devicesShaped:
devicesShaped.append(device['hostname'])
print()
minor += 1
#Recursively call this function for children nodes attached to this node
if 'children' in data[elem]:
#We need to keep tabs on the minor counter, because we can't have repeating class IDs. Here, we bring back the minor counter from the recursive function
minor = traverseNetwork(data[elem]['children'], depth+1, major, minor+1, queue, elemClassID, elemDownloadMax, elemUploadMax)
#If top level node, increment to next queue / cpu core
if depth == 0:
if queue >= queuesAvailable:
queue = 1
major = queue
else:
queue += 1
major += 1
return minor
#Here is the actual call to the recursive traverseNetwork() function. finalMinor is not used.
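# minor starts at 3 because, on each queue, classid :1 is the root HTB class and :2 is the default class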
finalMinor = traverseNetwork(network, 0, major=1, minor=3, queue=1, parentClassID="1:1", parentMaxDL=upstreamBandwidthCapacityDownloadMbps, parentMaxUL=upstreamBandwidthCapacityUploadMbps)
#Recap
for device in devices:
if device['hostname'] not in devicesShaped:
print('Device ' + device['hostname'] + ' was not shaped. Please check to ensure its Parent Node is listed in network.json.')
#Save for stats
with open('statsByDevice.json', 'w') as infile:
json.dump(devices, infile)
with open('statsByParentNode.json', 'w') as infile:
json.dump(parentNodes, infile)
# Done
currentTimeString = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
print("Successful run completed on " + currentTimeString)
if __name__ == '__main__':
refreshShapers()
print("Program complete")


@@ -1,28 +0,0 @@
# v1.1 (IPv4) (Beta)
Released: 2022
<img alt="LibreQoS" src="https://raw.githubusercontent.com/rchac/LibreQoS/main/docs/v1.1-alpha-preview.jpg">
## Installation Guide
- 📄 [LibreQoS v1.1 Installation & Usage Guide Physical Server and Ubuntu 21.10](https://github.com/rchac/LibreQoS/wiki/LibreQoS-v1.1-Installation-&-Usage-Guide-Physical-Server-and-Ubuntu-21.10)
## Features
- Tested up to 11Gbps asymmetrical throughput in real world deployment with 5000+ clients.
- Network hierarchy can be mapped to the network.json file (see the minimal example below). This allows for both simple network hierarchies (Site>AP>Client) as well as much more complex ones (Site>Site>Micro-PoP>AP>Site>AP>Client).
- Graphing of bandwidth to InfluxDB. Parses bandwidth data from "tc -s qdisc show" command, minimizing CPU use.
- Graphing of TCP latency to InfluxDB - via PPing integration.
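
For illustration, a minimal network.json for a simple Site>AP hierarchy (following the same schema as the sample file in this directory) might look like:

```json
{
  "Site_1":
  {
    "downloadBandwidthMbps":1000,
    "uploadBandwidthMbps":1000,
    "children":
    {
      "AP_A":
      {
        "downloadBandwidthMbps":500,
        "uploadBandwidthMbps":500
      }
    }
  }
}
```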
## Considerations
- Any top-level parent node is tied to a single CPU core. Top-level nodes are evenly distributed across CPUs. Since each CPU can usually only accommodate up to 4Gbps, ensure any single top-level parent node will not require more than 4Gbps throughput.
## Limitations
- As with v0.9 and v1.0, not yet dual stack: clients can only be shaped by IPv4 address until IPv6 support is added to XDP-CPUMAP-TC, after which IPv6 shaping can follow.
- XDP's cpumap-redirect achieves higher throughput on a server with direct access to the NIC (XDP offloading possible) vs as a VM with bridges (generic XDP).


@@ -1,12 +0,0 @@
ID,AP,MAC,Hostname,IPv4,IPv6,Download Min,Upload Min,Download Max,Upload Max
,AP_A,,Device 1,100.64.0.1,,25,5,155,20
,AP_A,,Device 2,100.64.0.2,,25,5,105,18
,AP_9,,Device 3,100.64.0.3,,25,5,105,18
,AP_9,,Device 4,100.64.0.4,,25,5,105,18
,AP_11,,Device 5,100.64.0.5,,25,5,105,18
,AP_11,,Device 6,100.64.0.6,,25,5,90,15
,AP_1,,Device 7,100.64.0.7,,25,5,155,20
,AP_1,,Device 8,100.64.0.8,,25,5,105,18
,AP_7,,Device 9,100.64.0.9,,25,5,105,18
,AP_7,,Device 10,100.64.0.10,,25,5,105,18
,Site_1,,Device 11,100.64.0.11,,25,5,105,18


@@ -1,153 +0,0 @@
import os
import subprocess
from subprocess import PIPE
import io
import decimal
import json
from operator import itemgetter
from prettytable import PrettyTable
from ispConfig import fqOrCAKE, interfaceA, interfaceB, influxDBBucket, influxDBOrg, influxDBtoken, influxDBurl
from datetime import date, datetime, timedelta
from itertools import groupby
from influxdb_client import InfluxDBClient, Point, Dialect
from influxdb_client.client.write_api import SYNCHRONOUS
import dateutil.parser
def getDeviceStats(devices):
interfaces = [interfaceA, interfaceB]
for interface in interfaces:
command = 'tc -j -s qdisc show dev ' + interface
commands = command.split(' ')
tcShowResults = subprocess.run(commands, stdout=subprocess.PIPE).stdout.decode('utf-8')
if interface == interfaceA:
interfaceAjson = json.loads(tcShowResults)
else:
interfaceBjson = json.loads(tcShowResults)
for device in devices:
if 'timeQueried' in device:
device['priorQueryTime'] = device['timeQueried']
for interface in interfaces:
if interface == interfaceA:
jsonVersion = interfaceAjson
else:
jsonVersion = interfaceBjson
for element in jsonVersion:
if "parent" in element:
if element['parent'] == device['qdisc']:
drops = int(element['drops'])
packets = int(element['packets'])
bytesSent = int(element['bytes'])
if interface == interfaceA:
if 'bytesSentDownload' in device:
device['priorQueryBytesDownload'] = device['bytesSentDownload']
device['bytesSentDownload'] = bytesSent
else:
if 'bytesSentUpload' in device:
device['priorQueryBytesUpload'] = device['bytesSentUpload']
device['bytesSentUpload'] = bytesSent
device['timeQueried'] = datetime.now().isoformat()
for device in devices:
if 'priorQueryTime' in device:
bytesDLSinceLastQuery = device['bytesSentDownload'] - device['priorQueryBytesDownload']
bytesULSinceLastQuery = device['bytesSentUpload'] - device['priorQueryBytesUpload']
currentQueryTime = datetime.fromisoformat(device['timeQueried'])
priorQueryTime = datetime.fromisoformat(device['priorQueryTime'])
delta = currentQueryTime - priorQueryTime
deltaSeconds = delta.total_seconds()
if deltaSeconds > 0:
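# 125,000 bytes = 1 megabit, so bytes/125000 per second yields Mbps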
mbpsDownload = ((bytesDLSinceLastQuery/125000))/deltaSeconds
mbpsUpload = ((bytesULSinceLastQuery/125000))/deltaSeconds
else:
mbpsDownload = 0
mbpsUpload = 0
device['mbpsDownloadSinceLastQuery'] = mbpsDownload
device['mbpsUploadSinceLastQuery'] = mbpsUpload
else:
device['mbpsDownloadSinceLastQuery'] = 0
device['mbpsUploadSinceLastQuery'] = 0
return devices
def getParentNodeStats(parentNodes, devices):
for parentNode in parentNodes:
thisNodeMbpsDownload = 0
thisNodeMbpsUpload = 0
for device in devices:
if device['ParentNode'] == parentNode['parentNodeName']:
thisNodeMbpsDownload += device['mbpsDownloadSinceLastQuery']
thisNodeMbpsUpload += device['mbpsUploadSinceLastQuery']
parentNode['mbpsDownloadSinceLastQuery'] = thisNodeMbpsDownload
parentNode['mbpsUploadSinceLastQuery'] = thisNodeMbpsUpload
return parentNodes
def refreshGraphs():
startTime = datetime.now()
with open('statsByParentNode.json', 'r') as j:
parentNodes = json.loads(j.read())
with open('statsByDevice.json', 'r') as j:
devices = json.loads(j.read())
print("Retrieving device statistics")
devices = getDeviceStats(devices)
print("Computing parent node statistics")
parentNodes = getParentNodeStats(parentNodes, devices)
print("Writing data to InfluxDB")
bucket = influxDBBucket
org = influxDBOrg
token = influxDBtoken
url = influxDBurl
client = InfluxDBClient(
url=url,
token=token,
org=org
)
write_api = client.write_api(write_options=SYNCHRONOUS)
queriesToSend = []
for device in devices:
mbpsDownload = float(device['mbpsDownloadSinceLastQuery'])
mbpsUpload = float(device['mbpsUploadSinceLastQuery'])
if (mbpsDownload > 0) and (mbpsUpload > 0):
percentUtilizationDownload = float(mbpsDownload / device['downloadMax'])
percentUtilizationUpload = float(mbpsUpload / device['uploadMax'])
p = Point('Bandwidth').tag("Device", device['hostname']).tag("ParentNode", device['ParentNode']).field("Download", mbpsDownload)
queriesToSend.append(p)
p = Point('Bandwidth').tag("Device", device['hostname']).tag("ParentNode", device['ParentNode']).field("Upload", mbpsUpload)
queriesToSend.append(p)
p = Point('Utilization').tag("Device", device['hostname']).tag("ParentNode", device['ParentNode']).field("Download", percentUtilizationDownload)
queriesToSend.append(p)
p = Point('Utilization').tag("Device", device['hostname']).tag("ParentNode", device['ParentNode']).field("Upload", percentUtilizationUpload)
queriesToSend.append(p)
for parentNode in parentNodes:
mbpsDownload = float(parentNode['mbpsDownloadSinceLastQuery'])
mbpsUpload = float(parentNode['mbpsUploadSinceLastQuery'])
if (mbpsDownload > 0) and (mbpsUpload > 0):
percentUtilizationDownload = float(mbpsDownload / parentNode['downloadMax'])
percentUtilizationUpload = float(mbpsUpload / parentNode['uploadMax'])
p = Point('Bandwidth').tag("Device", parentNode['parentNodeName']).tag("ParentNode", parentNode['parentNodeName']).field("Download", mbpsDownload)
queriesToSend.append(p)
p = Point('Bandwidth').tag("Device", parentNode['parentNodeName']).tag("ParentNode", parentNode['parentNodeName']).field("Upload", mbpsUpload)
queriesToSend.append(p)
p = Point('Utilization').tag("Device", parentNode['parentNodeName']).tag("ParentNode", parentNode['parentNodeName']).field("Download", percentUtilizationDownload)
queriesToSend.append(p)
p = Point('Utilization').tag("Device", parentNode['parentNodeName']).tag("ParentNode", parentNode['parentNodeName']).field("Upload", percentUtilizationUpload)
queriesToSend.append(p)
write_api.write(bucket=bucket, record=queriesToSend)
print("Added " + str(len(queriesToSend)) + " points to InfluxDB.")
client.close()
with open('statsByParentNode.json', 'w') as infile:
json.dump(parentNodes, infile)
with open('statsByDevice.json', 'w') as infile:
json.dump(devices, infile)
endTime = datetime.now()
durationSeconds = round((endTime - startTime).total_seconds())
print("Graphs updated within " + str(durationSeconds) + " seconds.")
if __name__ == '__main__':
refreshGraphs()


@@ -1,186 +0,0 @@
import subprocess
import json
from datetime import datetime
from influxdb_client import InfluxDBClient, Point
from influxdb_client.client.write_api import SYNCHRONOUS
from ispConfig import interfaceA, interfaceB, influxDBBucket, influxDBOrg, influxDBtoken, influxDBurl
def getInterfaceStats(interface):
command = 'tc -j -s qdisc show dev ' + interface
jsonAr = json.loads(subprocess.run(command.split(' '), stdout=subprocess.PIPE).stdout.decode('utf-8'))
jsonDict = {}
for element in filter(lambda e: 'parent' in e, jsonAr):
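# tc prints class handles as hex digits without the '0x' prefix; re-add it so keys match the '0x..:0x..' classid strings stored in statsByDevice.json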
flowID = ':'.join(map(lambda p: f'0x{p}', element['parent'].split(':')[0:2]))
jsonDict[flowID] = element
del jsonAr
return jsonDict
def chunk_list(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
def getDeviceStats(devices):
interfaces = [interfaceA, interfaceB]
ifaceStats = list(map(getInterfaceStats, interfaces))
for device in devices:
if 'timeQueried' in device:
device['priorQueryTime'] = device['timeQueried']
for (interface, stats, dirSuffix) in zip(interfaces, ifaceStats, ['Download', 'Upload']):
element = stats[device['qdisc']] if device['qdisc'] in stats else False
if element:
bytesSent = int(element['bytes'])
drops = int(element['drops'])
packets = int(element['packets'])
if 'bytesSent' + dirSuffix in device:
device['priorQueryBytes' + dirSuffix] = device['bytesSent' + dirSuffix]
device['bytesSent' + dirSuffix] = bytesSent
if 'dropsSent' + dirSuffix in device:
device['priorDropsSent' + dirSuffix] = device['dropsSent' + dirSuffix]
device['dropsSent' + dirSuffix] = drops
if 'packetsSent' + dirSuffix in device:
device['priorPacketsSent' + dirSuffix] = device['packetsSent' + dirSuffix]
device['packetsSent' + dirSuffix] = packets
device['timeQueried'] = datetime.now().isoformat()
for device in devices:
device['bitsDownloadSinceLastQuery'] = device['bitsUploadSinceLastQuery'] = 0
if 'priorQueryTime' in device:
try:
bytesDLSinceLastQuery = device['bytesSentDownload'] - device['priorQueryBytesDownload']
bytesULSinceLastQuery = device['bytesSentUpload'] - device['priorQueryBytesUpload']
except KeyError:
bytesDLSinceLastQuery = bytesULSinceLastQuery = 0
currentQueryTime = datetime.fromisoformat(device['timeQueried'])
priorQueryTime = datetime.fromisoformat(device['priorQueryTime'])
deltaSeconds = (currentQueryTime - priorQueryTime).total_seconds()
device['bitsDownloadSinceLastQuery'] = round(
((bytesDLSinceLastQuery * 8) / deltaSeconds)) if deltaSeconds > 0 else 0
device['bitsUploadSinceLastQuery'] = round(
((bytesULSinceLastQuery * 8) / deltaSeconds)) if deltaSeconds > 0 else 0
return devices
def getParentNodeStats(parentNodes, devices):
for parentNode in parentNodes:
thisNodeBitsDownload = 0
thisNodeBitsUpload = 0
for device in devices:
if device['ParentNode'] == parentNode['parentNodeName']:
thisNodeBitsDownload += device['bitsDownloadSinceLastQuery']
thisNodeBitsUpload += device['bitsUploadSinceLastQuery']
parentNode['bitsDownloadSinceLastQuery'] = thisNodeBitsDownload
parentNode['bitsUploadSinceLastQuery'] = thisNodeBitsUpload
return parentNodes
def getParentNodeDict(data, depth, parentNodeNameDict):
if parentNodeNameDict is None:
parentNodeNameDict = {}
for elem in data:
if 'children' in data[elem]:
for child in data[elem]['children']:
parentNodeNameDict[child] = elem
tempDict = getParentNodeDict(data[elem]['children'], depth + 1, parentNodeNameDict)
parentNodeNameDict = dict(parentNodeNameDict, **tempDict)
return parentNodeNameDict
def parentNodeNameDictPull():
# Load network heirarchy
with open('network.json', 'r') as j:
network = json.loads(j.read())
parentNodeNameDict = getParentNodeDict(network, 0, None)
return parentNodeNameDict
def refreshBandwidthGraphs():
startTime = datetime.now()
with open('statsByParentNode.json', 'r') as j:
parentNodes = json.loads(j.read())
with open('statsByDevice.json', 'r') as j:
devices = json.loads(j.read())
parentNodeNameDict = parentNodeNameDictPull()
print("Retrieving device statistics")
devices = getDeviceStats(devices)
print("Computing parent node statistics")
parentNodes = getParentNodeStats(parentNodes, devices)
print("Writing data to InfluxDB")
client = InfluxDBClient(
url=influxDBurl,
token=influxDBtoken,
org=influxDBOrg
)
write_api = client.write_api(write_options=SYNCHRONOUS)
chunkedDevices = list(chunk_list(devices, 200))
queriesToSendCount = 0
for chunk in chunkedDevices:
queriesToSend = []
for device in chunk:
bitsDownload = int(device['bitsDownloadSinceLastQuery'])
bitsUpload = int(device['bitsUploadSinceLastQuery'])
if (bitsDownload > 0) and (bitsUpload > 0):
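# downloadMax/uploadMax are in Mbps; multiply by 1,000,000 to compare in bits per second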
percentUtilizationDownload = round((bitsDownload / round(device['downloadMax'] * 1000000)), 4)
percentUtilizationUpload = round((bitsUpload / round(device['uploadMax'] * 1000000)), 4)
p = Point('Bandwidth').tag("Device", device['hostname']).tag("ParentNode", device['ParentNode']).tag("Type", "Device").field("Download", bitsDownload).field("Upload", bitsUpload)
queriesToSend.append(p)
p = Point('Utilization').tag("Device", device['hostname']).tag("ParentNode", device['ParentNode']).tag("Type", "Device").field("Download", percentUtilizationDownload).field("Upload", percentUtilizationUpload)
queriesToSend.append(p)
write_api.write(bucket=influxDBBucket, record=queriesToSend)
# print("Added " + str(len(queriesToSend)) + " points to InfluxDB.")
queriesToSendCount += len(queriesToSend)
queriesToSend = []
for parentNode in parentNodes:
bitsDownload = int(parentNode['bitsDownloadSinceLastQuery'])
bitsUpload = int(parentNode['bitsUploadSinceLastQuery'])
if (bitsDownload > 0) and (bitsUpload > 0):
percentUtilizationDownload = round((bitsDownload / round(parentNode['downloadMax'] * 1000000)), 4)
percentUtilizationUpload = round((bitsUpload / round(parentNode['uploadMax'] * 1000000)), 4)
p = Point('Bandwidth').tag("Device", parentNode['parentNodeName']).tag("ParentNode", parentNode['parentNodeName']).tag("Type", "Parent Node").field("Download", bitsDownload).field("Upload", bitsUpload)
queriesToSend.append(p)
p = Point('Utilization').tag("Device", parentNode['parentNodeName']).tag("ParentNode", parentNode['parentNodeName']).tag("Type", "Parent Node").field("Download", percentUtilizationDownload).field("Upload", percentUtilizationUpload)
queriesToSend.append(p)
write_api.write(bucket=influxDBBucket, record=queriesToSend)
# print("Added " + str(len(queriesToSend)) + " points to InfluxDB.")
queriesToSendCount += len(queriesToSend)
print("Added " + str(queriesToSendCount) + " points to InfluxDB.")
client.close()
with open('statsByParentNode.json', 'w') as infile:
json.dump(parentNodes, infile)
with open('statsByDevice.json', 'w') as infile:
json.dump(devices, infile)
endTime = datetime.now()
durationSeconds = round((endTime - startTime).total_seconds(), 2)
print("Graphs updated within " + str(durationSeconds) + " seconds.")
if __name__ == '__main__':
refreshBandwidthGraphs()


@@ -1,132 +0,0 @@
import os
import subprocess
from subprocess import PIPE
import io
import decimal
import json
from ispConfig import fqOrCAKE, interfaceA, interfaceB, influxDBBucket, influxDBOrg, influxDBtoken, influxDBurl, ppingLocation
from datetime import date, datetime, timedelta
from influxdb_client import InfluxDBClient, Point, Dialect
from influxdb_client.client.write_api import SYNCHRONOUS
import dateutil.parser
def getLatencies(devices, secondsToRun):
interfaces = [interfaceA, interfaceB]
maxLatencyRecordable = 200
matchableIPs = []
for device in devices:
matchableIPs.append(device['ipv4'])
rttDict = {}
jitterDict = {}
#for interface in interfaces:
command = "./pping -i " + interfaceA + " -s " + str(secondsToRun) + " -m"
commands = command.split(' ')
wd = ppingLocation
tcShowResults = subprocess.run(command, shell=True, cwd=wd,stdout=subprocess.PIPE, stderr=subprocess.DEVNULL).stdout.decode('utf-8').splitlines()
for line in tcShowResults:
if len(line) > 59:
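# slice two RTT samples (in seconds, converted to ms) and the flow tuple out of pping's fixed-width machine-readable output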
rtt1 = float(line[18:27])*1000
rtt2 = float(line[27:36]) *1000
toAndFrom = line[38:].split(' ')[3]
fromIP = toAndFrom.split('+')[0].split(':')[0]
toIP = toAndFrom.split('+')[1].split(':')[0]
matchedIP = ''
if fromIP in matchableIPs:
matchedIP = fromIP
elif toIP in matchableIPs:
matchedIP = toIP
jitter = rtt1 - rtt2
#Cap at the maximum recordable latency
if rtt1 >= maxLatencyRecordable:
rtt1 = maxLatencyRecordable
#Lowest observed rtt
if matchedIP in rttDict:
if rtt1 < rttDict[matchedIP]:
rttDict[matchedIP] = rtt1
jitterDict[matchedIP] = jitter
else:
rttDict[matchedIP] = rtt1
jitterDict[matchedIP] = jitter
for device in devices:
if device['ipv4'] in rttDict:
device['tcpLatency'] = rttDict[device['ipv4']]
else:
device['tcpLatency'] = None
if device['ipv4'] in jitterDict:
device['tcpJitter'] = jitterDict[device['ipv4']]
else:
device['tcpJitter'] = None
return devices
def getParentNodeStats(parentNodes, devices):
for parentNode in parentNodes:
acceptableLatencies = []
for device in devices:
if device['ParentNode'] == parentNode['parentNodeName']:
if device['tcpLatency'] is not None:
acceptableLatencies.append(device['tcpLatency'])
if len(acceptableLatencies) > 0:
parentNode['tcpLatency'] = sum(acceptableLatencies)/len(acceptableLatencies)
else:
parentNode['tcpLatency'] = None
return parentNodes
def refreshLatencyGraphs(secondsToRun):
startTime = datetime.now()
with open('statsByParentNode.json', 'r') as j:
parentNodes = json.loads(j.read())
with open('statsByDevice.json', 'r') as j:
devices = json.loads(j.read())
print("Retrieving device statistics")
devices = getLatencies(devices, secondsToRun)
print("Computing parent node statistics")
parentNodes = getParentNodeStats(parentNodes, devices)
print("Writing data to InfluxDB")
bucket = influxDBBucket
org = influxDBOrg
token = influxDBtoken
url = influxDBurl
client = InfluxDBClient(
url=url,
token=token,
org=org
)
write_api = client.write_api(write_options=SYNCHRONOUS)
queriesToSend = []
for device in devices:
if device['tcpLatency'] is not None:
p = Point('Latency').tag("Device", device['hostname']).tag("ParentNode", device['ParentNode']).tag("Type", "Device").field("TCP Latency", device['tcpLatency'])
queriesToSend.append(p)
for parentNode in parentNodes:
if parentNode['tcpLatency'] is not None:
p = Point('Latency').tag("Device", parentNode['parentNodeName']).tag("ParentNode", parentNode['parentNodeName']).tag("Type", "Parent Node").field("TCP Latency", parentNode['tcpLatency'])
queriesToSend.append(p)
write_api.write(bucket=bucket, record=queriesToSend)
print("Added " + str(len(queriesToSend)) + " points to InfluxDB.")
client.close()
#with open('statsByParentNode.json', 'w') as infile:
# json.dump(parentNodes, infile)
#with open('statsByDevice.json', 'w') as infile:
# json.dump(devices, infile)
endTime = datetime.now()
durationSeconds = round((endTime - startTime).total_seconds())
print("Graphs updated within " + str(durationSeconds) + " seconds.")
if __name__ == '__main__':
refreshLatencyGraphs(10)

File diff suppressed because it is too large


@@ -1,122 +0,0 @@
import requests
import csv
import ipaddress
from ispConfig import uispBaseURL, uispAuthToken, shapeRouterOrStation, ignoreSubnets
import shutil
stationModels = ['LBE-5AC-Gen2', 'LBE-5AC-LR', 'AF-LTU5', 'AFLTULR', 'AFLTUPro', 'LTU-LITE']
routerModels = ['ACB-AC', 'ACB-ISP']
def pullShapedDevices():
devices = []
uispSitesToImport = []
url = uispBaseURL + "/nms/api/v2.1/sites?type=client&ucrm=true&ucrmDetails=true"
headers = {'accept':'application/json', 'x-auth-token': uispAuthToken}
r = requests.get(url, headers=headers)
jsonData = r.json()
uispDevicesToImport = []
for uispClientSite in jsonData:
if (uispClientSite['identification']['status'] == 'active'):
if (uispClientSite['qos']['downloadSpeed']) and (uispClientSite['qos']['uploadSpeed']):
downloadSpeedMbps = int(round(uispClientSite['qos']['downloadSpeed']/1000000))
uploadSpeedMbps = int(round(uispClientSite['qos']['uploadSpeed']/1000000))
address = uispClientSite['description']['address']
uispClientSiteID = uispClientSite['id']
devicesInUISPsite = getUISPdevicesAtClientSite(uispClientSiteID)
UCRMclientID = uispClientSite['ucrm']['client']['id']
AP = 'none'
thisSiteDevices = []
#Look for station devices, use those to find AP name
for device in devicesInUISPsite:
deviceName = device['identification']['name']
deviceRole = device['identification']['role']
deviceModel = device['identification']['model']
deviceModelName = device['identification']['modelName']
if (deviceRole == 'station') or (deviceModel in stationModels):
if device['attributes']['apDevice']:
AP = device['attributes']['apDevice']['name']
if shapeRouterOrStation == 'router':
#Look for router devices, use those as shaped CPE
for device in devicesInUISPsite:
deviceName = device['identification']['name']
deviceRole = device['identification']['role']
deviceMAC = device['identification']['mac']
deviceIPstring = device['ipAddress']
if '/' in deviceIPstring:
deviceIPstring = deviceIPstring.split("/")[0]
deviceModel = device['identification']['model']
deviceModelName = device['identification']['modelName']
if (deviceRole == 'router') or (deviceModel in routerModels):
print("Added " + ":\t" + deviceName)
devices.append((UCRMclientID, AP,deviceMAC, deviceName, deviceIPstring,'', str(downloadSpeedMbps/4), str(uploadSpeedMbps/4), str(downloadSpeedMbps),str(uploadSpeedMbps)))
elif shapeRouterOrStation == 'station':
#Look for station devices, use those as shaped CPE
for device in devicesInUISPsite:
deviceName = device['identification']['name']
deviceRole = device['identification']['role']
deviceMAC = device['identification']['mac']
deviceIPstring = device['ipAddress']
if '/' in deviceIPstring:
deviceIPstring = deviceIPstring.split("/")[0]
deviceModel = device['identification']['model']
deviceModelName = device['identification']['modelName']
if (deviceRole == 'station') or (deviceModel in stationModels):
print("Added " + ":\t" + deviceName)
devices.append((UCRMclientID, AP,deviceMAC, deviceName, deviceIPstring,'', str(round(downloadSpeedMbps/4)), str(round(uploadSpeedMbps/4)), str(downloadSpeedMbps),str(uploadSpeedMbps)))
uispSitesToImport.append(thisSiteDevices)
print("Imported " + address)
else:
print("Failed to import devices from " + uispClientSite['description']['address'] + ". Missing QoS.")
return devices
def getUISPdevicesAtClientSite(siteID):
url = uispBaseURL + "/nms/api/v2.1/devices?siteId=" + siteID
headers = {'accept':'application/json', 'x-auth-token': uispAuthToken}
r = requests.get(url, headers=headers)
return (r.json())
def updateFromUISP():
# Copy file shaper to backup in case of power loss during write of new version
shutil.copy('Shaper.csv', 'Shaper.csv.bak')
devicesFromShaperCSV = []
with open('Shaper.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
next(csv_reader)
for row in csv_reader:
deviceID, ParentNode, mac, hostname,ipv4, ipv6, downloadMin, uploadMin, downloadMax, uploadMax = row
ipv4 = ipv4.strip()
ipv6 = ipv6.strip()
ParentNode = ParentNode.strip()
devicesFromShaperCSV.append((deviceID, ParentNode, mac, hostname,ipv4, ipv6, downloadMin, uploadMin, downloadMax, uploadMax))
#Make list of IPs, so that we can check if a device being imported is already entered in Shaper.csv
devicesPulledFromUISP = pullShapedDevices()
mergedDevicesList = devicesFromShaperCSV
ipv4List = []
ipv6List = []
for device in devicesFromShaperCSV:
deviceID, ParentNode, mac, hostname,ipv4, ipv6, downloadMin, uploadMin, downloadMax, uploadMax = device
if (ipv4 != ''):
ipv4List.append(ipv4)
if (ipv6 != ''):
ipv6List.append(ipv6)
#For each device brought in from UISP, check if it's in excluded subnets. If not, add it to Shaper.csv
for device in devicesPulledFromUISP:
deviceID, ParentNode, mac, hostname,ipv4, ipv6, downloadMin, uploadMin, downloadMax, uploadMax = device
isThisIPexcludable = False
for subnet in ignoreSubnets:
if ipaddress.ip_address(ipv4) in ipaddress.ip_network(subnet):
isThisIPexcludable = True
if (not isThisIPexcludable) and (ipv4 not in ipv4List):
mergedDevicesList.append(device)
with open('Shaper.csv', 'w') as csvfile:
wr = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
wr.writerow(['ID', 'AP', 'MAC', 'Hostname', 'IPv4', 'IPv6', 'Download Min', 'Upload Min', 'Download Max', 'Upload Max'])
for device in mergedDevicesList:
wr.writerow(device)
if __name__ == '__main__':
updateFromUISP()


@@ -1,58 +0,0 @@
# 'fq_codel' or 'cake diffserv4'
# 'cake diffserv4' is recommended
# fqOrCAKE = 'fq_codel'
fqOrCAKE = 'cake diffserv4'
# How many Mbps are available to the edge of this network
upstreamBandwidthCapacityDownloadMbps = 1000
upstreamBandwidthCapacityUploadMbps = 1000
# Traffic from devices not specified in Shaper.csv will be rate limited by an HTB of this many Mbps
defaultClassCapacityDownloadMbps = 500
defaultClassCapacityUploadMbps = 500
# Interface connected to core router
interfaceA = 'eth1'
# Interface connected to edge router
interfaceB = 'eth2'
# Shape by Site in addition to by AP and Client
# Now deprecated, was only used prior to v1.1
# shapeBySite = True
# Allow shell commands. False causes commands to print to the console without being executed. MUST BE ENABLED
# FOR THE PROGRAM TO FUNCTION
enableActualShellCommands = True
# Add 'sudo' before execution of any shell commands. May be required depending on distribution and environment.
runShellCommandsAsSudo = False
# Graphing
graphingEnabled = True
ppingLocation = "pping"
influxDBurl = "http://localhost:8086"
influxDBBucket = "libreqos"
influxDBOrg = "Your ISP Name Here"
influxDBtoken = ""
# NMS/CRM Integration
# If a device shows a WAN IP within these subnets, assume they are behind NAT / un-shapable, and ignore them
ignoreSubnets = ['192.168.0.0/16']
# Optional UISP integration
automaticImportUISP = False
# Everything before /nms/ on your UISP instance
uispBaseURL = 'https://examplesite.com'
# UISP Auth Token
uispAuthToken = ''
# UISP | Whether to shape router at customer premises, or instead shape the station radio. When station radio is in
# router mode, use 'station'. Otherwise, use 'router'.
shapeRouterOrStation = 'router'
# API Auth
apiUsername = "testUser"
apiPassword = "changeme8343486806"
apiHostIP = "127.0.0.1"
apiHostPost = 5000


@@ -1,356 +0,0 @@
from flask import Flask
from flask_restful import Resource, Api, reqparse
from flask_httpauth import HTTPBasicAuth
import ast
import csv
from werkzeug.security import generate_password_hash, check_password_hash
from ispConfig import apiUsername, apiPassword, apiHostIP, apiHostPost
from LibreQoS import refreshShapers
app = Flask(__name__)
api = Api(app)
auth = HTTPBasicAuth()
users = {
apiUsername: generate_password_hash(apiPassword)
}
@auth.verify_password
def verify_password(username, password):
if username in users and check_password_hash(users.get(username), password):
return username
class Devices(Resource):
# Get
@auth.login_required
def get(self):
devices = []
with open('Shaper.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
header_store = next(csv_reader)
for row in csv_reader:
deviceID, parentNode, mac, hostname,ipv4, ipv6, downloadMin, uploadMin, downloadMax, uploadMax = row
ipv4 = ipv4.strip()
ipv6 = ipv6.strip()
if parentNode == "":
parentNode = "none"
parentNode = parentNode.strip()
thisDevice = {
"id": deviceID,
"mac": mac,
"parentNode": parentNode,
"hostname": hostname,
"ipv4": ipv4,
"ipv6": ipv6,
"downloadMin": int(downloadMin),
"uploadMin": int(uploadMin),
"downloadMax": int(downloadMax),
"uploadMax": int(uploadMax),
"qdisc": '',
}
devices.append(thisDevice)
return {'data': devices}, 200 # return data and 200 OK code
# Post
@auth.login_required
def post(self):
devices = []
idOnlyList = []
ipv4onlyList = []
ipv6onlyList = []
hostnameOnlyList = []
with open('Shaper.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
header_store = next(csv_reader)
for row in csv_reader:
deviceID, parentNode, mac, hostname,ipv4, ipv6, downloadMin, uploadMin, downloadMax, uploadMax = row
ipv4 = ipv4.strip()
ipv6 = ipv6.strip()
if parentNode == "":
parentNode = "none"
parentNode = parentNode.strip()
thisDevice = {
"id": deviceID,
"mac": mac,
"parentNode": parentNode,
"hostname": hostname,
"ipv4": ipv4,
"ipv6": ipv6,
"downloadMin": int(downloadMin),
"uploadMin": int(uploadMin),
"downloadMax": int(downloadMax),
"uploadMax": int(uploadMax),
"qdisc": '',
}
devices.append(thisDevice)
ipv4onlyList.append(ipv4)
ipv6onlyList.append(ipv6)
idOnlyList.append(deviceID)
hostnameOnlyList.append(hostname)
parser = reqparse.RequestParser() # initialize
parser.add_argument('id', required=False)
parser.add_argument('mac', required=False)
parser.add_argument('parentNode', required=False)
parser.add_argument('hostname', required=False)
parser.add_argument('ipv4', required=False)
parser.add_argument('ipv6', required=False)
parser.add_argument('downloadMin', required=True)
parser.add_argument('uploadMin', required=True)
parser.add_argument('downloadMax', required=True)
parser.add_argument('uploadMax', required=True)
parser.add_argument('qdisc', required=False)
args = parser.parse_args() # parse arguments to dictionary
args['downloadMin'] = int(args['downloadMin'])
args['uploadMin'] = int(args['uploadMin'])
args['downloadMax'] = int(args['downloadMax'])
args['uploadMax'] = int(args['uploadMax'])
if (args['id'] in idOnlyList):
return {
'message': f"'{args['id']}' already exists."
}, 401
elif (args['ipv4'] in ipv4onlyList):
return {
'message': f"'{args['ipv4']}' already exists."
}, 401
elif (args['ipv6'] in ipv6onlyList):
return {
'message': f"'{args['ipv6']}' already exists."
}, 401
elif (args['hostname'] in hostnameOnlyList):
return {
'message': f"'{args['hostname']}' already exists."
}, 401
else:
if args['parentNode'] == None:
args['parentNode'] = "none"
newDevice = {
"id": args['id'],
"mac": args['mac'],
"parentNode": args['parentNode'],
"hostname": args['hostname'],
"ipv4": args['ipv4'],
"ipv6": args['ipv6'],
"downloadMin": int(args['downloadMin']),
"uploadMin": int(args['uploadMin']),
"downloadMax": int(args['downloadMax']),
"uploadMax": int(args['uploadMax']),
"qdisc": '',
}
# Preserve existing entries and append the new device
revisedDevices = devices.copy()
revisedDevices.append(newDevice)
# create new Shaper.csv containing new values
with open('Shaper.csv', 'w') as csvfile:
wr = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
wr.writerow(header_store)
for device in revisedDevices:
wr.writerow((device['id'], device['parentNode'], device['mac'], device['hostname'] , device['ipv4'], device['ipv6'], device['downloadMin'], device['uploadMin'], device['downloadMax'], device['uploadMax']))
return {'data': newDevice}, 200 # return data with 200 OK
# Put
@auth.login_required
def put(self):
devices = []
idOnlyList = []
ipv4onlyList = []
ipv6onlyList = []
hostnameOnlyList = []
with open('Shaper.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
header_store = next(csv_reader)
for row in csv_reader:
deviceID, parentNode, mac, hostname,ipv4, ipv6, downloadMin, uploadMin, downloadMax, uploadMax = row
ipv4 = ipv4.strip()
ipv6 = ipv6.strip()
if parentNode == "":
parentNode = "none"
parentNode = parentNode.strip()
thisDevice = {
"id": deviceID,
"mac": mac,
"parentNode": parentNode,
"hostname": hostname,
"ipv4": ipv4,
"ipv6": ipv6,
"downloadMin": int(downloadMin),
"uploadMin": int(uploadMin),
"downloadMax": int(downloadMax),
"uploadMax": int(uploadMax),
"qdisc": '',
}
devices.append(thisDevice)
ipv4onlyList.append(ipv4)
ipv6onlyList.append(ipv6)
idOnlyList.append(deviceID)
hostnameOnlyList.append(hostname)
parser = reqparse.RequestParser() # initialize
parser.add_argument('id', required=False)
parser.add_argument('mac', required=False)
parser.add_argument('parentNode', required=False)
parser.add_argument('hostname', required=False)
parser.add_argument('ipv4', required=False)
parser.add_argument('ipv6', required=False)
parser.add_argument('downloadMin', required=True)
parser.add_argument('uploadMin', required=True)
parser.add_argument('downloadMax', required=True)
parser.add_argument('uploadMax', required=True)
parser.add_argument('qdisc', required=False)
args = parser.parse_args() # parse arguments to dictionary
args['downloadMin'] = int(args['downloadMin'])
args['uploadMin'] = int(args['uploadMin'])
args['downloadMax'] = int(args['downloadMax'])
args['uploadMax'] = int(args['uploadMax'])
if (args['id'] in idOnlyList) or (args['ipv4'] in ipv4onlyList) or (args['ipv6'] in ipv6onlyList) or (args['hostname'] in hostnameOnlyList):
if args['parentNode'] == None:
args['parentNode'] = "none"
newDevice = {
"id": args['id'],
"mac": args['mac'],
"parentNode": args['parentNode'],
"hostname": args['hostname'],
"ipv4": args['ipv4'],
"ipv6": args['ipv6'],
"downloadMin": int(args['downloadMin']),
"uploadMin": int(args['uploadMin']),
"downloadMax": int(args['downloadMax']),
"uploadMax": int(args['uploadMax']),
"qdisc": '',
}
successfullyFoundMatch = False
revisedDevices = []
for device in devices:
if (device['id'] == args['id']) or (device['mac'] == args['mac']) or (device['hostname'] == args['hostname']) or (device['ipv4'] == args['ipv4']) or (device['ipv6'] == args['ipv6']):
revisedDevices.append(newDevice)
successfullyFoundMatch = True
else:
revisedDevices.append(device)
# create new Shaper.csv containing new values
with open('Shaper.csv', 'w') as csvfile:
wr = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
wr.writerow(header_store)
for device in revisedDevices:
wr.writerow((device['id'], device['parentNode'], device['mac'], device['hostname'] , device['ipv4'], device['ipv6'], device['downloadMin'], device['uploadMin'], device['downloadMax'], device['uploadMax']))
return {'data': newDevice}, 200 # return data with 200 OK
else:
return {
'message': f" Matching device entry not found."
}, 404
# Delete
@auth.login_required
def delete(self):
devices = []
idOnlyList = []
ipv4onlyList = []
ipv6onlyList = []
hostnameOnlyList = []
with open('Shaper.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
header_store = next(csv_reader)
for row in csv_reader:
deviceID, parentNode, mac, hostname,ipv4, ipv6, downloadMin, uploadMin, downloadMax, uploadMax = row
ipv4 = ipv4.strip()
ipv6 = ipv6.strip()
if parentNode == "":
parentNode = "none"
parentNode = parentNode.strip()
thisDevice = {
"id": deviceID,
"mac": mac,
"parentNode": parentNode,
"hostname": hostname,
"ipv4": ipv4,
"ipv6": ipv6,
"downloadMin": int(downloadMin),
"uploadMin": int(uploadMin),
"downloadMax": int(downloadMax),
"uploadMax": int(uploadMax),
"qdisc": '',
}
devices.append(thisDevice)
ipv4onlyList.append(ipv4)
ipv6onlyList.append(ipv6)
idOnlyList.append(deviceID)
hostnameOnlyList.append(hostname)
parser = reqparse.RequestParser() # initialize
parser.add_argument('id', required=False)
parser.add_argument('mac', required=False)
parser.add_argument('parentNode', required=False)
parser.add_argument('hostname', required=False)
parser.add_argument('ipv4', required=False)
parser.add_argument('ipv6', required=False)
parser.add_argument('downloadMin', required=False)
parser.add_argument('uploadMin', required=False)
parser.add_argument('downloadMax', required=False)
parser.add_argument('uploadMax', required=False)
parser.add_argument('qdisc', required=False)
args = parser.parse_args() # parse arguments to dictionary
if (args['id'] in idOnlyList) or (args['ipv4'] in ipv4onlyList) or (args['ipv6'] in ipv6onlyList) or (args['hostname'] in hostnameOnlyList):
successfullyFoundMatch = False
revisedDevices = []
for device in devices:
if (device['id'] == args['id']) or (device['mac'] == args['mac']) or (device['hostname'] == args['hostname']) or (device['ipv4'] == args['ipv4']) or (device['ipv6'] == args['ipv6']):
# Simply do not add device to revisedDevices
successfullyFoundMatch = True
else:
revisedDevices.append(device)
# create new Shaper.csv containing new values
with open('Shaper.csv', 'w') as csvfile:
wr = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
wr.writerow(header_store)
for device in revisedDevices:
wr.writerow((device['id'], device['parentNode'], device['mac'], device['hostname'] , device['ipv4'], device['ipv6'], device['downloadMin'], device['uploadMin'], device['downloadMax'], device['uploadMax']))
return {
'message': "Matching device entry successfully deleted."
}, 200 # return data with 200 OK
else:
return {
'message': f" Matching device entry not found."
}, 404
class Shaper(Resource):
# Post
@auth.login_required
def post(self):
parser = reqparse.RequestParser() # initialize
parser.add_argument('refresh', required=True)
args = parser.parse_args() # parse arguments to dictionary
if str(args['refresh']).lower() == 'true': # reqparse yields strings, so compare against the string form rather than True
refreshShapers()
return {
'message': "Successfully refreshed LibreQoS device shaping."
}, 200 # return data and 200 OK code
api.add_resource(Devices, '/devices') # '/devices' is our 1st entry point
api.add_resource(Shaper, '/shaper') # '/shaper' is our 2nd entry point
if __name__ == '__main__':
from waitress import serve
#app.run(debug=True) # debug mode
serve(app, host=apiHostIP, port=apiHostPost)
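
As a usage sketch (assuming the default apiUsername, apiPassword, apiHostIP and port from ispConfig.py), the devices endpoint can be queried with HTTP Basic auth:

import requests

# hypothetical client call against a locally running lqApi instance
r = requests.get('http://127.0.0.1:5000/devices', auth=('testUser', 'changeme8343486806'))
print(r.json())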


@@ -1,78 +0,0 @@
{
"Site_1":
{
"downloadBandwidthMbps":1000,
"uploadBandwidthMbps":1000,
"children":
{
"AP_A":
{
"downloadBandwidthMbps":500,
"uploadBandwidthMbps":500
},
"Site_3":
{
"downloadBandwidthMbps":500,
"uploadBandwidthMbps":500,
"children":
{
"PoP_5":
{
"downloadBandwidthMbps":200,
"uploadBandwidthMbps":200,
"children":
{
"AP_9":
{
"downloadBandwidthMbps":120,
"uploadBandwidthMbps":120
},
"PoP_6":
{
"downloadBandwidthMbps":60,
"uploadBandwidthMbps":60,
"children":
{
"AP_11":
{
"downloadBandwidthMbps":30,
"uploadBandwidthMbps":30
}
}
}
}
}
}
}
}
},
"Site_2":
{
"downloadBandwidthMbps":500,
"uploadBandwidthMbps":500,
"children":
{
"PoP_1":
{
"downloadBandwidthMbps":200,
"uploadBandwidthMbps":200,
"children":
{
"AP_7":
{
"downloadBandwidthMbps":100,
"uploadBandwidthMbps":100
}
}
},
"AP_1":
{
"downloadBandwidthMbps":150,
"uploadBandwidthMbps":150
}
}
}
}


@@ -1,26 +0,0 @@
import time
import schedule
from LibreQoS import refreshShapers
from graphBandwidth import refreshBandwidthGraphs
from graphLatency import refreshLatencyGraphs
from ispConfig import graphingEnabled, automaticImportUISP
from integrationUISP import updateFromUISP
def importandshape():
if automaticImportUISP:
updateFromUISP()
refreshShapers()
if __name__ == '__main__':
importandshape()
schedule.every().day.at("04:00").do(importandshape)
while True:
schedule.run_pending()
if graphingEnabled:
try:
refreshBandwidthGraphs()
refreshLatencyGraphs(10)
except Exception:
print("Failed to update graphs")
else:
time.sleep(60) # wait 60 seconds between checks while graphing is disabled