Mirror of https://github.com/LibreQoE/LibreQoS.git
Delete 0.9 directory
This commit is contained in:
parent de4fb68488
commit d0fddccc98
0.9/AccessPoints.csv
@@ -1,6 +0,0 @@
AP,Max Download,Max Upload
A,500,100
C,225,50
F,500,100
R,225,50
X,500,100
234  0.9/LibreQoS.py
@@ -1,234 +0,0 @@
# Copyright (C) 2020 Robert Chacón
# This file is part of LibreQoS.
#
# LibreQoS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# LibreQoS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LibreQoS. If not, see <http://www.gnu.org/licenses/>.
#
#  _     _ _               ___       ____
# | |   (_) |__  _ __ ___ / _ \  ___/ ___|
# | |   | | '_ \| '__/ _ \ | | |/ _ \___ \
# | |___| | |_) | | |  __/ |_| | (_) |__) |
# |_____|_|_.__/|_|  \___|\__\_\\___/____/
#                  v.0.90-alpha
#
import random
import logging
import os
import io
import json
import csv
import subprocess
from subprocess import PIPE
import ipaddress
from ipaddress import IPv4Address, IPv6Address
import time
from datetime import date, datetime
from ispConfig import fqOrCAKE, upstreamBandwidthCapacityDownloadMbps, upstreamBandwidthCapacityUploadMbps, defaultClassCapacityDownloadMbps, defaultClassCapacityUploadMbps, interfaceA, interfaceB, enableActualShellCommands, runShellCommandsAsSudo
import collections

def shell(command):
    if enableActualShellCommands:
        if runShellCommandsAsSudo:
            command = 'sudo ' + command
        commands = command.split(' ')
        print(command)
        proc = subprocess.Popen(commands, stdout=subprocess.PIPE)
        for line in io.TextIOWrapper(proc.stdout, encoding="utf-8"):  # or another encoding
            print(line)
    else:
        print(command)

def clearPriorSettings(interfaceA, interfaceB):
    shell('tc filter delete dev ' + interfaceA)
    shell('tc filter delete dev ' + interfaceA + ' root')
    shell('tc qdisc delete dev ' + interfaceA + ' root')
    shell('tc qdisc delete dev ' + interfaceA)
    shell('tc filter delete dev ' + interfaceB)
    shell('tc filter delete dev ' + interfaceB + ' root')
    shell('tc qdisc delete dev ' + interfaceB + ' root')
    shell('tc qdisc delete dev ' + interfaceB)
    if runShellCommandsAsSudo:
        clearMemoryCache()

def refreshShapers():
    devices = []
    accessPointDownloadMbps = {}
    accessPointUploadMbps = {}
    filterHandleCounter = 101
    # Load Access Points
    with open('AccessPoints.csv') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        next(csv_reader)
        for row in csv_reader:
            AP, download, upload = row
            accessPointDownloadMbps[AP] = int(download)
            accessPointUploadMbps[AP] = int(upload)
    # Load Devices
    with open('Shaper.csv') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        next(csv_reader)
        for row in csv_reader:
            deviceID, AP, mac, hostname, ipv4, ipv6, downloadMin, uploadMin, downloadMax, uploadMax = row
            ipv4 = ipv4.strip()
            ipv6 = ipv6.strip()
            if AP == "":
                AP = "none"
            AP = AP.strip()
            thisDevice = {
                "id": deviceID,
                "mac": mac,
                "AP": AP,
                "hostname": hostname,
                "ipv4": ipv4,
                "ipv6": ipv6,
                "downloadMin": int(downloadMin),
                "uploadMin": int(uploadMin),
                "downloadMax": int(downloadMax),
                "uploadMax": int(uploadMax),
                "qdisc": '',
            }
            # If an AP is specified for a device in Shaper.csv, but that AP is not listed in AccessPoints.csv, raise an exception
            if (AP != "none") and (AP not in accessPointDownloadMbps):
                raise ValueError('AP ' + AP + ' not listed in AccessPoints.csv')
            devices.append(thisDevice)

    # If no AP is specified for a device in Shaper.csv, it is placed under this 'default' AP shaper, set to the bandwidth max at the edge
    accessPointDownloadMbps['none'] = upstreamBandwidthCapacityDownloadMbps
    accessPointUploadMbps['none'] = upstreamBandwidthCapacityUploadMbps

    # Sort into bins by AP
    result = collections.defaultdict(list)

    for d in devices:
        result[d['AP']].append(d)

    devicesByAP = list(result.values())

    clearPriorSettings(interfaceA, interfaceB)

    # XDP-CPUMAP-TC
    shell('./xdp-cpumap-tc/bin/xps_setup.sh -d ' + interfaceA + ' --default')
    shell('./xdp-cpumap-tc/bin/xps_setup.sh -d ' + interfaceB + ' --default')

    shell('./xdp-cpumap-tc/src/xdp_iphash_to_cpu --dev ' + interfaceA + ' --lan')
    shell('./xdp-cpumap-tc/src/xdp_iphash_to_cpu --dev ' + interfaceB + ' --wan')
    shell('./xdp-cpumap-tc/src/xdp_iphash_to_cpu_cmdline --clear')

    shell('./xdp-cpumap-tc/src/tc_classify --dev-egress ' + interfaceA)
    shell('./xdp-cpumap-tc/src/tc_classify --dev-egress ' + interfaceB)

    # Create MQ
    cpusAvailable = 0

    path = '/sys/class/net/' + interfaceA + '/queues/'
    directory_contents = os.listdir(path)
    print(directory_contents)
    for item in directory_contents:
        if "tx-" in str(item):
            cpusAvailable += 1
    thisInterface = interfaceA
    shell('tc qdisc replace dev ' + thisInterface + ' root handle 7FFF: mq')
    for cpu in range(cpusAvailable):
        shell('tc qdisc add dev ' + thisInterface + ' parent 7FFF:' + str(cpu+1) + ' handle ' + str(cpu+1) + ': htb default 2')
        shell('tc class add dev ' + thisInterface + ' parent ' + str(cpu+1) + ': classid ' + str(cpu+1) + ':1 htb rate ' + str(upstreamBandwidthCapacityDownloadMbps) + 'mbit ceil ' + str(upstreamBandwidthCapacityDownloadMbps) + 'mbit')
        shell('tc qdisc add dev ' + thisInterface + ' parent ' + str(cpu+1) + ':1 ' + fqOrCAKE)
        # Default class - traffic gets passed through this limiter if not otherwise classified by Shaper.csv
        shell('tc class add dev ' + thisInterface + ' parent ' + str(cpu+1) + ':1 classid ' + str(cpu+1) + ':2 htb rate ' + str(defaultClassCapacityDownloadMbps) + 'mbit ceil ' + str(defaultClassCapacityDownloadMbps) + 'mbit prio 5')
        shell('tc qdisc add dev ' + thisInterface + ' parent ' + str(cpu+1) + ':2 ' + fqOrCAKE)

    thisInterface = interfaceB
    shell('tc qdisc replace dev ' + thisInterface + ' root handle 7FFF: mq')
    for cpu in range(cpusAvailable):
        shell('tc qdisc add dev ' + thisInterface + ' parent 7FFF:' + str(cpu+1) + ' handle ' + str(cpu+1) + ': htb default 2')
        shell('tc class add dev ' + thisInterface + ' parent ' + str(cpu+1) + ': classid ' + str(cpu+1) + ':1 htb rate ' + str(upstreamBandwidthCapacityDownloadMbps) + 'mbit ceil ' + str(upstreamBandwidthCapacityDownloadMbps) + 'mbit')
        shell('tc qdisc add dev ' + thisInterface + ' parent ' + str(cpu+1) + ':1 ' + fqOrCAKE)
        # Default class - traffic gets passed through this limiter if not otherwise classified by Shaper.csv
        shell('tc class add dev ' + thisInterface + ' parent ' + str(cpu+1) + ':1 classid ' + str(cpu+1) + ':2 htb rate ' + str(defaultClassCapacityDownloadMbps) + 'mbit ceil ' + str(defaultClassCapacityDownloadMbps) + 'mbit prio 5')
        shell('tc qdisc add dev ' + thisInterface + ' parent ' + str(cpu+1) + ':2 ' + fqOrCAKE)

    currentCPUcounter = 1
    ipv4Filters = []
    ipv6Filters = []

    cpuMinorCounterDict = {}

    for cpu in range(cpusAvailable):
        cpuMinorCounterDict[cpu] = 3

    for AP in devicesByAP:
        # Create HTBs by AP
        currentAPname = AP[0]['AP']
        thisAPdownload = accessPointDownloadMbps[currentAPname]
        thisAPupload = accessPointUploadMbps[currentAPname]

        major = currentCPUcounter
        minor = cpuMinorCounterDict[currentCPUcounter]
        # HTBs for each AP
        thisHTBclassID = str(currentCPUcounter) + ':' + str(minor)
        # Guarantee the AP gets at least 1/2 of its radio capacity, and allow up to its max radio capacity when the network is not at peak load
        shell('tc class add dev ' + interfaceA + ' parent ' + str(currentCPUcounter) + ':1 classid ' + str(minor) + ' htb rate ' + str(round(thisAPdownload/2)) + 'mbit ceil ' + str(round(thisAPdownload)) + 'mbit prio 3')
        shell('tc qdisc add dev ' + interfaceA + ' parent ' + str(currentCPUcounter) + ':' + str(minor) + ' ' + fqOrCAKE)
        shell('tc class add dev ' + interfaceB + ' parent ' + str(major) + ':1 classid ' + str(minor) + ' htb rate ' + str(round(thisAPupload/2)) + 'mbit ceil ' + str(round(thisAPupload)) + 'mbit prio 3')
        shell('tc qdisc add dev ' + interfaceB + ' parent ' + str(major) + ':' + str(minor) + ' ' + fqOrCAKE)
        minor += 1
        for device in AP:
            # QDiscs for each device
            downloadMin = device['downloadMin']
            downloadMax = device['downloadMax']
            uploadMin = device['uploadMin']
            uploadMax = device['uploadMax']
            shell('tc class add dev ' + interfaceA + ' parent ' + thisHTBclassID + ' classid ' + str(minor) + ' htb rate ' + str(downloadMin) + 'mbit ceil ' + str(downloadMax) + 'mbit prio 3')
            shell('tc qdisc add dev ' + interfaceA + ' parent ' + str(major) + ':' + str(minor) + ' ' + fqOrCAKE)
            shell('tc class add dev ' + interfaceB + ' parent ' + thisHTBclassID + ' classid ' + str(minor) + ' htb rate ' + str(uploadMin) + 'mbit ceil ' + str(uploadMax) + 'mbit prio 3')
            shell('tc qdisc add dev ' + interfaceB + ' parent ' + str(major) + ':' + str(minor) + ' ' + fqOrCAKE)
            if device['ipv4']:
                parentString = str(major) + ':'
                flowIDstring = str(major) + ':' + str(minor)
                ipv4Filters.append((device['ipv4'], parentString, flowIDstring, currentCPUcounter))
            if device['ipv6']:
                parentString = str(major) + ':'
                flowIDstring = str(major) + ':' + str(minor)
                ipv6Filters.append((device['ipv6'], parentString, flowIDstring, currentCPUcounter))
            deviceQDiscID = str(major) + ':' + str(minor)
            device['qdisc'] = str(major) + ':' + str(minor)
            minor += 1
        cpuMinorCounterDict[currentCPUcounter] = minor

        currentCPUcounter += 1
        if currentCPUcounter > cpusAvailable:
            currentCPUcounter = 1

    # IPv4 Filters
    hashTableCounter = 3 + cpusAvailable
    for cpu in range(cpusAvailable):
        for ipv4Filter in ipv4Filters:
            ipv4, parent, classid, filterCpuNum = ipv4Filter
            if filterCpuNum == cpu:
                #if '/' in ipv4:
                #    ipv4 = ipv4.split('/')[0]
                filterHandle = hex(filterHandleCounter)
                shell('./xdp-cpumap-tc/src/xdp_iphash_to_cpu_cmdline --add --ip ' + ipv4 + ' --cpu ' + str(filterCpuNum-1) + ' --classid ' + classid)
                filterHandleCounter += 1
        filterHandleCounter += 1

    # Save devices to file to allow for statistics runs
    with open('devices.json', 'w') as outfile:
        json.dump(devices, outfile)

    # Done
    currentTimeString = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
    print("Successful run completed on " + currentTimeString)

if __name__ == '__main__':
    refreshShapers()
    print("Program complete")
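Once refreshShapers() has built the mq root, per-CPU HTB, per-AP, and per-device classes above, the result can be inspected directly with tc. A minimal sketch, assuming the eth1 name that ispConfig.py uses for interfaceA; the exact handle and class numbers depend on the CPU count and on the devices in Shaper.csv:

```
# The mq root and per-CPU htb qdiscs created by refreshShapers()
tc qdisc show dev eth1
# Per-AP and per-device HTB classes with their rate/ceil values
tc class show dev eth1
# Byte, packet, drop, and backlog counters; this is the output stats.py parses
tc -s qdisc show
```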
@@ -1,12 +0,0 @@
Requires XDP-CPUMAP-TC
Only works on a dedicated Linux machine at the moment, not on VMs.
Will soon work on VMs once <a href="https://www.spinics.net/lists/bpf/msg42260.html">this patch</a> is part of the mainline Linux kernel.

```
cd LibreQoS
git clone https://github.com/xdp-project/xdp-cpumap-tc.git
cd xdp-cpumap-tc/src/
git submodule update --init
sudo apt install clang gcc llvm libelf-dev
make
```
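The build steps above stop at `make`. The following run steps are an assumption, not part of the original README, based on the other files in this directory (ispConfig.py, LibreQoS.py, scheduled.py):

```
# Assumed usage: set interfaceA/interfaceB and the Mbps capacities in ispConfig.py first.
# sudo is assumed because the tc and XDP setup commands need root
# (alternatively set runShellCommandsAsSudo = True in ispConfig.py).
sudo python3 ./LibreQoS.py      # apply the shaper hierarchy once
sudo python3 ./scheduled.py     # or keep it refreshed daily at 04:00
```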
0.9/Shaper.csv
@@ -1,6 +0,0 @@
ID,AP,MAC,Hostname,IPv4,IPv6,Download Min,Upload Min,Download Max,Upload Max
3001,A,32:3B:FE:B0:92:C1,CPE-Customer1,100.126.0.77,2001:495:1f0f:58a::4/64,25,8,115,18
3002,C,AE:EC:D3:70:DD:36,CPE-Customer2,100.126.0.78,2001:495:1f0f:58a::8/64,25,8,115,18
3003,F,1C:1E:60:69:88:9A,CPE-Customer3,100.126.0.79,2001:495:1f0f:58a::12/64,25,8,115,18
3004,R,11:B1:63:C4:DA:4C,CPE-Customer4,100.126.0.80,2001:495:1f0f:58a::16/64,25,8,115,18
3005,X,46:2F:B5:C2:0B:15,CPE-Customer5,100.126.0.81,2001:495:1f0f:58a::20/64,25,8,115,18
0.9/ispConfig.py
@@ -1,25 +0,0 @@
# 'fq_codel' or 'cake'
# Cake requires many specific packages and kernel changes:
# https://www.bufferbloat.net/projects/codel/wiki/Cake/
# https://github.com/dtaht/tc-adv
fqOrCAKE = 'fq_codel'

# How many Mbps are available to the edge of this network
upstreamBandwidthCapacityDownloadMbps = 1000
upstreamBandwidthCapacityUploadMbps = 1000

# Traffic from devices not specified in Shaper.csv will be rate limited by an HTB of this many Mbps
defaultClassCapacityDownloadMbps = 1000
defaultClassCapacityUploadMbps = 1000

# Interface connected to core router
interfaceA = 'eth1'

# Interface connected to edge router
interfaceB = 'eth2'

# Allow shell commands. False means commands are only printed to the console, not executed. MUST BE ENABLED FOR THE PROGRAM TO FUNCTION.
enableActualShellCommands = True

# Add 'sudo' before execution of any shell commands. May be required depending on distribution and environment.
runShellCommandsAsSudo = False
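For reference, the fqOrCAKE value above is appended verbatim to every leaf qdisc command that LibreQoS.py generates. An illustrative sketch with made-up handle numbers, assuming interfaceA = 'eth1' as configured here:

```
# With the default fqOrCAKE = 'fq_codel', the generated leaf qdiscs look like:
tc qdisc add dev eth1 parent 1:3 fq_codel
# Setting fqOrCAKE = 'cake' substitutes cake instead (kernel support required):
tc qdisc add dev eth1 parent 1:3 cake
```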
@@ -1,11 +0,0 @@
import time
import schedule
from datetime import date
from LibreQoS import refreshShapers

if __name__ == '__main__':
    refreshShapers()
    schedule.every().day.at("04:00").do(refreshShapers)
    while True:
        schedule.run_pending()
        time.sleep(60)  # wait one minute
176  0.9/stats.py
@@ -1,176 +0,0 @@
import os
import subprocess
from subprocess import PIPE
import io
import decimal
import json
from operator import itemgetter
from prettytable import PrettyTable
from ispConfig import fqOrCAKE

def getStatistics():
    tcShowResults = []
    command = 'tc -s qdisc show'
    commands = command.split(' ')
    proc = subprocess.Popen(commands, stdout=subprocess.PIPE)
    for line in io.TextIOWrapper(proc.stdout, encoding="utf-8"):  # or another encoding
        tcShowResults.append(line)
    allQDiscStats = []
    thisFlow = {}
    thisFlowStats = {}
    withinCorrectChunk = False
    for line in tcShowResults:
        expecting = "qdisc " + fqOrCAKE
        if expecting in line:
            thisFlow['qDiscID'] = line.split(' ')[6]
            withinCorrectChunk = True
        elif ("Sent " in line) and withinCorrectChunk:
            items = line.split(' ')
            thisFlowStats['GigabytesSent'] = str(round((int(items[2]) * 0.000000001), 1))
            thisFlowStats['PacketsSent'] = int(items[4])
            thisFlowStats['droppedPackets'] = int(items[7].replace(',',''))
            thisFlowStats['overlimitsPackets'] = int(items[9])
            thisFlowStats['requeuedPackets'] = int(items[11].replace(')',''))
            if thisFlowStats['PacketsSent'] > 0:
                overlimitsFreq = (thisFlowStats['overlimitsPackets']/thisFlowStats['PacketsSent'])
            else:
                overlimitsFreq = -1
        elif ('backlog' in line) and withinCorrectChunk:
            items = line.split(' ')
            thisFlowStats['backlogBytes'] = int(items[2].replace('b',''))
            thisFlowStats['backlogPackets'] = int(items[3].replace('p',''))
            thisFlowStats['requeues'] = int(items[5])
        elif ('maxpacket' in line) and withinCorrectChunk:
            items = line.split(' ')
            thisFlowStats['maxPacket'] = int(items[3])
            thisFlowStats['dropOverlimit'] = int(items[5])
            thisFlowStats['newFlowCount'] = int(items[7])
            thisFlowStats['ecnMark'] = int(items[9])
        elif ("new_flows_len" in line) and withinCorrectChunk:
            items = line.split(' ')
            thisFlowStats['newFlowsLen'] = int(items[3])
            thisFlowStats['oldFlowsLen'] = int(items[5])
            if thisFlowStats['PacketsSent'] == 0:
                thisFlowStats['percentageDropped'] = 0
            else:
                thisFlowStats['percentageDropped'] = thisFlowStats['droppedPackets']/thisFlowStats['PacketsSent']
            withinCorrectChunk = False
            thisFlow['stats'] = thisFlowStats
            allQDiscStats.append(thisFlow)
            thisFlowStats = {}
            thisFlow = {}
    # Load shapableDevices
    updatedFlowStats = []
    with open('devices.json', 'r') as infile:
        devices = json.load(infile)
    for shapableDevice in devices:
        shapableDeviceqdiscSrc = shapableDevice['qdiscSrc']
        shapableDeviceqdiscDst = shapableDevice['qdiscDst']
        for device in allQDiscStats:
            deviceFlowID = device['qDiscID']
            if shapableDeviceqdiscSrc == deviceFlowID:
                name = shapableDevice['hostname']
                AP = shapableDevice['AP']
                ipv4 = shapableDevice['ipv4']
                ipv6 = shapableDevice['ipv6']
                srcOrDst = 'src'
                tempDict = {'name': name, 'AP': AP, 'ipv4': ipv4, 'ipv6': ipv6, 'srcOrDst': srcOrDst}
                device['identification'] = tempDict
                updatedFlowStats.append(device)
            if shapableDeviceqdiscDst == deviceFlowID:
                name = shapableDevice['hostname']
                AP = shapableDevice['AP']
                ipv4 = shapableDevice['ipv4']
                ipv6 = shapableDevice['ipv6']
                srcOrDst = 'dst'
                tempDict = {'name': name, 'AP': AP, 'ipv4': ipv4, 'ipv6': ipv6, 'srcOrDst': srcOrDst}
                device['identification'] = tempDict
                updatedFlowStats.append(device)
    mergedStats = []
    for item in updatedFlowStats:
        if item['identification']['srcOrDst'] == 'src':
            newStat = {
                'identification': {
                    'name': item['identification']['name'],
                    'AP': item['identification']['AP'],
                    'ipv4': item['identification']['ipv4'],
                    'ipv6': item['identification']['ipv6']
                },
                'src': {
                    'GigabytesSent': item['stats']['GigabytesSent'],
                    'PacketsSent': item['stats']['PacketsSent'],
                    'droppedPackets': item['stats']['droppedPackets'],
                    'overlimitsPackets': item['stats']['overlimitsPackets'],
                    'requeuedPackets': item['stats']['requeuedPackets'],
                    'backlogBytes': item['stats']['backlogBytes'],
                    'backlogPackets': item['stats']['backlogPackets'],
                    'requeues': item['stats']['requeues'],
                    'maxPacket': item['stats']['maxPacket'],
                    'dropOverlimit': item['stats']['dropOverlimit'],
                    'newFlowCount': item['stats']['newFlowCount'],
                    'ecnMark': item['stats']['ecnMark'],
                    'newFlowsLen': item['stats']['newFlowsLen'],
                    'oldFlowsLen': item['stats']['oldFlowsLen'],
                    'percentageDropped': item['stats']['percentageDropped'],
                }
            }
            mergedStats.append(newStat)
    for item in updatedFlowStats:
        if item['identification']['srcOrDst'] == 'dst':
            ipv4 = item['identification']['ipv4']
            ipv6 = item['identification']['ipv6']
            newStat = {
                'dst': {
                    'GigabytesSent': item['stats']['GigabytesSent'],
                    'PacketsSent': item['stats']['PacketsSent'],
                    'droppedPackets': item['stats']['droppedPackets'],
                    'overlimitsPackets': item['stats']['overlimitsPackets'],
                    'requeuedPackets': item['stats']['requeuedPackets'],
                    'backlogBytes': item['stats']['backlogBytes'],
                    'backlogPackets': item['stats']['backlogPackets'],
                    'requeues': item['stats']['requeues'],
                    'maxPacket': item['stats']['maxPacket'],
                    'dropOverlimit': item['stats']['dropOverlimit'],
                    'newFlowCount': item['stats']['newFlowCount'],
                    'ecnMark': item['stats']['ecnMark'],
                    'newFlowsLen': item['stats']['newFlowsLen'],
                    'oldFlowsLen': item['stats']['oldFlowsLen'],
                    'percentageDropped': item['stats']['percentageDropped']
                }
            }
            for item2 in mergedStats:
                if ipv4 in item2['identification']['ipv4']:
                    item2 = item2.update(newStat)
                elif ipv6 in item2['identification']['ipv6']:
                    item2 = item2.update(newStat)
    return mergedStats

if __name__ == '__main__':
    mergedStats = getStatistics()

    # Display table of Customer CPEs with most packets dropped
    x = PrettyTable()
    x.field_names = ["Device", "AP", "IPv4", "IPv6", "UL Dropped", "DL Dropped", "GB Down/Up"]
    sortableList = []
    pickTop = 30
    for stat in mergedStats:
        name = stat['identification']['name']
        AP = stat['identification']['AP']
        ipv4 = stat['identification']['ipv4']
        ipv6 = stat['identification']['ipv6']
        srcDropped = stat['src']['percentageDropped']
        dstDropped = stat['dst']['percentageDropped']
        GBuploadedString = stat['src']['GigabytesSent']
        GBdownloadedString = stat['dst']['GigabytesSent']
        GBstring = GBuploadedString + '/' + GBdownloadedString
        avgDropped = (srcDropped + dstDropped)/2
        sortableList.append((name, AP, ipv4, ipv6, srcDropped, dstDropped, avgDropped, GBstring))
    res = sorted(sortableList, key=itemgetter(4), reverse=True)[:pickTop]
    for stat in res:
        name, AP, ipv4, ipv6, srcDropped, dstDropped, avgDropped, GBstring = stat
        if not name:
            name = ipv4
        srcDroppedString = "{0:.4%}".format(srcDropped)
        dstDroppedString = "{0:.4%}".format(dstDropped)
        x.add_row([name, AP, ipv4, ipv6, srcDroppedString, dstDroppedString, GBstring])
    print(x)
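Neither schedule nor prettytable ships with Python. An assumed way to pull them in before running the scheduler or stats.py above; the package names are the usual PyPI ones for these imports:

```
# Assumed dependency install for the scheduler (schedule) and stats.py (prettytable)
python3 -m pip install schedule prettytable
```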