mirror of https://github.com/LibreQoE/LibreQoS.git
Update LibreQoS.py

This commit is contained in:
parent 48e0c2e0ea
commit 0370f7ced8

245  v1.2/LibreQoS.py
@@ -18,7 +18,7 @@ import shutil
 from ispConfig import fqOrCAKE, upstreamBandwidthCapacityDownloadMbps, upstreamBandwidthCapacityUploadMbps, \
     defaultClassCapacityDownloadMbps, defaultClassCapacityUploadMbps, interfaceA, interfaceB, enableActualShellCommands, \
-    runShellCommandsAsSudo, generatedPNDownloadMbps, generatedPNUploadMbps
+    runShellCommandsAsSudo, generatedPNDownloadMbps, generatedPNUploadMbps, usingXDP
 
 def shell(command):
     if enableActualShellCommands:
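
Note: the new import pulls a usingXDP toggle from ispConfig.py. That file is not part of this diff, so the following is only a sketch of the assumed setting (the name comes from the import above; the default shown is hypothetical):

# ispConfig.py (sketch - not part of this commit)
# True  -> use xdp-cpumap-tc: one MQ/HTB tree per CPU core / rx-tx queue (IPv4 only)
# False -> a single HTB tree per interface with u32 hash filters (IPv4 and IPv6)
usingXDP = True  # hypothetical default; set to match your deployment
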
@@ -38,24 +38,27 @@ def checkIfFirstRunSinceBoot():
         lastRun = datetime.strptime(file.read(), "%d-%b-%Y (%H:%M:%S.%f)")
         systemRunningSince = datetime.fromtimestamp(psutil.boot_time())
         if systemRunningSince > lastRun:
-            print("First time run since system boot. Will load xdp-cpumap-tc from scratch.")
+            print("First time run since system boot.")
             return True
         else:
-            print("Not first time run since system boot. Will clear xdp filter rules and reload.")
+            print("Not first time run since system boot.")
             return False
     else:
-        print("First time run since system boot. Will attach xdp.")
+        print("First time run since system boot.")
         return True
 
 def clearPriorSettings(interfaceA, interfaceB):
     if enableActualShellCommands:
-        #shell('tc filter delete dev ' + interfaceA)
-        #shell('tc filter delete dev ' + interfaceA + ' root')
+        # If not using XDP, clear tc filter
+        if usingXDP == False:
+            #two of these are probably redundant. Will remove later once determined which those are.
+            shell('tc filter delete dev ' + interfaceA)
+            shell('tc filter delete dev ' + interfaceA + ' root')
+            shell('tc filter delete dev ' + interfaceB)
+            shell('tc filter delete dev ' + interfaceB + ' root')
         shell('tc qdisc delete dev ' + interfaceA + ' root')
-        #shell('tc qdisc delete dev ' + interfaceA)
-        #shell('tc filter delete dev ' + interfaceB)
-        #shell('tc filter delete dev ' + interfaceB + ' root')
         shell('tc qdisc delete dev ' + interfaceB + ' root')
+        #shell('tc qdisc delete dev ' + interfaceA)
         #shell('tc qdisc delete dev ' + interfaceB)
 
 def findQueuesAvailable():
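
For readability, clearPriorSettings() reads as follows once this hunk is applied (assembled directly from the context and added lines above; indentation is reconstructed):

def clearPriorSettings(interfaceA, interfaceB):
    if enableActualShellCommands:
        # If not using XDP, clear tc filter
        if usingXDP == False:
            # two of these are probably redundant; to be trimmed once it is known which
            shell('tc filter delete dev ' + interfaceA)
            shell('tc filter delete dev ' + interfaceA + ' root')
            shell('tc filter delete dev ' + interfaceB)
            shell('tc filter delete dev ' + interfaceB + ' root')
        shell('tc qdisc delete dev ' + interfaceA + ' root')
        shell('tc qdisc delete dev ' + interfaceB + ' root')
        #shell('tc qdisc delete dev ' + interfaceA)
        #shell('tc qdisc delete dev ' + interfaceB)
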
@@ -198,6 +201,9 @@ def validateNetworkAndDevices():
             return False
 
 def refreshShapers():
+    # Starting
+    print("refreshShapers starting at " + datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
+
     # Warn user if enableActualShellCommands is False, because that would mean no actual commands are executing
     if enableActualShellCommands == False:
         warnings.warn("enableActualShellCommands is set to False. None of the commands below will actually be executed. Simulated run.", stacklevel=2)
@@ -222,7 +228,7 @@ def refreshShapers():
         safeToRunRefresh = True
     else:
         if (isThisFirstRunSinceBoot == False):
-            warnings.warn("Validation failed. Because this is not the first run since boot - will exit.")
+            warnings.warn("Validation failed. Because this is not the first run since boot (queues already set up) - will now exit.")
             safeToRunRefresh = False
         else:
             warnings.warn("Validation failed. However - because this is the first run since boot - will load queues from last good config")
@@ -367,8 +373,11 @@ def refreshShapers():
     with open(networkJSONfile, 'r') as j:
         network = json.loads(j.read())
 
-    # Pull rx/tx queues / CPU cores avaialble
-    queuesAvailable = findQueuesAvailable()
+    # Pull rx/tx queues / CPU cores available
+    if usingXDP:
+        queuesAvailable = findQueuesAvailable()
+    else:
+        queuesAvailable = 1
 
     # Generate Parent Nodes. Spread ShapedDevices.csv which lack defined ParentNode across these (balance across CPUs)
     generatedPNs = []
@@ -407,6 +416,12 @@ def refreshShapers():
 
     minDownload, minUpload = findBandwidthMins(network, 0)
 
+    # Define lists for hash filters
+    ipv4FiltersSrc = []
+    ipv4FiltersDst = []
+    ipv6FiltersSrc = []
+    ipv6FiltersDst = []
+
     # Parse network structure. For each tier, create corresponding HTB and leaf classes. Prepare for execution later
     linuxTCcommands = []
     xdpCPUmapCommands = []
@@ -451,7 +466,16 @@ def refreshShapers():
                 for device in circuit['devices']:
                     if device['ipv4s']:
                         for ipv4 in device['ipv4s']:
-                            xdpCPUmapCommands.append('./xdp-cpumap-tc/src/xdp_iphash_to_cpu_cmdline --add --ip ' + str(ipv4) + ' --cpu ' + hex(queue-1) + ' --classid ' + flowIDstring)
+                            if usingXDP:
+                                xdpCPUmapCommands.append('./xdp-cpumap-tc/src/xdp_iphash_to_cpu_cmdline --add --ip ' + str(ipv4) + ' --cpu ' + hex(queue-1) + ' --classid ' + flowIDstring)
+                            else:
+                                ipv4FiltersSrc.append((ipv4, parentString, flowIDstring))
+                                ipv4FiltersDst.append((ipv4, parentString, flowIDstring))
+                    if not usingXDP:
+                        if device['ipv6s']:
+                            for ipv6 in device['ipv6s']:
+                                ipv6FiltersSrc.append((ipv6, parentString, flowIDstring))
+                                ipv6FiltersDst.append((ipv6, parentString, flowIDstring))
                     if device['deviceName'] not in devicesShaped:
                         devicesShaped.append(device['deviceName'])
                 minor += 1
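
With usingXDP off, each device IP is collected as an (ip, parentString, flowIDstring) tuple instead of an xdp_iphash_to_cpu_cmdline call; the tuples are only turned into tc u32 hash-filter commands after traverseNetwork() returns (next hunk). A rough illustration of the collected data, with made-up values (parentString and flowIDstring are built elsewhere in traverseNetwork and their exact formats are not shown in this hunk):

# Illustrative only - example tuples as the non-XDP branch would accumulate them
ipv4FiltersDst = [
    ('100.64.1.21', '0x1:', '0x2:5'),          # (ip, parentString, flowIDstring)
]
ipv6FiltersDst = [
    ('2001:db8:0:21::/64', '0x1:', '0x2:5'),
]
# Each tuple later becomes one 'filter add ... match ip dst <ip> flowid <flowIDstring>' batch line.
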
@@ -468,13 +492,78 @@ def refreshShapers():
             queue += 1
             major += 1
         return minor
 
     # Print structure of network.json in debug or verbose mode
     logging.info(json.dumps(network, indent=4))
 
     # Here is the actual call to the recursive traverseNetwork() function. finalMinor is not used.
     finalMinor = traverseNetwork(network, 0, major=1, minor=3, queue=1, parentClassID="1:1", parentMaxDL=upstreamBandwidthCapacityDownloadMbps, parentMaxUL=upstreamBandwidthCapacityUploadMbps)
 
+    # If XDP off - prepare commands for Hash Tables
+
+    # IPv4 Hash Filters
+    # Dst
+    linuxTCcommands.append('filter add dev ' + interfaceA + ' parent 0x1: protocol all u32')
+    linuxTCcommands.append('filter add dev ' + interfaceA + ' parent 0x1: protocol ip handle 3: u32 divisor 256')
+    filterHandleCounter = 101
+    for i in range (256):
+        hexID = str(hex(i))#.replace('0x','')
+        for ipv4Filter in ipv4FiltersDst:
+            ipv4, parent, classid = ipv4Filter
+            if '/' in ipv4:
+                ipv4 = ipv4.split('/')[0]
+            if (ipv4.split('.', 3)[3]) == str(i):
+                filterHandle = hex(filterHandleCounter)
+                linuxTCcommands.append('filter add dev ' + interfaceA + ' handle ' + filterHandle + ' protocol ip parent 0x1: u32 ht 3:' + hexID + ': match ip dst ' + ipv4 + ' flowid ' + classid)
+                filterHandleCounter += 1
+    linuxTCcommands.append('filter add dev ' + interfaceA + ' protocol ip parent 0x1: u32 ht 800: match ip dst 0.0.0.0/0 hashkey mask 0x000000ff at 16 link 3:')
+    # Src
+    linuxTCcommands.append('filter add dev ' + interfaceB + ' parent 0x1: protocol all u32')
+    linuxTCcommands.append('filter add dev ' + interfaceB + ' parent 0x1: protocol ip handle 4: u32 divisor 256')
+    filterHandleCounter = 101
+    for i in range (256):
+        hexID = str(hex(i))#.replace('0x','')
+        for ipv4Filter in ipv4FiltersSrc:
+            ipv4, parent, classid = ipv4Filter
+            if '/' in ipv4:
+                ipv4 = ipv4.split('/')[0]
+            if (ipv4.split('.', 3)[3]) == str(i):
+                filterHandle = hex(filterHandleCounter)
+                linuxTCcommands.append('filter add dev ' + interfaceB + ' handle ' + filterHandle + ' protocol ip parent 0x1: u32 ht 4:' + hexID + ': match ip src ' + ipv4 + ' flowid ' + classid)
+                filterHandleCounter += 1
+    linuxTCcommands.append('filter add dev ' + interfaceB + ' protocol ip parent 0x1: u32 ht 800: match ip src 0.0.0.0/0 hashkey mask 0x000000ff at 12 link 4:')
+    # IPv6 Hash Filters
+    # Dst
+    linuxTCcommands.append('tc filter add dev ' + interfaceA + ' parent 0x1: handle 5: protocol ipv6 u32 divisor 256')
+    filterHandleCounter = 101
+    for ipv6Filter in ipv6FiltersDst:
+        ipv6, parent, classid = ipv6Filter
+        withoutCIDR = ipv6.split('/')[0]
+        third = str(ipaddress.IPv6Address(withoutCIDR).exploded).split(':',5)[3]
+        usefulPart = third[:2]
+        hexID = usefulPart
+        filterHandle = hex(filterHandleCounter)
+        linuxTCcommands.append('filter add dev ' + interfaceA + ' handle ' + filterHandle + ' protocol ipv6 parent 0x1: u32 ht 5:' + hexID + ': match ip6 dst ' + ipv6 + ' flowid ' + classid)
+        filterHandleCounter += 1
+    filterHandle = hex(filterHandleCounter)
+    linuxTCcommands.append('filter add dev ' + interfaceA + ' protocol ipv6 parent 0x1: u32 ht 800:: match ip6 dst ::/0 hashkey mask 0x0000ff00 at 28 link 5:')
+    filterHandleCounter += 1
+    # Src
+    linuxTCcommands.append('tc filter add dev ' + interfaceB + ' parent 0x1: handle 6: protocol ipv6 u32 divisor 256')
+    filterHandleCounter = 101
+    for ipv6Filter in ipv6FiltersSrc:
+        ipv6, parent, classid = ipv6Filter
+        withoutCIDR = ipv6.split('/')[0]
+        third = str(ipaddress.IPv6Address(withoutCIDR).exploded).split(':',5)[3]
+        usefulPart = third[:2]
+        hexID = usefulPart
+        filterHandle = hex(filterHandleCounter)
+        linuxTCcommands.append('filter add dev ' + interfaceB + ' handle ' + filterHandle + ' protocol ipv6 parent 0x1: u32 ht 6:' + hexID + ': match ip6 src ' + ipv6 + ' flowid ' + classid)
+        filterHandleCounter += 1
+    filterHandle = hex(filterHandleCounter)
+    linuxTCcommands.append('filter add dev ' + interfaceB + ' protocol ipv6 parent 0x1: u32 ht 800:: match ip6 src ::/0 hashkey mask 0x0000ff00 at 12 link 6:')
+    filterHandleCounter += 1
+
     # Record start time of actual filter reload
     reloadStartTime = datetime.now()
 
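
The added block spreads per-host filters across 256-bucket u32 hash tables (handles 3:/4: for IPv4, 5:/6: for IPv6) so tc does not have to walk one long linear filter list. The bucket is derived from the last octet of an IPv4 address, or from the leading byte of the fourth hextet of an IPv6 address, which is what the hashkey masks in the generated link commands select. A small standalone sketch of that bucket selection, mirroring the loops above:

import ipaddress

def ipv4_bucket(ip):
    # Same selection as the IPv4 loops above: last octet, rendered with hex() as the hexID
    ip = ip.split('/')[0]                    # strip any CIDR suffix, as the added code does
    return str(hex(int(ip.split('.', 3)[3])))

def ipv6_bucket(ip):
    # Same selection as the IPv6 loops above: first two hex digits of the fourth hextet
    withoutCIDR = ip.split('/')[0]
    third = str(ipaddress.IPv6Address(withoutCIDR).exploded).split(':', 5)[3]
    return third[:2]

print(ipv4_bucket('100.64.1.21'))        # 0x15 -> hash table slot 3:0x15: / 4:0x15:
print(ipv6_bucket('2001:db8:0:21::1'))   # 00   -> hash table slot 5:00: / 6:00:

One detail worth flagging for review: the two IPv6 divisor lines are appended as 'tc filter add dev ...', while every other entry in linuxTCcommands omits the leading 'tc', since the list is later executed through tc's batch mode (see the hunks below).
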
@@ -483,47 +572,75 @@ def refreshShapers():
 
     # If this is the first time LibreQoS.py has run since system boot, load the XDP program and disable XPS
     # Otherwise, just clear the existing IP filter rules for xdp
-    if isThisFirstRunSinceBoot:
-        # Set up XDP-CPUMAP-TC
-        shell('./xdp-cpumap-tc/bin/xps_setup.sh -d ' + interfaceA + ' --default --disable')
-        shell('./xdp-cpumap-tc/bin/xps_setup.sh -d ' + interfaceB + ' --default --disable')
-        shell('./xdp-cpumap-tc/src/xdp_iphash_to_cpu --dev ' + interfaceA + ' --lan')
-        shell('./xdp-cpumap-tc/src/xdp_iphash_to_cpu --dev ' + interfaceB + ' --wan')
-        if enableActualShellCommands:
-            # Here we use os.system for the command, because otherwise it sometimes gltiches out with Popen in shell()
-            result = os.system('./xdp-cpumap-tc/src/xdp_iphash_to_cpu_cmdline --clear')
-        shell('./xdp-cpumap-tc/src/tc_classify --dev-egress ' + interfaceA)
-        shell('./xdp-cpumap-tc/src/tc_classify --dev-egress ' + interfaceB)
-    else:
-        if enableActualShellCommands:
-            result = os.system('./xdp-cpumap-tc/src/xdp_iphash_to_cpu_cmdline --clear')
-
-    # Create MQ qdisc for each interface
-    thisInterface = interfaceA
-    shell('tc qdisc replace dev ' + thisInterface + ' root handle 7FFF: mq')
-    for queue in range(queuesAvailable):
-        shell('tc qdisc add dev ' + thisInterface + ' parent 7FFF:' + hex(queue+1) + ' handle ' + hex(queue+1) + ': htb default 2')
-        shell('tc class add dev ' + thisInterface + ' parent ' + hex(queue+1) + ': classid ' + hex(queue+1) + ':1 htb rate '+ str(upstreamBandwidthCapacityDownloadMbps) + 'mbit ceil ' + str(upstreamBandwidthCapacityDownloadMbps) + 'mbit')
-        shell('tc qdisc add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':1 ' + fqOrCAKE)
-        # Default class - traffic gets passed through this limiter with lower priority if not otherwise classified by the Shaper.csv
-        # Only 1/4 of defaultClassCapacity is guarenteed (to prevent hitting ceiling of upstream), for the most part it serves as an "up to" ceiling.
-        # Default class can use up to defaultClassCapacityDownloadMbps when that bandwidth isn't used by known hosts.
-        shell('tc class add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':1 classid ' + hex(queue+1) + ':2 htb rate ' + str(defaultClassCapacityDownloadMbps/4) + 'mbit ceil ' + str(defaultClassCapacityDownloadMbps) + 'mbit prio 5')
-        shell('tc qdisc add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':2 ' + fqOrCAKE)
-
-    thisInterface = interfaceB
-    shell('tc qdisc replace dev ' + thisInterface + ' root handle 7FFF: mq')
-    for queue in range(queuesAvailable):
-        shell('tc qdisc add dev ' + thisInterface + ' parent 7FFF:' + hex(queue+1) + ' handle ' + hex(queue+1) + ': htb default 2')
-        shell('tc class add dev ' + thisInterface + ' parent ' + hex(queue+1) + ': classid ' + hex(queue+1) + ':1 htb rate '+ str(upstreamBandwidthCapacityUploadMbps) + 'mbit ceil ' + str(upstreamBandwidthCapacityUploadMbps) + 'mbit')
-        shell('tc qdisc add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':1 ' + fqOrCAKE)
-        # Default class - traffic gets passed through this limiter with lower priority if not otherwise classified by the Shaper.csv.
-        # Only 1/4 of defaultClassCapacity is guarenteed (to prevent hitting ceiling of upstream), for the most part it serves as an "up to" ceiling.
-        # Default class can use up to defaultClassCapacityUploadMbps when that bandwidth isn't used by known hosts.
-        shell('tc class add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':1 classid ' + hex(queue+1) + ':2 htb rate ' + str(defaultClassCapacityUploadMbps/4) + 'mbit ceil ' + str(defaultClassCapacityUploadMbps) + 'mbit prio 5')
-        shell('tc qdisc add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':2 ' + fqOrCAKE)
-
-    # Execute actual Linux TC and XDP-CPUMAP-TC filter commands
+    # If XDP is disabled, skips this entirely
+    if usingXDP:
+        if isThisFirstRunSinceBoot:
+            # Set up XDP-CPUMAP-TC
+            shell('./xdp-cpumap-tc/bin/xps_setup.sh -d ' + interfaceA + ' --default --disable')
+            shell('./xdp-cpumap-tc/bin/xps_setup.sh -d ' + interfaceB + ' --default --disable')
+            shell('./xdp-cpumap-tc/src/xdp_iphash_to_cpu --dev ' + interfaceA + ' --lan')
+            shell('./xdp-cpumap-tc/src/xdp_iphash_to_cpu --dev ' + interfaceB + ' --wan')
+            if enableActualShellCommands:
+                # Here we use os.system for the command, because otherwise it sometimes gltiches out with Popen in shell()
+                result = os.system('./xdp-cpumap-tc/src/xdp_iphash_to_cpu_cmdline --clear')
+            shell('./xdp-cpumap-tc/src/tc_classify --dev-egress ' + interfaceA)
+            shell('./xdp-cpumap-tc/src/tc_classify --dev-egress ' + interfaceB)
+        else:
+            if enableActualShellCommands:
+                result = os.system('./xdp-cpumap-tc/src/xdp_iphash_to_cpu_cmdline --clear')
+
+    if usingXDP:
+        # Create MQ qdisc for each CPU core / rx-tx queue (XDP method - requires IPv4)
+        thisInterface = interfaceA
+        shell('tc qdisc replace dev ' + thisInterface + ' root handle 7FFF: mq')
+        for queue in range(queuesAvailable):
+            shell('tc qdisc add dev ' + thisInterface + ' parent 7FFF:' + hex(queue+1) + ' handle ' + hex(queue+1) + ': htb default 2')
+            shell('tc class add dev ' + thisInterface + ' parent ' + hex(queue+1) + ': classid ' + hex(queue+1) + ':1 htb rate '+ str(upstreamBandwidthCapacityDownloadMbps) + 'mbit ceil ' + str(upstreamBandwidthCapacityDownloadMbps) + 'mbit')
+            shell('tc qdisc add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':1 ' + fqOrCAKE)
+            # Default class - traffic gets passed through this limiter with lower priority if not otherwise classified by the Shaper.csv
+            # Only 1/4 of defaultClassCapacity is guarenteed (to prevent hitting ceiling of upstream), for the most part it serves as an "up to" ceiling.
+            # Default class can use up to defaultClassCapacityDownloadMbps when that bandwidth isn't used by known hosts.
+            shell('tc class add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':1 classid ' + hex(queue+1) + ':2 htb rate ' + str(defaultClassCapacityDownloadMbps/4) + 'mbit ceil ' + str(defaultClassCapacityDownloadMbps) + 'mbit prio 5')
+            shell('tc qdisc add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':2 ' + fqOrCAKE)
+
+        thisInterface = interfaceB
+        shell('tc qdisc replace dev ' + thisInterface + ' root handle 7FFF: mq')
+        for queue in range(queuesAvailable):
+            shell('tc qdisc add dev ' + thisInterface + ' parent 7FFF:' + hex(queue+1) + ' handle ' + hex(queue+1) + ': htb default 2')
+            shell('tc class add dev ' + thisInterface + ' parent ' + hex(queue+1) + ': classid ' + hex(queue+1) + ':1 htb rate '+ str(upstreamBandwidthCapacityUploadMbps) + 'mbit ceil ' + str(upstreamBandwidthCapacityUploadMbps) + 'mbit')
+            shell('tc qdisc add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':1 ' + fqOrCAKE)
+            # Default class - traffic gets passed through this limiter with lower priority if not otherwise classified by the Shaper.csv.
+            # Only 1/4 of defaultClassCapacity is guarenteed (to prevent hitting ceiling of upstream), for the most part it serves as an "up to" ceiling.
+            # Default class can use up to defaultClassCapacityUploadMbps when that bandwidth isn't used by known hosts.
+            shell('tc class add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':1 classid ' + hex(queue+1) + ':2 htb rate ' + str(defaultClassCapacityUploadMbps/4) + 'mbit ceil ' + str(defaultClassCapacityUploadMbps) + 'mbit prio 5')
+            shell('tc qdisc add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':2 ' + fqOrCAKE)
+    else:
+        # Create single HTB qdisc (non XDP method - allows IPv6)
+        thisInterface = interfaceA
+        shell('tc qdisc replace dev ' + thisInterface + ' root handle 0x1: htb default 2 r2q 1514')
+        for queue in range(queuesAvailable):
+            shell('tc qdisc add dev ' + thisInterface + ' parent 0x1:' + hex(queue+1) + ' handle ' + hex(queue+1) + ': htb default 2')
+            shell('tc class add dev ' + thisInterface + ' parent ' + hex(queue+1) + ': classid ' + hex(queue+1) + ':1 htb rate '+ str(upstreamBandwidthCapacityDownloadMbps) + 'mbit ceil ' + str(upstreamBandwidthCapacityDownloadMbps) + 'mbit')
+            shell('tc qdisc add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':1 ' + fqOrCAKE)
+            # Default class - traffic gets passed through this limiter with lower priority if not otherwise classified by the Shaper.csv
+            # Only 1/4 of defaultClassCapacity is guarenteed (to prevent hitting ceiling of upstream), for the most part it serves as an "up to" ceiling.
+            # Default class can use up to defaultClassCapacityDownloadMbps when that bandwidth isn't used by known hosts.
+            shell('tc class add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':1 classid ' + hex(queue+1) + ':2 htb rate ' + str(defaultClassCapacityDownloadMbps/4) + 'mbit ceil ' + str(defaultClassCapacityDownloadMbps) + 'mbit prio 5')
+            shell('tc qdisc add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':2 ' + fqOrCAKE)
+
+        thisInterface = interfaceB
+        shell('tc qdisc replace dev ' + thisInterface + ' root handle 0x1: htb default 2 r2q 1514')
+        for queue in range(queuesAvailable):
+            shell('tc qdisc add dev ' + thisInterface + ' parent 0x1:' + hex(queue+1) + ' handle ' + hex(queue+1) + ': htb default 2')
+            shell('tc class add dev ' + thisInterface + ' parent ' + hex(queue+1) + ': classid ' + hex(queue+1) + ':1 htb rate '+ str(upstreamBandwidthCapacityUploadMbps) + 'mbit ceil ' + str(upstreamBandwidthCapacityUploadMbps) + 'mbit')
+            shell('tc qdisc add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':1 ' + fqOrCAKE)
+            # Default class - traffic gets passed through this limiter with lower priority if not otherwise classified by the Shaper.csv.
+            # Only 1/4 of defaultClassCapacity is guarenteed (to prevent hitting ceiling of upstream), for the most part it serves as an "up to" ceiling.
+            # Default class can use up to defaultClassCapacityUploadMbps when that bandwidth isn't used by known hosts.
+            shell('tc class add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':1 classid ' + hex(queue+1) + ':2 htb rate ' + str(defaultClassCapacityUploadMbps/4) + 'mbit ceil ' + str(defaultClassCapacityUploadMbps) + 'mbit prio 5')
+            shell('tc qdisc add dev ' + thisInterface + ' parent ' + hex(queue+1) + ':2 ' + fqOrCAKE)
+
+    # Execute actual Linux TC commands
     print("Executing linux TC class/qdisc commands")
     with open('linux_tc.txt', 'w') as f:
         for line in linuxTCcommands:
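
To make the two branches concrete, this is the shape of the commands the XDP branch emits for queue 0 of interfaceA, using illustrative values only (eth1, 1000 Mbps upstream, 100 Mbps default class, cake; these are examples, not repository defaults):

# Illustration of the string building in the XDP branch above (hypothetical values)
interfaceA = 'eth1'
upstreamBandwidthCapacityDownloadMbps = 1000
defaultClassCapacityDownloadMbps = 100
fqOrCAKE = 'cake diffserv4'
queue = 0

print('tc qdisc replace dev ' + interfaceA + ' root handle 7FFF: mq')
print('tc qdisc add dev ' + interfaceA + ' parent 7FFF:' + hex(queue+1) + ' handle ' + hex(queue+1) + ': htb default 2')
print('tc class add dev ' + interfaceA + ' parent ' + hex(queue+1) + ': classid ' + hex(queue+1) + ':1 htb rate ' + str(upstreamBandwidthCapacityDownloadMbps) + 'mbit ceil ' + str(upstreamBandwidthCapacityDownloadMbps) + 'mbit')
print('tc qdisc add dev ' + interfaceA + ' parent ' + hex(queue+1) + ':1 ' + fqOrCAKE)
print('tc class add dev ' + interfaceA + ' parent ' + hex(queue+1) + ':1 classid ' + hex(queue+1) + ':2 htb rate ' + str(defaultClassCapacityDownloadMbps/4) + 'mbit ceil ' + str(defaultClassCapacityDownloadMbps) + 'mbit prio 5')

Since hex(queue+1) yields '0x1', '0x2', and so on, the XDP branch gives every CPU core / rx-tx queue its own HTB handle under the MQ root, while the non-XDP branch pins queuesAvailable to 1 (earlier in this diff) and keeps everything under the single 0x1: root that the hash filters above reference as their parent.
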
@@ -531,11 +648,16 @@ def refreshShapers():
             logging.info(line)
     shell("/sbin/tc -f -b linux_tc.txt")
     print("Executed " + str(len(linuxTCcommands)) + " linux TC class/qdisc commands")
-    print("Executing XDP-CPUMAP-TC IP filter commands")
-    for command in xdpCPUmapCommands:
-        logging.info(command)
-        shell(command)
-    print("Executed " + str(len(xdpCPUmapCommands)) + " XDP-CPUMAP-TC IP filter commands")
+
+    # Execute actual XDP-CPUMAP-TC filter commands
+    if usingXDP:
+        print("Executing XDP-CPUMAP-TC IP filter commands")
+        for command in xdpCPUmapCommands:
+            logging.info(command)
+            shell(command)
+        print("Executed " + str(len(xdpCPUmapCommands)) + " XDP-CPUMAP-TC IP filter commands")
+
+    # Record end time
     reloadEndTime = datetime.now()
 
     # Recap - warn operator if devices were skipped
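
All of the class, qdisc, and filter lines accumulated in linuxTCcommands are written to linux_tc.txt and handed to a single batch invocation ('/sbin/tc -f -b linux_tc.txt'), which avoids forking tc once per rule; -b reads the batch file and -f keeps going past individual command failures. A sketch of what a few lines of that batch file might look like (interface name and values are hypothetical):

class add dev eth1 parent 0x1: classid 0x1:1 htb rate 1000mbit ceil 1000mbit
qdisc add dev eth1 parent 0x1:1 cake diffserv4
filter add dev eth1 parent 0x1: protocol ip handle 3: u32 divisor 256
filter add dev eth1 handle 0x65 protocol ip parent 0x1: u32 ht 3:0x15: match ip dst 100.64.1.21 flowid 0x2:5
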
@@ -565,6 +687,9 @@ def refreshShapers():
     # Report reload time
     reloadTimeSeconds = (reloadEndTime - reloadStartTime).seconds
     print("Queue and IP filter reload completed in " + str(reloadTimeSeconds) + " seconds")
 
+    # Done
+    print("refreshShapers completed on " + datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
+
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
@@ -590,9 +715,5 @@ if __name__ == '__main__':
     if args.validate:
         status = validateNetworkAndDevices()
     else:
-        # Starting
-        print("refreshShapers starting at " + datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
         # Refresh and/or set up queues
         refreshShapers()
-        # Done
-        print("refreshShapers completed on " + datetime.now().strftime("%d/%m/%Y %H:%M:%S"))