Add build script and associated path changes

* Adds build_rust.sh:
   * Creates `src/bin` and `src/bin/static` if needed.
   * Compiles all of the executables in the `rust` tree.
   * Copies the executables into `src/bin`
* Copies the static web data for `lqos_node_manager`
  into `src/bin/static`.
* Updates .gitignore to ignore the src/bin/ folder
* Updates LibreQoS.py to call bin/xdp_iphash_to_cpu_cmdline
  instead of ./xdp_iphash_to_cpu_cmdline (cleaner).
* Updates graphInfluxDB.py to call the temporary Rust shim
  that provides output like the previous one, hopefully
  (poorly tested) allowing the existing graph system to work
  with the new structure.

See #181

Signed-off-by: Herbert Wolverson <herberticus@gmail.com>
This commit is contained in:
Herbert Wolverson 2023-01-04 15:35:21 +00:00
parent 10a674abde
commit deded47cac
5 changed files with 73 additions and 11 deletions

1
.gitignore vendored
View File

@ -48,6 +48,7 @@ src/tinsStats.json
# Ignore Rust build artifacts # Ignore Rust build artifacts
src/rust/target src/rust/target
src/bin
# Ignore project folders for some IDEs # Ignore project folders for some IDEs
.idea/ .idea/

View File

@ -82,7 +82,7 @@ def tearDown(interfaceA, interfaceB):
# Full teardown of everything for exiting LibreQoS # Full teardown of everything for exiting LibreQoS
if enableActualShellCommands: if enableActualShellCommands:
# Clear IP filters and remove xdp program from interfaces # Clear IP filters and remove xdp program from interfaces
result = os.system('./xdp_iphash_to_cpu_cmdline clear') result = os.system('./bin/xdp_iphash_to_cpu_cmdline clear')
# The daemon is controling this now, let's not confuse things # The daemon is controling this now, let's not confuse things
#shell('ip link set dev ' + interfaceA + ' xdp off') #shell('ip link set dev ' + interfaceA + ' xdp off')
#shell('ip link set dev ' + interfaceB + ' xdp off') #shell('ip link set dev ' + interfaceB + ' xdp off')
@ -714,14 +714,14 @@ def refreshShapers():
for device in circuit['devices']: for device in circuit['devices']:
if device['ipv4s']: if device['ipv4s']:
for ipv4 in device['ipv4s']: for ipv4 in device['ipv4s']:
xdpCPUmapCommands.append('./xdp_iphash_to_cpu_cmdline add --ip ' + str(ipv4) + ' --cpu ' + data[node]['cpuNum'] + ' --classid ' + circuit['classid']) xdpCPUmapCommands.append('./bin/xdp_iphash_to_cpu_cmdline add --ip ' + str(ipv4) + ' --cpu ' + data[node]['cpuNum'] + ' --classid ' + circuit['classid'])
if OnAStick: if OnAStick:
xdpCPUmapCommands.append('./xdp_iphash_to_cpu_cmdline add --ip ' + str(ipv4) + ' --cpu ' + data[node]['up_cpuNum'] + ' --classid ' + circuit['up_classid'] + ' --upload 1') xdpCPUmapCommands.append('./bin/xdp_iphash_to_cpu_cmdline add --ip ' + str(ipv4) + ' --cpu ' + data[node]['up_cpuNum'] + ' --classid ' + circuit['up_classid'] + ' --upload 1')
if device['ipv6s']: if device['ipv6s']:
for ipv6 in device['ipv6s']: for ipv6 in device['ipv6s']:
xdpCPUmapCommands.append('./xdp_iphash_to_cpu_cmdline add --ip ' + str(ipv6) + ' --cpu ' + data[node]['cpuNum'] + ' --classid ' + circuit['classid']) xdpCPUmapCommands.append('./bin/xdp_iphash_to_cpu_cmdline add --ip ' + str(ipv6) + ' --cpu ' + data[node]['cpuNum'] + ' --classid ' + circuit['classid'])
if OnAStick: if OnAStick:
xdpCPUmapCommands.append('./xdp_iphash_to_cpu_cmdline add --ip ' + str(ipv6) + ' --cpu ' + data[node]['up_cpuNum'] + ' --classid ' + circuit['up_classid'] + ' --upload 1') xdpCPUmapCommands.append('./bin/xdp_iphash_to_cpu_cmdline add --ip ' + str(ipv6) + ' --cpu ' + data[node]['up_cpuNum'] + ' --classid ' + circuit['up_classid'] + ' --upload 1')
if device['deviceName'] not in devicesShaped: if device['deviceName'] not in devicesShaped:
devicesShaped.append(device['deviceName']) devicesShaped.append(device['deviceName'])
# Recursive call this function for children nodes attached to this node # Recursive call this function for children nodes attached to this node
@ -751,7 +751,7 @@ def refreshShapers():
xdpStartTime = datetime.now() xdpStartTime = datetime.now()
if enableActualShellCommands: if enableActualShellCommands:
# Here we use os.system for the command, because otherwise it sometimes gltiches out with Popen in shell() # Here we use os.system for the command, because otherwise it sometimes gltiches out with Popen in shell()
result = os.system('./xdp_iphash_to_cpu_cmdline clear') result = os.system('./bin/xdp_iphash_to_cpu_cmdline clear')
# Set up XDP-CPUMAP-TC # Set up XDP-CPUMAP-TC
logging.info("# XDP Setup") logging.info("# XDP Setup")
# Commented out - the daemon does this # Commented out - the daemon does this
@ -908,18 +908,18 @@ def refreshShapersUpdateOnly():
def removeDeviceIPsFromFilter(circuit): def removeDeviceIPsFromFilter(circuit):
for device in circuit['devices']: for device in circuit['devices']:
for ipv4 in device['ipv4s']: for ipv4 in device['ipv4s']:
shell('./xdp_iphash_to_cpu_cmdline del ip ' + str(ipv4)) shell('./bin/xdp_iphash_to_cpu_cmdline del ip ' + str(ipv4))
for ipv6 in device['ipv6s']: for ipv6 in device['ipv6s']:
shell('./xdp_iphash_to_cpu_cmdline del ip ' + str(ipv6)) shell('./bin/xdp_iphash_to_cpu_cmdline del ip ' + str(ipv6))
def addDeviceIPsToFilter(circuit, cpuNumHex): def addDeviceIPsToFilter(circuit, cpuNumHex):
# TODO: Possible issue, check that the lqosd system expects the CPU in hex # TODO: Possible issue, check that the lqosd system expects the CPU in hex
for device in circuit['devices']: for device in circuit['devices']:
for ipv4 in device['ipv4s']: for ipv4 in device['ipv4s']:
shell('./xdp_iphash_to_cpu_cmdline add --ip ' + str(ipv4) + ' --cpu ' + cpuNumHex + ' --classid ' + circuit['classid']) shell('./bin/xdp_iphash_to_cpu_cmdline add --ip ' + str(ipv4) + ' --cpu ' + cpuNumHex + ' --classid ' + circuit['classid'])
for ipv6 in device['ipv6s']: for ipv6 in device['ipv6s']:
shell('./xdp_iphash_to_cpu_cmdline add --ip ' + str(ipv6) + ' --cpu ' + cpuNumHex + ' --classid ' + circuit['classid']) shell('./bin/xdp_iphash_to_cpu_cmdline add --ip ' + str(ipv6) + ' --cpu ' + cpuNumHex + ' --classid ' + circuit['classid'])
def getAllParentNodes(data, allParentNodes): def getAllParentNodes(data, allParentNodes):

28
src/build_rust.sh Executable file
View File

@ -0,0 +1,28 @@
#!/bin/bash
# This script builds the Rust sub-system and places the results in the
# `src/bin` directory. Run it from the `src` directory; all paths below
# are relative to it.
#
# You still need to setup services to run `lqosd` and `lqos_node_manager`
# automatically.
#
# Don't forget to setup `/etc/lqos`

# Abort on the first failed command. Without this, a failed `cargo build`
# would be silently ignored and stale (or missing) binaries copied anyway.
set -e

# Executables to build and install into bin/.
PROGS="lqosd lqtop xdp_iphash_to_cpu_cmdline xdp_pping"

mkdir -p bin/static
pushd rust > /dev/null
#cargo clean
for prog in $PROGS
do
    pushd "$prog" > /dev/null
    cargo build --release
    popd > /dev/null
done
# Copy only after every build succeeded (set -e guarantees we get here
# with all binaries present).
for prog in $PROGS
do
    cp "target/release/$prog" ../bin
done
popd > /dev/null
# Static web assets for the node manager live alongside the binaries.
cp -R rust/lqos_node_manager/static/* bin/static
echo "Don't forget to setup /etc/lqos!"

View File

@ -306,7 +306,7 @@ def getParentNodeLatencyStats(parentNodes, subscriberCircuits):
def getCircuitLatencyStats(subscriberCircuits): def getCircuitLatencyStats(subscriberCircuits):
command = './cpumap-pping/src/xdp_pping' command = './src/bin/xdp_pping'
listOfEntries = json.loads(subprocess.run(command.split(' '), stdout=subprocess.PIPE).stdout.decode('utf-8')) listOfEntries = json.loads(subprocess.run(command.split(' '), stdout=subprocess.PIPE).stdout.decode('utf-8'))
tcpLatencyForClassID = {} tcpLatencyForClassID = {}

33
src/lqos.example Normal file
View File

@ -0,0 +1,33 @@
# This file *must* be installed in `/etc/lqos`.
# Change the values to match your setup.
# Where is LibreQoS installed?
lqos_directory = '/opt/libreqos/src'
# Kernel/NIC tuning applied by the daemon at startup.
[tuning]
# Stop the irqbalance service (presumably so IRQ affinity stays fixed — confirm).
stop_irq_balance = true
# NOTE(review): these look like the `net.core.netdev_budget_*` sysctls — verify
# units/semantics against the lqosd tuning code.
netdev_budget_usecs = 20
netdev_budget_packets = 1
# Interrupt coalescing values (as passed to `ethtool -C`, presumably — confirm).
rx_usecs = 0
tx_usecs = 0
# Disable VLAN offload; assumption: so the XDP program sees raw VLAN tags — verify.
disable_rxvlan = true
disable_txvlan = true
# NIC offloads to turn off (ethtool -K style feature names).
disable_offload = [ "gso", "tso", "lro", "sg", "gro" ]
# If you are running a traditional two-interface setup, use:
# interface_mapping = [
# { name = "enp1s0f1", redirect_to = "enp1s0f2", scan_vlans = false },
# { name = "enp1s0f2", redirect_to = "enp1s0f1", scan_vlans = false }
# ]
# vlan_mapping = []
# For "on a stick":
[bridge]
use_kernel_bridge = true
# Single physical interface redirecting to itself; VLAN tags distinguish
# the two traffic directions (see vlan_mapping below).
interface_mapping = [
{ name = "enp1s0f1", redirect_to = "enp1s0f1", scan_vlans = true }
]
# Swap the two VLAN tags: traffic tagged 3 is re-tagged 4 and vice versa.
vlan_mapping = [
{ parent = "enp1s0f1", tag = 3, redirect_to = 4 },
{ parent = "enp1s0f1", tag = 4, redirect_to = 3 }
]