Merge pull request #547 from LibreQoE/issue_518_reloading_2

Issue 518 reloading 2
This commit is contained in:
Herbert "TheBracket" Wolverson 2024-08-22 13:07:06 -05:00 committed by GitHub
commit 001694c77f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 215 additions and 197 deletions

View File

@ -1,229 +1,247 @@
#![allow(dead_code)] #![allow(dead_code)]
use anyhow::{Error, Result};
use libbpf_sys::{
bpf_map_delete_elem, bpf_map_get_next_key, bpf_map_lookup_elem,
bpf_map_update_elem, bpf_obj_get, BPF_NOEXIST,
};
use std::{ use std::{
ffi::{c_void, CString}, ffi::{c_void, CString},
marker::PhantomData, marker::PhantomData,
ptr::null_mut, ptr::null_mut,
}; };
use anyhow::{Error, Result};
use libbpf_sys::{bpf_map_delete_elem, bpf_map_get_next_key, bpf_map_lookup_elem, bpf_map_update_elem, BPF_NOEXIST, bpf_obj_get};
use crate::lqos_kernel::bpf::bpf_map_delete_batch;
/// Represents an underlying BPF map, accessed via the filesystem. /// Represents an underlying BPF map, accessed via the filesystem.
/// `BpfMap` *only* talks to shared (not PER-CPU) variants of maps. /// `BpfMap` *only* talks to shared (not PER-CPU) variants of maps.
/// ///
/// `K` is the *key* type, indexing the map. /// `K` is the *key* type, indexing the map.
/// `V` is the *value* type, and must exactly match the underlying C data type. /// `V` is the *value* type, and must exactly match the underlying C data type.
pub struct BpfMap<K, V> { pub struct BpfMap<K, V> {
fd: i32, fd: i32,
_key_phantom: PhantomData<K>, _key_phantom: PhantomData<K>,
_val_phantom: PhantomData<V>, _val_phantom: PhantomData<V>,
} }
impl<K, V> BpfMap<K, V> impl<K, V> BpfMap<K, V>
where where
K: Default + Clone, K: Default + Clone,
V: Default + Clone, V: Default + Clone,
{ {
/// Connect to a BPF map via a filename. Connects the internal /// Connect to a BPF map via a filename. Connects the internal
/// file descriptor, which is held until the structure is /// file descriptor, which is held until the structure is
/// dropped. /// dropped.
pub fn from_path(filename: &str) -> Result<Self> { pub fn from_path(filename: &str) -> Result<Self> {
let filename_c = CString::new(filename)?; let filename_c = CString::new(filename)?;
let fd = unsafe { bpf_obj_get(filename_c.as_ptr()) }; let fd = unsafe { bpf_obj_get(filename_c.as_ptr()) };
if fd < 0 { if fd < 0 {
Err(Error::msg("Unable to open BPF map")) Err(Error::msg("Unable to open BPF map"))
} else { } else {
Ok(Self { fd, _key_phantom: PhantomData, _val_phantom: PhantomData }) Ok(Self { fd, _key_phantom: PhantomData, _val_phantom: PhantomData })
} }
}
/// Iterates the undlering BPF map, and adds the results
/// to a vector. Each entry contains a `key, value` tuple.
///
/// This has performance issues due to excessive cloning
pub fn dump_vec(&self) -> Vec<(K, V)> {
let mut result = Vec::new();
let mut prev_key: *mut K = null_mut();
let mut key: K = K::default();
let key_ptr: *mut K = &mut key;
let mut value = V::default();
let value_ptr: *mut V = &mut value;
unsafe {
while bpf_map_get_next_key(
self.fd,
prev_key as *mut c_void,
key_ptr as *mut c_void,
) == 0
{
bpf_map_lookup_elem(
self.fd,
key_ptr as *mut c_void,
value_ptr as *mut c_void,
);
result.push((key.clone(), value.clone()));
prev_key = key_ptr;
}
} }
result /// Iterates the underlying BPF map, and adds the results
} /// to a vector. Each entry contains a `key, value` tuple.
///
/// This has performance issues due to excessive cloning
pub fn dump_vec(&self) -> Vec<(K, V)> {
let mut result = Vec::new();
/// Inserts an entry into a BPF map. let mut prev_key: *mut K = null_mut();
/// Use this sparingly, because it briefly pauses XDP access to the let mut key: K = K::default();
/// underlying map (through internal locking we can't reach from let key_ptr: *mut K = &mut key;
/// userland). let mut value = V::default();
/// let value_ptr: *mut V = &mut value;
/// ## Arguments
/// unsafe {
/// * `key` - the key to insert. while bpf_map_get_next_key(
/// * `value` - the value to insert. self.fd,
/// prev_key as *mut c_void,
/// Returns Ok if insertion succeeded, a generic error (no details yet) key_ptr as *mut c_void,
/// if it fails. ) == 0
pub fn insert(&mut self, key: &mut K, value: &mut V) -> Result<()> { {
let key_ptr: *mut K = key; bpf_map_lookup_elem(
let val_ptr: *mut V = value; self.fd,
let err = unsafe { key_ptr as *mut c_void,
bpf_map_update_elem( value_ptr as *mut c_void,
self.fd, );
key_ptr as *mut c_void, result.push((key.clone(), value.clone()));
val_ptr as *mut c_void, prev_key = key_ptr;
BPF_NOEXIST.into(), }
) }
};
if err != 0 { result
Err(Error::msg(format!("Unable to insert into map ({err})")))
} else {
Ok(())
} }
}
/// Inserts an entry into a BPF map. /// Inserts an entry into a BPF map.
/// Use this sparingly, because it briefly pauses XDP access to the /// Use this sparingly, because it briefly pauses XDP access to the
/// underlying map (through internal locking we can't reach from /// underlying map (through internal locking we can't reach from
/// userland). /// userland).
/// ///
/// ## Arguments /// ## Arguments
/// ///
/// * `key` - the key to insert. /// * `key` - the key to insert.
/// * `value` - the value to insert. /// * `value` - the value to insert.
/// ///
/// Returns Ok if insertion succeeded, a generic error (no details yet) /// Returns Ok if insertion succeeded, a generic error (no details yet)
/// if it fails. /// if it fails.
pub fn insert_or_update(&mut self, key: &mut K, value: &mut V) -> Result<()> { pub fn insert(&mut self, key: &mut K, value: &mut V) -> Result<()> {
let key_ptr: *mut K = key; let key_ptr: *mut K = key;
let val_ptr: *mut V = value; let val_ptr: *mut V = value;
let err = unsafe { let err = unsafe {
bpf_map_update_elem( bpf_map_update_elem(
self.fd, self.fd,
key_ptr as *mut c_void, key_ptr as *mut c_void,
val_ptr as *mut c_void, val_ptr as *mut c_void,
0, BPF_NOEXIST.into(),
) )
}; };
if err != 0 { if err != 0 {
Err(Error::msg(format!("Unable to insert into map ({err})"))) Err(Error::msg(format!("Unable to insert into map ({err})")))
} else { } else {
Ok(()) Ok(())
}
} }
}
/// Deletes an entry from the underlying eBPF map. /// Inserts an entry into a BPF map.
/// Use this sparingly, it locks the underlying map in the /// Use this sparingly, because it briefly pauses XDP access to the
/// kernel. This can cause *long* delays under heavy load. /// underlying map (through internal locking we can't reach from
/// /// userland).
/// ## Arguments ///
/// /// ## Arguments
/// * `key` - the key to delete. ///
/// /// * `key` - the key to insert.
/// Return `Ok` if deletion succeeded. /// * `value` - the value to insert.
pub fn delete(&mut self, key: &mut K) -> Result<()> { ///
let key_ptr: *mut K = key; /// Returns Ok if insertion succeeded, a generic error (no details yet)
let err = unsafe { bpf_map_delete_elem(self.fd, key_ptr as *mut c_void) }; /// if it fails.
if err != 0 { pub fn insert_or_update(&mut self, key: &mut K, value: &mut V) -> Result<()> {
if err == -2 { let key_ptr: *mut K = key;
// ENOEXIST : not actually an error, just nothing to do let val_ptr: *mut V = value;
let err = unsafe {
bpf_map_update_elem(
self.fd,
key_ptr as *mut c_void,
val_ptr as *mut c_void,
0,
)
};
if err != 0 {
Err(Error::msg(format!("Unable to insert into map ({err})")))
} else {
Ok(())
}
}
/// Deletes an entry from the underlying eBPF map.
/// Use this sparingly, it locks the underlying map in the
/// kernel. This can cause *long* delays under heavy load.
///
/// ## Arguments
///
/// * `key` - the key to delete.
///
/// Return `Ok` if deletion succeeded.
pub fn delete(&mut self, key: &mut K) -> Result<()> {
let key_ptr: *mut K = key;
let err = unsafe { bpf_map_delete_elem(self.fd, key_ptr as *mut c_void) };
if err != 0 {
if err == -2 {
// ENOEXIST : not actually an error, just nothing to do
Ok(())
} else {
Err(Error::msg("Unable to delete from map"))
}
} else {
Ok(())
}
}
/// Delete all entries in the underlying eBPF map.
/// Use this sparingly, it locks the underlying map. Under
/// heavy load, it WILL eventually terminate - but it might
/// take a very long time. Only use this for cleaning up
/// sparsely allocated map data.
pub fn clear(&mut self) -> Result<()> {
loop {
let mut key = K::default();
let mut prev_key: *mut K = null_mut();
unsafe {
let key_ptr: *mut K = &mut key;
while bpf_map_get_next_key(
self.fd,
prev_key as *mut c_void,
key_ptr as *mut c_void,
) == 0
{
bpf_map_delete_elem(self.fd, key_ptr as *mut c_void);
prev_key = key_ptr;
}
}
key = K::default();
prev_key = null_mut();
unsafe {
let key_ptr: *mut K = &mut key;
if bpf_map_get_next_key(
self.fd,
prev_key as *mut c_void,
key_ptr as *mut c_void,
) != 0
{
break;
}
}
}
Ok(()) Ok(())
} else {
Err(Error::msg("Unable to delete from map"))
}
} else {
Ok(())
} }
}
/// Delete all entries in the underlying eBPF map. /// Delete all entries in the underlying eBPF map.
/// Use this sparingly, it locks the underlying map. Under /// Use this sparingly, it locks the underlying map. Under
/// heavy load, it WILL eventually terminate - but it might /// heavy load, it WILL eventually terminate - but it might
/// take a very long time. Only use this for cleaning up /// take a very long time. Only use this for cleaning up
/// sparsely allocated map data. /// sparsely allocated map data.
pub fn clear(&mut self) -> Result<()> { ///
loop { /// This version skips the "did it really clear?" repeat
let mut key = K::default(); /// found in the main version.
let mut prev_key: *mut K = null_mut(); pub fn clear_no_repeat(&mut self) -> Result<()> {
unsafe { let mut key = K::default();
let key_ptr: *mut K = &mut key; let mut prev_key: *mut K = null_mut();
while bpf_map_get_next_key( unsafe {
self.fd, let key_ptr: *mut K = &mut key;
prev_key as *mut c_void, while bpf_map_get_next_key(
key_ptr as *mut c_void, self.fd,
) == 0 prev_key as *mut c_void,
{ key_ptr as *mut c_void,
bpf_map_delete_elem(self.fd, key_ptr as *mut c_void); ) == 0
prev_key = key_ptr; {
bpf_map_delete_elem(self.fd, key_ptr as *mut c_void);
prev_key = key_ptr;
}
} }
} Ok(())
}
key = K::default(); /// Clears an eBPF map using `bpf_map_delete_batch`, which
prev_key = null_mut(); /// has better locking semantics than per-row.
unsafe { pub fn clear_bulk(&mut self) -> Result<()> {
let key_ptr: *mut K = &mut key; let mut keys: Vec<K> = self.dump_vec().iter().map(|(k, _)| {
if bpf_map_get_next_key( k.clone()
self.fd, }).collect();
prev_key as *mut c_void, let mut count = keys.len() as u32;
key_ptr as *mut c_void, loop {
) != 0 let ret = unsafe {
{ bpf_map_delete_batch(self.fd, keys.as_mut_ptr() as *mut c_void, &mut count, null_mut())
break; };
if ret != 0 || count == 0 {
break;
}
} }
} Ok(())
} }
Ok(())
}
/// Delete all entries in the underlying eBPF map.
/// Use this sparingly, it locks the underlying map. Under
/// heavy load, it WILL eventually terminate - but it might
/// take a very long time. Only use this for cleaning up
/// sparsely allocated map data.
///
/// This version skips the "did it really clear?" repeat
/// found in the main version.
pub fn clear_no_repeat(&mut self) -> Result<()> {
let mut key = K::default();
let mut prev_key: *mut K = null_mut();
unsafe {
let key_ptr: *mut K = &mut key;
while bpf_map_get_next_key(
self.fd,
prev_key as *mut c_void,
key_ptr as *mut c_void,
) == 0
{
bpf_map_delete_elem(self.fd, key_ptr as *mut c_void);
prev_key = key_ptr;
}
}
Ok(())
}
} }
impl<K, V> Drop for BpfMap<K, V> { impl<K, V> Drop for BpfMap<K, V> {
fn drop(&mut self) { fn drop(&mut self) {
let _ = nix::unistd::close(self.fd); let _ = nix::unistd::close(self.fd);
} }
} }

View File

@ -100,6 +100,6 @@ pub fn list_mapped_ips() -> Result<Vec<(IpHashKey, IpHashData)>> {
/// destinations. /// destinations.
/// Clears the hot cache used to short-circuit IP lookups for recent
/// destinations.
pub fn clear_hot_cache() -> Result<()> {
    let mut bpf_map = BpfMap::<XdpIpAddress, IpHashData>::from_path("/sys/fs/bpf/ip_to_cpu_and_tc_hotcache")?;
    // Batch deletion holds the kernel-side lock far more briefly than
    // the per-row `clear()` walk.
    bpf_map.clear_bulk()?;
    Ok(())
}