Mirror of https://github.com/LibreQoE/LibreQoS.git, synced 2025-02-25 18:55:32 -06:00
Proper fix for submitting IP/CPU/Queue mapping batches.
* Bring the per-client buffer size back down to a reasonable 2k.
* Divide submission batches into groups and submit those. It's still MASSIVELY faster, but it can't fall victim to guessing the number of batches incorrectly.
parent bfe9601be1
commit 9392b43e3c
@@ -12,7 +12,7 @@ use tokio::{
 
 use super::BUS_SOCKET_DIRECTORY;
 
-const READ_BUFFER_SIZE: usize = 2048000;
+const READ_BUFFER_SIZE: usize = 20_480;
 
 /// Implements a Tokio-friendly server using Unix Sockets and the bus protocol.
 /// Requests are handled and then forwarded to the handler.
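For context, READ_BUFFER_SIZE sizes the per-client read buffer in the Unix-socket bus server, so this change drops each connection's buffer from roughly 2 MB to 20 KiB. A minimal sketch of the pattern, assuming a tokio UnixStream handler; the handler shape and names are illustrative, not LibreQoS's exact server loop:

use tokio::io::AsyncReadExt;
use tokio::net::UnixStream;

const READ_BUFFER_SIZE: usize = 20_480; // 20 KiB per client, down from ~2 MB

// Hypothetical handler: each accepted connection allocates one
// fixed-size buffer, so per-client memory tracks this constant.
async fn handle_client(mut stream: UnixStream) -> std::io::Result<()> {
    let mut buf = vec![0u8; READ_BUFFER_SIZE];
    loop {
        let n = stream.read(&mut buf).await?;
        if n == 0 {
            break; // client hung up
        }
        // ... decode and dispatch the bus request from &buf[..n] ...
    }
    Ok(())
}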
@@ -178,11 +178,15 @@ impl BatchedCommands {
   }
 
   pub fn submit(&mut self) -> PyResult<usize> {
+    const MAX_BATCH_SIZE: usize = 512;
     // We're draining the request list out, which is a move that
     // *should* be elided by the optimizing compiler.
     let len = self.batch.len();
-    let batch: Vec<BusRequest> = self.batch.drain(0..).collect();
-    run_query(batch).unwrap();
+    while !self.batch.is_empty() {
+      let batch_size = usize::min(MAX_BATCH_SIZE, self.batch.len());
+      let batch: Vec<BusRequest> = self.batch.drain(0..batch_size).collect();
+      run_query(batch).unwrap();
+    }
     Ok(len)
   }
 }
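The loop submits the queued requests in groups of at most 512 instead of one arbitrarily large batch. A self-contained sketch of the same drain-in-chunks pattern, with plain integers standing in for BusRequest and a stub in place of run_query:

const MAX_BATCH_SIZE: usize = 512;

// Stand-in for run_query(): just reports how many requests it received.
fn run_query(batch: Vec<u32>) -> usize {
    batch.len()
}

fn submit(queue: &mut Vec<u32>) -> usize {
    let len = queue.len();
    // Drain in bounded chunks; the chunk size is recomputed from the
    // live queue length, so the final partial batch needs no special case.
    while !queue.is_empty() {
        let batch_size = usize::min(MAX_BATCH_SIZE, queue.len());
        let batch: Vec<u32> = queue.drain(0..batch_size).collect();
        run_query(batch);
    }
    len
}

fn main() {
    let mut queue: Vec<u32> = (0..1300).collect();
    // 1300 pending requests go out as batches of 512, 512, and 276.
    assert_eq!(submit(&mut queue), 1300);
    assert!(queue.is_empty());
}

Because each iteration sizes its chunk from what actually remains, the code never has to predict the number of batches up front, which is the failure mode the commit message calls out.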