Proper fix for submitting IP/CPU/Queue mapping batches.

* Bring the per-client buffer size back down to a reasonable 20 KiB
  (from roughly 2 MB).
* Divide submissions into fixed-size groups of at most 512 requests
  and submit each group in turn. It's still MASSIVELY faster, but it
  can no longer fall victim to guessing the number of batches
  incorrectly.
Herbert Wolverson 2023-03-06 15:58:57 +00:00
parent bfe9601be1
commit 9392b43e3c
2 changed files with 7 additions and 3 deletions
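
The second bullet is the heart of the fix: rather than predicting how
many batches a submission will need, the loop in the diff below drains
the queue until it is empty. An illustrative standalone sketch (not
code from this repository) of the pitfall being avoided:

// Precomputing the batch count with integer division under-counts,
// while draining until empty cannot leave anything behind.
fn main() {
    const MAX_BATCH_SIZE: usize = 512;
    let mut queue: Vec<u32> = (0..1000).collect();

    // Fragile: 1000 / 512 == 1, which would leave 488 items behind.
    let guessed = queue.len() / MAX_BATCH_SIZE;

    // Robust: loop until the queue is actually empty.
    let mut batches = 0;
    while !queue.is_empty() {
        let n = usize::min(MAX_BATCH_SIZE, queue.len());
        let _batch: Vec<u32> = queue.drain(0..n).collect();
        batches += 1;
    }
    assert_eq!(batches, 2);
    println!("guessed {guessed} batch(es), actually needed {batches}");
}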

View File

@@ -12,7 +12,7 @@ use tokio::{
 use super::BUS_SOCKET_DIRECTORY;
-const READ_BUFFER_SIZE: usize = 2048000;
+const READ_BUFFER_SIZE: usize = 20_480;
 /// Implements a Tokio-friendly server using Unix Sockets and the bus protocol.
 /// Requests are handled and then forwarded to the handler.
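
For context on why the old 2,048,000-byte value mattered: the commit
message calls this a per-client buffer, so its cost scales with the
number of connected clients. A minimal sketch of that shape, assuming a
tokio Unix-socket accept loop and a hypothetical socket path (not the
repo's actual server code):

use tokio::io::AsyncReadExt;
use tokio::net::UnixListener;

const READ_BUFFER_SIZE: usize = 20_480;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Hypothetical socket path for illustration.
    let listener = UnixListener::bind("/tmp/example-bus.sock")?;
    loop {
        let (mut stream, _) = listener.accept().await?;
        tokio::spawn(async move {
            // One buffer per connected client: 20 KiB each here,
            // versus roughly 2 MB each before this commit.
            let mut buf = vec![0u8; READ_BUFFER_SIZE];
            while let Ok(n) = stream.read(&mut buf).await {
                if n == 0 {
                    break; // client hung up
                }
                // ... decode and dispatch buf[..n] ...
            }
        });
    }
}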

View File

@@ -178,11 +178,15 @@ impl BatchedCommands {
     }
     pub fn submit(&mut self) -> PyResult<usize> {
+        const MAX_BATCH_SIZE: usize = 512;
         // We're draining the request list out, which is a move that
         // *should* be elided by the optimizing compiler.
         let len = self.batch.len();
-        let batch: Vec<BusRequest> = self.batch.drain(0..).collect();
-        run_query(batch).unwrap();
+        while !self.batch.is_empty() {
+            let batch_size = usize::min(MAX_BATCH_SIZE, self.batch.len());
+            let batch: Vec<BusRequest> = self.batch.drain(0..batch_size).collect();
+            run_query(batch).unwrap();
+        }
         Ok(len)
     }
 }
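
A design note on the loop above: the same grouping could be written
with the slice chunks() iterator, but chunks() borrows its elements, so
handing run_query an owned Vec would force a clone of every BusRequest.
Draining moves the requests out instead and leaves the Vec empty for
reuse. A small sketch of the trade-off, using illustrative types rather
than repo code:

fn send(batch: Vec<String>) {
    println!("submitted a batch of {}", batch.len());
}

fn main() {
    const MAX_BATCH_SIZE: usize = 3;
    let mut pending: Vec<String> = (0..7).map(|i| format!("req-{i}")).collect();

    // chunks() alternative: for c in pending.chunks(MAX_BATCH_SIZE)
    // { send(c.to_vec()); } -- pays an extra clone per request.
    while !pending.is_empty() {
        let n = usize::min(MAX_BATCH_SIZE, pending.len());
        send(pending.drain(0..n).collect()); // moves, no clones
    }
    assert!(pending.is_empty()); // Vec is left empty, ready for reuse
}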