From 11375d34e1090b62439b4b6215747b40e5a86dfe Mon Sep 17 00:00:00 2001 From: Herbert Wolverson Date: Fri, 17 May 2024 14:11:10 -0500 Subject: [PATCH 1/9] Add a TRACING flag to the lqos_kern C system. If it is defined, the entry and exit times of the XDP and TC system are recorded in the kernel trace pipe. --- src/rust/lqos_sys/src/bpf/lqos_kern.c | 36 ++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/src/rust/lqos_sys/src/bpf/lqos_kern.c b/src/rust/lqos_sys/src/bpf/lqos_kern.c index 7d93fe59..9dca16d7 100644 --- a/src/rust/lqos_sys/src/bpf/lqos_kern.c +++ b/src/rust/lqos_sys/src/bpf/lqos_kern.c @@ -21,6 +21,7 @@ #include "common/flows.h" //#define VERBOSE 1 +//#define TRACING 1 /* Theory of operation: 1. (Packet arrives at interface) @@ -60,7 +61,6 @@ __be16 isp_vlan = 0; #define round_up(x, y) ((((x) - 1) | __round_mask(x, y)) + 1) #define ctx_ptr(ctx, mem) (void *)(unsigned long)ctx->mem - // Structure for passing metadata from XDP to TC struct metadata_pass_t { __u32 tc_handle; // The encoded TC handle @@ -70,6 +70,9 @@ struct metadata_pass_t { SEC("xdp") int xdp_prog(struct xdp_md *ctx) { +#ifdef TRACING + __u64 started = bpf_ktime_get_ns(); +#endif #ifdef VERBOSE bpf_debug("XDP-RDR"); #endif @@ -106,6 +109,7 @@ int xdp_prog(struct xdp_md *ctx) // If the dissector is unable to figure out what's going on, bail // out. if (!dissector_new(ctx, &dissector)) return XDP_PASS; + // Note that this step rewrites the VLAN tag if redirection // is requested. 
if (!dissector_find_l3_offset(&dissector, vlan_redirect)) return XDP_PASS; @@ -115,6 +119,7 @@ int xdp_prog(struct xdp_md *ctx) internet_vlan, &dissector ); + #ifdef VERBOSE bpf_debug("(XDP) Effective direction: %d", effective_direction); #endif @@ -208,6 +213,14 @@ int xdp_prog(struct xdp_md *ctx) #ifdef VERBOSE bpf_debug("(XDP) Redirect result: %u", redirect_result); #endif + +#ifdef TRACING +{ + __u64 now = bpf_ktime_get_ns(); + bpf_debug("(XDP) Exit time: %u", now - started); +} +#endif + return redirect_result; } return XDP_PASS; @@ -217,6 +230,9 @@ int xdp_prog(struct xdp_md *ctx) SEC("tc") int tc_iphash_to_cpu(struct __sk_buff *skb) { +#ifdef TRACING + __u64 started = bpf_ktime_get_ns(); +#endif #ifdef VERBOSE bpf_debug("TC-MAP"); #endif @@ -271,6 +287,12 @@ int tc_iphash_to_cpu(struct __sk_buff *skb) // We can short-circuit the redirect and bypass the second // LPM lookup! Yay! skb->priority = meta->tc_handle; + #ifdef TRACING + { + __u64 now = bpf_ktime_get_ns(); + bpf_debug("(TC) Exit time: %u", now - started); + } + #endif return TC_ACT_OK; } } @@ -310,12 +332,24 @@ int tc_iphash_to_cpu(struct __sk_buff *skb) bpf_debug("(TC) Mapped to TC handle %x", ip_info->tc_handle); #endif skb->priority = ip_info->tc_handle; + #ifdef TRACING + { + __u64 now = bpf_ktime_get_ns(); + bpf_debug("(TC) Exit time: %u", now - started); + } + #endif return TC_ACT_OK; } else { // We didn't find anything #ifdef VERBOSE bpf_debug("(TC) didn't map anything"); #endif + #ifdef TRACING + { + __u64 now = bpf_ktime_get_ns(); + bpf_debug("(TC) Exit time: %u", now - started); + } + #endif return TC_ACT_OK; } From ab64113f9854fd6b12e92da1f148f60a2b8ea829 Mon Sep 17 00:00:00 2001 From: Herbert Wolverson Date: Fri, 17 May 2024 14:12:15 -0500 Subject: [PATCH 2/9] Add a "hot cache" to the XDP LPM lookup system. Adds a new BPF map (an LRU hash) containing IP addresses and TC mapping info. IPs are first checked against the hot cache, because a hashmap lookup is faster than an LPM lookup. 
If found, the cached value is used. If not found, then the key is inserted into the LRU map (so currently hot stays present, others expire over time) for future cache use. --- src/rust/lqos_sys/src/bpf/common/lpm.h | 48 +++++++++++++++++++-- src/rust/lqos_sys/src/bpf/common/maximums.h | 3 ++ 2 files changed, 48 insertions(+), 3 deletions(-) diff --git a/src/rust/lqos_sys/src/bpf/common/lpm.h b/src/rust/lqos_sys/src/bpf/common/lpm.h index 61200695..99f01cd6 100644 --- a/src/rust/lqos_sys/src/bpf/common/lpm.h +++ b/src/rust/lqos_sys/src/bpf/common/lpm.h @@ -9,7 +9,6 @@ #include #include #include "maximums.h" -#include "debug.h" #include "dissector.h" // Data structure used for map_ip_hash @@ -24,6 +23,22 @@ struct ip_hash_key { struct in6_addr address; // An IPv6 address. IPv4 uses the last 32 bits. }; +// Hot cache for recent IP lookups, an attempt +// at a speed improvement predicated on the idea +// that LPM isn't the fastest +// The cache is optional. define USE_HOTCACHE +// to enable it. +#define USE_HOTCACHE 1 + +#ifdef USE_HOTCACHE +struct { + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __uint(max_entries, HOT_CACHE_SIZE); + __type(key, struct in6_addr); + __type(value, struct ip_hash_info); +} ip_to_cpu_and_tc_hotcache SEC(".maps"); +#endif + // Map describing IP to CPU/TC mappings struct { __uint(type, BPF_MAP_TYPE_LPM_TRIE); @@ -71,13 +86,39 @@ static __always_inline struct ip_hash_info * setup_lookup_key_and_tc_cpu( struct dissector_t * dissector ) { + struct ip_hash_info * ip_info; + lookup_key->prefixlen = 128; lookup_key->address = (direction == 1) ? 
dissector->dst_ip : dissector->src_ip; - struct ip_hash_info * ip_info = bpf_map_lookup_elem( + + #ifdef USE_HOTCACHE + // Try a hot cache search + ip_info = bpf_map_lookup_elem( + &ip_to_cpu_and_tc_hotcache, + &lookup_key->address + ); + if (ip_info) { + // We got a cache hit, so return + return ip_info; + } + #endif + + ip_info = bpf_map_lookup_elem( &map_ip_to_cpu_and_tc, lookup_key ); + #ifdef USE_HOTCACHE + if (ip_info) { + // We found it, so add it to the cache + bpf_map_update_elem( + &ip_to_cpu_and_tc_hotcache, + &lookup_key->address, + ip_info, + BPF_NOEXIST + ); + } + #endif return ip_info; } @@ -104,9 +145,10 @@ static __always_inline struct ip_hash_info * tc_setup_lookup_key_and_tc_cpu( lookup_key->prefixlen = 128; // Direction is reversed because we are operating on egress if (direction < 3) { - lookup_key->address = (direction == 1) ? dissector->src_ip : + lookup_key->address = (direction == 1) ? dissector->src_ip : dissector->dst_ip; *out_effective_direction = direction; + struct ip_hash_info * ip_info = bpf_map_lookup_elem( &map_ip_to_cpu_and_tc, lookup_key diff --git a/src/rust/lqos_sys/src/bpf/common/maximums.h b/src/rust/lqos_sys/src/bpf/common/maximums.h index 7520cbc9..2a3afe7f 100644 --- a/src/rust/lqos_sys/src/bpf/common/maximums.h +++ b/src/rust/lqos_sys/src/bpf/common/maximums.h @@ -14,3 +14,6 @@ // Maximum number of packet pairs to track per flow. #define MAX_PACKETS MAX_FLOWS + +// Hot Cache Size +#define HOT_CACHE_SIZE 512 \ No newline at end of file From 6dc811968b7b81e96d1d0705ba8ff4c5711b3f16 Mon Sep 17 00:00:00 2001 From: Herbert Wolverson Date: Fri, 17 May 2024 14:14:29 -0500 Subject: [PATCH 3/9] Pin the HOT_CACHE map so it can be accessed elsewhere. 
--- src/rust/lqos_sys/src/bpf/common/lpm.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/rust/lqos_sys/src/bpf/common/lpm.h b/src/rust/lqos_sys/src/bpf/common/lpm.h index 99f01cd6..160453c7 100644 --- a/src/rust/lqos_sys/src/bpf/common/lpm.h +++ b/src/rust/lqos_sys/src/bpf/common/lpm.h @@ -36,6 +36,7 @@ struct { __uint(max_entries, HOT_CACHE_SIZE); __type(key, struct in6_addr); __type(value, struct ip_hash_info); + __uint(pinning, LIBBPF_PIN_BY_NAME); } ip_to_cpu_and_tc_hotcache SEC(".maps"); #endif From d9eedcd80419304f8e72a243fe84e2b303ceac80 Mon Sep 17 00:00:00 2001 From: Herbert Wolverson Date: Fri, 17 May 2024 14:30:30 -0500 Subject: [PATCH 4/9] Add cache invalidation to the hot cache. Whenever an IP mapping changes, the cache is invalidated - meaning it will re-cache the correct values. --- src/rust/lqos_sys/src/ip_mapping/mod.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/rust/lqos_sys/src/ip_mapping/mod.rs b/src/rust/lqos_sys/src/ip_mapping/mod.rs index d5f038fc..6be7b373 100644 --- a/src/rust/lqos_sys/src/ip_mapping/mod.rs +++ b/src/rust/lqos_sys/src/ip_mapping/mod.rs @@ -36,6 +36,7 @@ pub fn add_ip_to_tc( let mut value = IpHashData { cpu: ip_to_add.cpu, tc_handle: ip_to_add.handle() }; bpf_map.insert_or_update(&mut key, &mut value)?; + clear_hot_cache()?; Ok(()) } @@ -56,6 +57,7 @@ pub fn del_ip_from_tc(address: &str, upload: bool) -> Result<()> { let ip = XdpIpAddress::from_ip(ip); let mut key = IpHashKey { prefixlen: ip_to_add.prefix, address: ip.0 }; bpf_map.delete(&mut key)?; + clear_hot_cache()?; Ok(()) } @@ -71,6 +73,8 @@ pub fn clear_ips_from_tc() -> Result<()> { )?; bpf_map.clear()?; + clear_hot_cache()?; + Ok(()) } @@ -89,3 +93,12 @@ pub fn list_mapped_ips() -> Result> { Ok(raw) } + +/// Clears the "hot cache", which should be done whenever you change the IP +/// mappings - because otherwise cached data will keep going to the previous +/// destinations. 
+fn clear_hot_cache() -> Result<()> { + let mut bpf_map = BpfMap::::from_path("/sys/fs/bpf/ip_to_cpu_and_tc_hotcache")?; + bpf_map.clear()?; + Ok(()) +} \ No newline at end of file From 8372786d46685ba7214164f51a85e1293cb5696f Mon Sep 17 00:00:00 2001 From: Herbert Wolverson Date: Fri, 17 May 2024 14:52:13 -0500 Subject: [PATCH 5/9] TINY change: only set the prefixlen if we're actually going to use it. --- src/rust/lqos_sys/src/bpf/common/lpm.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rust/lqos_sys/src/bpf/common/lpm.h b/src/rust/lqos_sys/src/bpf/common/lpm.h index 160453c7..e657aa8d 100644 --- a/src/rust/lqos_sys/src/bpf/common/lpm.h +++ b/src/rust/lqos_sys/src/bpf/common/lpm.h @@ -89,8 +89,7 @@ static __always_inline struct ip_hash_info * setup_lookup_key_and_tc_cpu( { struct ip_hash_info * ip_info; - lookup_key->prefixlen = 128; - lookup_key->address = (direction == 1) ? dissector->dst_ip : + lookup_key->address = (direction == 1) ? dissector->dst_ip : dissector->src_ip; #ifdef USE_HOTCACHE @@ -105,6 +104,7 @@ static __always_inline struct ip_hash_info * setup_lookup_key_and_tc_cpu( } #endif + lookup_key->prefixlen = 128; ip_info = bpf_map_lookup_elem( &map_ip_to_cpu_and_tc, lookup_key From 0e711dc09c996ca9e187d78ff0a7827164f986c7 Mon Sep 17 00:00:00 2001 From: Herbert Wolverson Date: Fri, 17 May 2024 15:01:09 -0500 Subject: [PATCH 6/9] Expand hot cache map size to 32k entries (as Gates would say, that should be enough for everyone). Add a "negative cache" element. When a "no match" item appears, it is mapped to CPU 4294967294. That's considered a safe magic number, because if you have that many CPUs in one box you are really going to surprise me. If an LPM lookup occurs, and still comes up as "no match" - then the item is cached with this sentinel value. If a hot cache hit returns the sentinel value, it returns NULL just like if no hit had occurred.
This should eliminate the performance penalty that accompanies IP addresses blasting out as fast as they can, but not being in Shaped Devices. --- src/rust/lqos_sys/src/bpf/common/lpm.h | 18 ++++++++++++++++++ src/rust/lqos_sys/src/bpf/common/maximums.h | 6 +++++- src/rust/remove_pinned_maps.sh | 4 +++- 3 files changed, 26 insertions(+), 2 deletions(-) diff --git a/src/rust/lqos_sys/src/bpf/common/lpm.h b/src/rust/lqos_sys/src/bpf/common/lpm.h index e657aa8d..07e64046 100644 --- a/src/rust/lqos_sys/src/bpf/common/lpm.h +++ b/src/rust/lqos_sys/src/bpf/common/lpm.h @@ -99,6 +99,11 @@ static __always_inline struct ip_hash_info * setup_lookup_key_and_tc_cpu( &lookup_key->address ); if (ip_info) { + // Is it a negative hit? + if (ip_info->cpu == NEGATIVE_HIT) { + return NULL; + } + // We got a cache hit, so return return ip_info; } @@ -118,6 +123,19 @@ static __always_inline struct ip_hash_info * setup_lookup_key_and_tc_cpu( ip_info, BPF_NOEXIST ); + } else { + // Store a negative result. This is designed to alleviate the pain + // of repeatedly hitting queries for IPs that ARE NOT shaped. + struct ip_hash_info negative_hit = { + .cpu = NEGATIVE_HIT, + .tc_handle = NEGATIVE_HIT + }; + bpf_map_update_elem( + &ip_to_cpu_and_tc_hotcache, + &lookup_key->address, + &negative_hit, + BPF_NOEXIST + ); } #endif return ip_info; diff --git a/src/rust/lqos_sys/src/bpf/common/maximums.h b/src/rust/lqos_sys/src/bpf/common/maximums.h index 2a3afe7f..d1049142 100644 --- a/src/rust/lqos_sys/src/bpf/common/maximums.h +++ b/src/rust/lqos_sys/src/bpf/common/maximums.h @@ -16,4 +16,8 @@ #define MAX_PACKETS MAX_FLOWS // Hot Cache Size -#define HOT_CACHE_SIZE 512 \ No newline at end of file +#define HOT_CACHE_SIZE 32768 + +// Hot Cache Negative Hit Flag +// If you have 4294967294 CPUs, I love you. 
+#define NEGATIVE_HIT 4294967294 \ No newline at end of file diff --git a/src/rust/remove_pinned_maps.sh b/src/rust/remove_pinned_maps.sh index 38b7a3e8..fb715623 100755 --- a/src/rust/remove_pinned_maps.sh +++ b/src/rust/remove_pinned_maps.sh @@ -13,4 +13,6 @@ rm -v /sys/fs/bpf/bifrost_vlan_map rm -v /sys/fs/bpf/heimdall rm -v /sys/fs/bpf/heimdall_config rm -v /sys/fs/bpf/heimdall_watching -rm -v /sys/fs/bpf/flowbee \ No newline at end of file +rm -v /sys/fs/bpf/flowbee +rm -v /sys/fs/bpf/ip_to_cpu_and_tc_hotcache + From a326dce33fefb97690a32af6e7778db2a7c3ba3a Mon Sep 17 00:00:00 2001 From: Herbert Wolverson Date: Wed, 22 May 2024 08:41:49 -0500 Subject: [PATCH 7/9] ISSUE #486 - Fix the B to G in Plotly on the throughput graph. We're replacing it in beta 2, but that turned out to be a one-liner. --- src/rust/lqos_node_manager/static/lqos.js | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/rust/lqos_node_manager/static/lqos.js b/src/rust/lqos_node_manager/static/lqos.js index 3abe56d7..36b153fd 100644 --- a/src/rust/lqos_node_manager/static/lqos.js +++ b/src/rust/lqos_node_manager/static/lqos.js @@ -359,7 +359,14 @@ class MultiRingBuffer { {x: x, y:this.data['shaped'].sortedY[1], name: 'Shaped Upload', type: 'scatter', fill: 'tozeroy', marker: {color: 'rgb(124,252,0)'}}, ]; if (this.plotted == null) { - Plotly.newPlot(graph, graphData, { margin: { l:0,r:0,b:0,t:0,pad:4 }, yaxis: { automargin: true, title: "Traffic (bits)" }, xaxis: {automargin: true, title: "Time since now (seconds)"} }, { responsive: true }); + Plotly.newPlot( + graph, + graphData, + { + margin: { l:0,r:0,b:0,t:0,pad:4 }, + yaxis: { automargin: true, title: "Traffic (bits)", exponentformat: "SI" }, + xaxis: {automargin: true, title: "Time since now (seconds)"} + }, { responsive: true }); this.plotted = true; } else { Plotly.redraw(graph, graphData); From 667fec63e990e6130965aaf970ccf0ba1e930a2e Mon Sep 17 00:00:00 2001 From: Herbert Wolverson Date: Wed, 22 May 2024 
09:10:13 -0500 Subject: [PATCH 8/9] ISSUE #486 - Replace THROUGHPUT_BUFFER's external lock that required async with interior mutability regular lock that doesn't. This eliminates the possibility of cross-task locking issues leading to a deadlock, and reduces the surface area of the lock period also. Also replace RwLock with Mutex, the simplicity is usually a net gain. This appears to have resolved the issue for me. --- .../src/tracker/cache/throughput.rs | 41 ++++++++++++------- .../src/tracker/cache_manager.rs | 3 +- src/rust/lqos_node_manager/src/tracker/mod.rs | 4 +- 3 files changed, 29 insertions(+), 19 deletions(-) diff --git a/src/rust/lqos_node_manager/src/tracker/cache/throughput.rs b/src/rust/lqos_node_manager/src/tracker/cache/throughput.rs index b15e7ef1..ebf5c54a 100644 --- a/src/rust/lqos_node_manager/src/tracker/cache/throughput.rs +++ b/src/rust/lqos_node_manager/src/tracker/cache/throughput.rs @@ -1,14 +1,19 @@ +use std::sync::Mutex; + use crate::tracker::ThroughputPerSecond; use lqos_bus::{bus_request, BusRequest, BusResponse}; use once_cell::sync::Lazy; -use rocket::tokio::sync::RwLock; -pub static THROUGHPUT_BUFFER: Lazy> = - Lazy::new(|| RwLock::new(TotalThroughput::new())); +pub static THROUGHPUT_BUFFER: Lazy = + Lazy::new(|| TotalThroughput::new()); /// Maintains an in-memory ringbuffer of the last 5 minutes of /// throughput data.
pub struct TotalThroughput { + inner: Mutex +} + +struct TotalThroughputInner { data: Vec, head: usize, prev_head: usize, @@ -18,14 +23,16 @@ impl TotalThroughput { /// Create a new throughput ringbuffer system pub fn new() -> Self { Self { - data: vec![ThroughputPerSecond::default(); 300], - head: 0, - prev_head: 0, + inner: Mutex::new(TotalThroughputInner { + data: vec![ThroughputPerSecond::default(); 300], + head: 0, + prev_head: 0, + }), } } /// Run once per second to update the ringbuffer with current data - pub async fn tick(&mut self) { + pub async fn tick(&self) { if let Ok(messages) = bus_request(vec![BusRequest::GetCurrentThroughput]).await { @@ -36,12 +43,14 @@ impl TotalThroughput { shaped_bits_per_second, } = msg { - self.data[self.head].bits_per_second = bits_per_second; - self.data[self.head].packets_per_second = packets_per_second; - self.data[self.head].shaped_bits_per_second = shaped_bits_per_second; - self.prev_head = self.head; - self.head += 1; - self.head %= 300; + let mut lock = self.inner.lock().unwrap(); + let head = lock.head; + lock.data[head].bits_per_second = bits_per_second; + lock.data[head].packets_per_second = packets_per_second; + lock.data[head].shaped_bits_per_second = shaped_bits_per_second; + lock.prev_head = lock.head; + lock.head += 1; + lock.head %= 300; } } } @@ -49,12 +58,14 @@ impl TotalThroughput { /// Retrieve just the current throughput data (1 tick) pub fn current(&self) -> ThroughputPerSecond { - self.data[self.prev_head] + let lock = self.inner.lock().unwrap(); + lock.data[lock.prev_head] } /// Retrieve the head (0-299) and the full current throughput /// buffer. Used to populate the dashboard the first time. 
pub fn copy(&self) -> (usize, Vec) { - (self.head, self.data.clone()) + let lock = self.inner.lock().unwrap(); + (lock.head, lock.data.clone()) } } diff --git a/src/rust/lqos_node_manager/src/tracker/cache_manager.rs b/src/rust/lqos_node_manager/src/tracker/cache_manager.rs index 75083303..0d612679 100644 --- a/src/rust/lqos_node_manager/src/tracker/cache_manager.rs +++ b/src/rust/lqos_node_manager/src/tracker/cache_manager.rs @@ -121,8 +121,7 @@ fn watch_for_shaped_devices_changing() -> Result<()> { pub async fn update_total_throughput_buffer() { loop { let now = Instant::now(); - let mut lock = THROUGHPUT_BUFFER.write().await; - lock.tick().await; + THROUGHPUT_BUFFER.tick().await; let wait_time = Duration::from_secs(1) - now.elapsed(); if wait_time.as_micros() > 0 { rocket::tokio::time::sleep(Duration::from_secs(1)).await; diff --git a/src/rust/lqos_node_manager/src/tracker/mod.rs b/src/rust/lqos_node_manager/src/tracker/mod.rs index ec5c2b8f..7d6994a2 100644 --- a/src/rust/lqos_node_manager/src/tracker/mod.rs +++ b/src/rust/lqos_node_manager/src/tracker/mod.rs @@ -73,7 +73,7 @@ pub struct ThroughputPerSecond { pub async fn current_throughput( _auth: AuthGuard, ) -> NoCache> { - let result = THROUGHPUT_BUFFER.read().await.current(); + let result = THROUGHPUT_BUFFER.current(); NoCache::new(MsgPack(result)) } @@ -81,7 +81,7 @@ pub async fn current_throughput( pub async fn throughput_ring_buffer( _auth: AuthGuard, ) -> NoCache)>> { - let result = THROUGHPUT_BUFFER.read().await.copy(); + let result = THROUGHPUT_BUFFER.copy(); NoCache::new(MsgPack(result)) } From 12721dff85d11370789d0ead37c78c9300c9bfa5 Mon Sep 17 00:00:00 2001 From: Herbert Wolverson Date: Thu, 23 May 2024 09:55:48 -0500 Subject: [PATCH 9/9] ISSUE #468 - After managing to reproduce it with Robert, this should fix the actual issue. The actual problem was checked subtraction in a timer loop carefully checking for the negative - which isn't allowed - and then doing it anyway. Oops. 
--- src/rust/lqos_node_manager/src/tracker/cache_manager.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/rust/lqos_node_manager/src/tracker/cache_manager.rs b/src/rust/lqos_node_manager/src/tracker/cache_manager.rs index 0d612679..353e7469 100644 --- a/src/rust/lqos_node_manager/src/tracker/cache_manager.rs +++ b/src/rust/lqos_node_manager/src/tracker/cache_manager.rs @@ -122,9 +122,9 @@ pub async fn update_total_throughput_buffer() { loop { let now = Instant::now(); THROUGHPUT_BUFFER.tick().await; - let wait_time = Duration::from_secs(1) - now.elapsed(); - if wait_time.as_micros() > 0 { - rocket::tokio::time::sleep(Duration::from_secs(1)).await; + let elapsed = now.elapsed(); + if elapsed < Duration::from_secs(1) { + rocket::tokio::time::sleep(Duration::from_secs(1) - elapsed).await; } } } \ No newline at end of file