Mirror of https://gitlab.com/veilid/veilid.git
alignment refactor
This commit is contained in:
parent 221c09b555
commit 16d74b96f3
@@ -444,7 +444,11 @@ impl ClientApiConnection {
         res.map_err(map_to_string)
     }
 
-    pub async fn server_appcall_reply(&mut self, id: u64, msg: Vec<u8>) -> Result<(), String> {
+    pub async fn server_appcall_reply(
+        &mut self,
+        id: OperationId,
+        msg: Vec<u8>,
+    ) -> Result<(), String> {
         trace!("ClientApiConnection::appcall_reply");
         let server = {
             let inner = self.inner.borrow();
@@ -455,7 +459,7 @@ impl ClientApiConnection {
                 .clone()
         };
         let mut request = server.borrow().app_call_reply_request();
-        request.get().set_id(id);
+        request.get().set_id(id.as_u64());
         request.get().set_message(&msg);
         let response = self
             .cancellable(request.send().promise)
@@ -47,7 +47,7 @@ struct CommandProcessorInner {
     autoreconnect: bool,
     server_addr: Option<SocketAddr>,
     connection_waker: Eventual,
-    last_call_id: Option<u64>,
+    last_call_id: Option<OperationId>,
 }
 
 type Handle<T> = Rc<RefCell<T>>;
@@ -249,7 +249,7 @@ reply - reply to an AppCall not handled directly by the server
                }
                Ok(v) => v,
            };
-            (id, second)
+            (OperationId::new(id), second)
        } else {
            let id = match some_last_id {
                None => {
@@ -394,8 +394,8 @@ reply - reply to an AppCall not handled directly by the server
    pub fn update_network_status(&mut self, network: veilid_core::VeilidStateNetwork) {
        self.inner_mut().ui.set_network_status(
            network.started,
-            network.bps_down,
-            network.bps_up,
+            network.bps_down.as_u64(),
+            network.bps_up.as_u64(),
            network.peers,
        );
    }
@@ -471,7 +471,9 @@ reply - reply to an AppCall not handled directly by the server
 
        self.inner().ui.add_node_event(format!(
            "AppCall ({:?}) id = {:016x} : {}",
-            call.sender, call.id, strmsg
+            call.sender,
+            call.id.as_u64(),
+            strmsg
        ));
 
        self.inner_mut().last_call_id = Some(call.id);
@@ -1,7 +1,7 @@
 use super::*;
 use cursive_table_view::*;
 use std::cmp::Ordering;
-use veilid_core::PeerTableData;
+use veilid_core::*;
 
 #[derive(Copy, Clone, PartialEq, Eq, Hash)]
 pub enum PeerTableColumn {
@@ -24,7 +24,8 @@ pub enum PeerTableColumn {
 // }
 // }
 
-fn format_ts(ts: u64) -> String {
+fn format_ts(ts: Timestamp) -> String {
+    let ts = ts.as_u64();
     let secs = timestamp_to_secs(ts);
     if secs >= 1.0 {
         format!("{:.2}s", timestamp_to_secs(ts))
@@ -33,7 +34,8 @@ fn format_ts(ts: u64) -> String {
     }
 }
 
-fn format_bps(bps: u64) -> String {
+fn format_bps(bps: ByteCount) -> String {
+    let bps = bps.as_u64();
     if bps >= 1024u64 * 1024u64 * 1024u64 {
         format!("{:.2}GB/s", (bps / (1024u64 * 1024u64)) as f64 / 1024.0)
     } else if bps >= 1024u64 * 1024u64 {
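The peer-table formatters now take the strongly typed Timestamp and ByteCount wrappers and unwrap them once with as_u64() before doing plain integer math. A minimal standalone sketch of that pattern, using simplified stand-ins for the veilid_core types (not the real definitions, and with simplified formatting logic):

// Simplified stand-ins for illustration only; the real types wrap AlignedU64.
#[derive(Copy, Clone)]
struct Timestamp(u64); // microseconds
#[derive(Copy, Clone)]
struct ByteCount(u64);

impl Timestamp {
    fn as_u64(self) -> u64 {
        self.0
    }
}
impl ByteCount {
    fn as_u64(self) -> u64 {
        self.0
    }
}

fn format_ts(ts: Timestamp) -> String {
    // Unwrap once at the edge, then do ordinary u64/f64 arithmetic.
    let us = ts.as_u64();
    format!("{:.2}s", us as f64 / 1_000_000.0)
}

fn format_bps(bps: ByteCount) -> String {
    let bps = bps.as_u64();
    if bps >= 1024 * 1024 {
        format!("{:.2}MB/s", bps as f64 / (1024.0 * 1024.0))
    } else {
        format!("{}B/s", bps)
    }
}

fn main() {
    println!("{}", format_ts(Timestamp(2_500_000)));
    println!("{}", format_bps(ByteCount(3 * 1024 * 1024)));
}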
@@ -21,8 +21,8 @@ pub struct ConnectionLimits {
     max_connection_frequency_per_min: usize,
     conn_count_by_ip4: BTreeMap<Ipv4Addr, usize>,
     conn_count_by_ip6_prefix: BTreeMap<Ipv6Addr, usize>,
-    conn_timestamps_by_ip4: BTreeMap<Ipv4Addr, Vec<u64>>,
-    conn_timestamps_by_ip6_prefix: BTreeMap<Ipv6Addr, Vec<u64>>,
+    conn_timestamps_by_ip4: BTreeMap<Ipv4Addr, Vec<Timestamp>>,
+    conn_timestamps_by_ip6_prefix: BTreeMap<Ipv6Addr, Vec<Timestamp>>,
 }
 
 impl ConnectionLimits {
@@ -48,7 +48,7 @@ impl ConnectionLimits {
         for (key, value) in &mut self.conn_timestamps_by_ip4 {
             value.retain(|v| {
                 // keep timestamps that are less than a minute away
-                cur_ts.saturating_sub(*v) < 60_000_000u64
+                cur_ts.saturating_sub(*v) < TimestampDuration::new(60_000_000u64)
             });
             if value.is_empty() {
                 dead_keys.push(*key);
@@ -64,7 +64,7 @@ impl ConnectionLimits {
         for (key, value) in &mut self.conn_timestamps_by_ip6_prefix {
             value.retain(|v| {
                 // keep timestamps that are less than a minute away
-                cur_ts.saturating_sub(*v) < 60_000_000u64
+                cur_ts.saturating_sub(*v) < TimestampDuration::new(60_000_000u64)
             });
             if value.is_empty() {
                 dead_keys.push(*key);
@@ -95,7 +95,7 @@ impl ConnectionLimits {
         let tstamps = &mut self.conn_timestamps_by_ip4.entry(v4).or_default();
         tstamps.retain(|v| {
             // keep timestamps that are less than a minute away
-            ts.saturating_sub(*v) < 60_000_000u64
+            ts.saturating_sub(*v) < TimestampDuration::new(60_000_000u64)
         });
         assert!(tstamps.len() <= self.max_connection_frequency_per_min);
         if tstamps.len() == self.max_connection_frequency_per_min {
@@ -38,12 +38,12 @@ use wasm::*;
 
 pub const MAX_MESSAGE_SIZE: usize = MAX_ENVELOPE_SIZE;
 pub const IPADDR_TABLE_SIZE: usize = 1024;
-pub const IPADDR_MAX_INACTIVE_DURATION_US: u64 = 300_000_000u64; // 5 minutes
+pub const IPADDR_MAX_INACTIVE_DURATION_US: TimestampDuration = TimestampDuration::new(300_000_000u64); // 5 minutes
 pub const PUBLIC_ADDRESS_CHANGE_DETECTION_COUNT: usize = 3;
 pub const PUBLIC_ADDRESS_CHECK_CACHE_SIZE: usize = 8;
 pub const PUBLIC_ADDRESS_CHECK_TASK_INTERVAL_SECS: u32 = 60;
-pub const PUBLIC_ADDRESS_INCONSISTENCY_TIMEOUT_US: u64 = 300_000_000u64; // 5 minutes
-pub const PUBLIC_ADDRESS_INCONSISTENCY_PUNISHMENT_TIMEOUT_US: u64 = 3600_000_000u64; // 60 minutes
+pub const PUBLIC_ADDRESS_INCONSISTENCY_TIMEOUT_US: TimestampDuration = TimestampDuration::new(300_000_000u64); // 5 minutes
+pub const PUBLIC_ADDRESS_INCONSISTENCY_PUNISHMENT_TIMEOUT_US: TimestampDuration = TimestampDuration::new(3600_000_000u64); // 60 minutes
 pub const BOOT_MAGIC: &[u8; 4] = b"BOOT";
 
 #[derive(Copy, Clone, Debug, Default)]
@@ -138,7 +138,7 @@ struct NetworkManagerInner {
     public_address_check_cache:
         BTreeMap<PublicAddressCheckCacheKey, LruCache<IpAddr, SocketAddress>>,
     public_address_inconsistencies_table:
-        BTreeMap<PublicAddressCheckCacheKey, HashMap<IpAddr, u64>>,
+        BTreeMap<PublicAddressCheckCacheKey, HashMap<IpAddr, Timestamp>>,
 }
 
 struct NetworkManagerUnlockedInner {
@@ -426,7 +426,7 @@ impl NetworkManager {
     pub fn purge_client_whitelist(&self) {
         let timeout_ms = self.with_config(|c| c.network.client_whitelist_timeout_ms);
         let mut inner = self.inner.lock();
-        let cutoff_timestamp = get_aligned_timestamp() - ((timeout_ms as u64) * 1000u64);
+        let cutoff_timestamp = get_aligned_timestamp() - TimestampDuration::new((timeout_ms as u64) * 1000u64);
         // Remove clients from the whitelist that haven't been since since our whitelist timeout
         while inner
             .client_whitelist
@@ -1285,7 +1285,7 @@ impl NetworkManager {
         // Network accounting
         self.stats_packet_rcvd(
             connection_descriptor.remote_address().to_ip_addr(),
-            data.len() as u64,
+            ByteCount::new(data.len() as u64),
         );
 
         // If this is a zero length packet, just drop it, because these are used for hole punching
@@ -1447,7 +1447,7 @@ impl NetworkManager {
     }
 
     // Callbacks from low level network for statistics gathering
-    pub fn stats_packet_sent(&self, addr: IpAddr, bytes: u64) {
+    pub fn stats_packet_sent(&self, addr: IpAddr, bytes: ByteCount) {
         let inner = &mut *self.inner.lock();
         inner
             .stats
@@ -1463,7 +1463,7 @@ impl NetworkManager {
             .add_up(bytes);
     }
 
-    pub fn stats_packet_rcvd(&self, addr: IpAddr, bytes: u64) {
+    pub fn stats_packet_rcvd(&self, addr: IpAddr, bytes: ByteCount) {
         let inner = &mut *self.inner.lock();
         inner
             .stats
@@ -406,7 +406,7 @@ impl Network {
         }
         // Network accounting
         self.network_manager()
-            .stats_packet_sent(dial_info.to_ip_addr(), data_len as u64);
+            .stats_packet_sent(dial_info.to_ip_addr(), ByteCount::new(data_len as u64));
 
         Ok(NetworkResult::Value(()))
     }
@@ -440,7 +440,7 @@ impl Network {
             .await
             .wrap_err("send message failure")?);
         self.network_manager()
-            .stats_packet_sent(dial_info.to_ip_addr(), data_len as u64);
+            .stats_packet_sent(dial_info.to_ip_addr(), ByteCount::new(data_len as u64));
 
         // receive single response
         let mut out = vec![0u8; MAX_MESSAGE_SIZE];
@@ -454,7 +454,7 @@ impl Network {
 
         let recv_socket_addr = recv_addr.remote_address().to_socket_addr();
         self.network_manager()
-            .stats_packet_rcvd(recv_socket_addr.ip(), recv_len as u64);
+            .stats_packet_rcvd(recv_socket_addr.ip(), ByteCount::new(recv_len as u64));
 
         // if the from address is not the same as the one we sent to, then drop this
         if recv_socket_addr != peer_socket_addr {
@@ -481,7 +481,7 @@ impl Network {
 
         network_result_try!(pnc.send(data).await.wrap_err("send failure")?);
         self.network_manager()
-            .stats_packet_sent(dial_info.to_ip_addr(), data_len as u64);
+            .stats_packet_sent(dial_info.to_ip_addr(), ByteCount::new(data_len as u64));
 
         let out = network_result_try!(network_result_try!(timeout(timeout_ms, pnc.recv())
             .await
@@ -489,7 +489,7 @@ impl Network {
             .wrap_err("recv failure")?);
 
         self.network_manager()
-            .stats_packet_rcvd(dial_info.to_ip_addr(), out.len() as u64);
+            .stats_packet_rcvd(dial_info.to_ip_addr(), ByteCount::new(out.len() as u64));
 
         Ok(NetworkResult::Value(out))
     }
@@ -519,7 +519,7 @@ impl Network {
 
             // Network accounting
             self.network_manager()
-                .stats_packet_sent(peer_socket_addr.ip(), data_len as u64);
+                .stats_packet_sent(peer_socket_addr.ip(), ByteCount::new(data_len as u64));
 
             // Data was consumed
             return Ok(None);
@@ -536,7 +536,7 @@ impl Network {
                 // Network accounting
                 self.network_manager().stats_packet_sent(
                     descriptor.remote().to_socket_addr().ip(),
-                    data_len as u64,
+                    ByteCount::new(data_len as u64),
                 );
 
                 // Data was consumed
@@ -595,7 +595,7 @@ impl Network {
 
         // Network accounting
         self.network_manager()
-            .stats_packet_sent(dial_info.to_ip_addr(), data_len as u64);
+            .stats_packet_sent(dial_info.to_ip_addr(), ByteCount::new(data_len as u64));
 
         Ok(NetworkResult::value(connection_descriptor))
     }
@@ -65,7 +65,7 @@ impl Network {
                 // Network accounting
                 network_manager.stats_packet_rcvd(
                     descriptor.remote_address().to_ip_addr(),
-                    size as u64,
+                    ByteCount::new(size as u64),
                 );
 
                 // Pass it up for processing
@@ -13,7 +13,7 @@ impl NetworkManager {
            .set_routine(move |s, l, t| {
                Box::pin(
                    this.clone()
-                        .rolling_transfers_task_routine(s, l, t)
+                        .rolling_transfers_task_routine(s, Timestamp::new(l), Timestamp::new(t))
                        .instrument(trace_span!(
                            parent: None,
                            "NetworkManager rolling transfers task routine"
@@ -30,7 +30,11 @@ impl NetworkManager {
            .set_routine(move |s, l, t| {
                Box::pin(
                    this.clone()
-                        .public_address_check_task_routine(s, l, t)
+                        .public_address_check_task_routine(
+                            s,
+                            Timestamp::new(l),
+                            Timestamp::new(t),
+                        )
                        .instrument(trace_span!(
                            parent: None,
                            "public address check task routine"
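The periodic-task plumbing still hands its routines raw u64 tick times (the l and t closure parameters), so the typed API is restored at the call boundary by wrapping them in Timestamp::new before invoking the routine. A minimal sketch of that boundary conversion, using a simplified Timestamp stand-in and a hypothetical set_routine-style scheduler callback (not the actual veilid task types):

#[derive(Copy, Clone, Debug)]
struct Timestamp(u64);

impl Timestamp {
    const fn new(v: u64) -> Self {
        Timestamp(v)
    }
}

// The typed routine only ever sees Timestamp values...
fn rolling_transfers_routine(last_ts: Timestamp, cur_ts: Timestamp) {
    println!("tick: {:?} -> {:?}", last_ts, cur_ts);
}

// ...while the untyped scheduler callback receives plain u64 microseconds
// and wraps them exactly once at the boundary.
fn set_routine<F: Fn(u64, u64)>(f: F) {
    f(1_000_000, 2_000_000); // stand-in for the scheduler firing
}

fn main() {
    set_routine(|l, t| rolling_transfers_routine(Timestamp::new(l), Timestamp::new(t)));
}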
@@ -384,11 +384,11 @@ impl BucketEntryInner {
        rti: &RoutingTableInner,
        only_live: bool,
        filter: Option<NodeRefFilter>,
-    ) -> Vec<(ConnectionDescriptor, u64)> {
+    ) -> Vec<(ConnectionDescriptor, Timestamp)> {
        let connection_manager =
            rti.unlocked_inner.network_manager.connection_manager();
 
-        let mut out: Vec<(ConnectionDescriptor, u64)> = self
+        let mut out: Vec<(ConnectionDescriptor, Timestamp)> = self
            .last_connections
            .iter()
            .filter_map(|(k, v)| {
@@ -432,7 +432,7 @@ impl BucketEntryInner {
                    // If this is not connection oriented, then we check our last seen time
                    // to see if this mapping has expired (beyond our timeout)
                    let cur_ts = get_aligned_timestamp();
-                    (v.1 + (CONNECTIONLESS_TIMEOUT_SECS as u64 * 1_000_000u64)) >= cur_ts
+                    (v.1 + TimestampDuration::new(CONNECTIONLESS_TIMEOUT_SECS as u64 * 1_000_000u64)) >= cur_ts
                };
 
                if alive {
@@ -554,7 +554,7 @@ impl BucketEntryInner {
        match self.peer_stats.rpc_stats.first_consecutive_seen_ts {
            None => false,
            Some(ts) => {
-                cur_ts.saturating_sub(ts) >= (UNRELIABLE_PING_SPAN_SECS as u64 * 1000000u64)
+                cur_ts.saturating_sub(ts) >= TimestampDuration::new(UNRELIABLE_PING_SPAN_SECS as u64 * 1000000u64)
            }
        }
    }
@@ -569,7 +569,7 @@ impl BucketEntryInner {
        match self.peer_stats.rpc_stats.last_seen_ts {
            None => self.peer_stats.rpc_stats.recent_lost_answers < NEVER_REACHED_PING_COUNT,
            Some(ts) => {
-                cur_ts.saturating_sub(ts) >= (UNRELIABLE_PING_SPAN_SECS as u64 * 1000000u64)
+                cur_ts.saturating_sub(ts) >= TimestampDuration::new(UNRELIABLE_PING_SPAN_SECS as u64 * 1000000u64)
            }
        }
    }
@@ -582,7 +582,7 @@ impl BucketEntryInner {
            .max(self.peer_stats.rpc_stats.last_question_ts)
    }
 
-    fn needs_constant_ping(&self, cur_ts: Timestamp, interval: Timestamp) -> bool {
+    fn needs_constant_ping(&self, cur_ts: Timestamp, interval_us: TimestampDuration) -> bool {
        // If we have not either seen the node in the last 'interval' then we should ping it
        let latest_contact_time = self.latest_contact_time();
 
@@ -590,7 +590,7 @@ impl BucketEntryInner {
            None => true,
            Some(latest_contact_time) => {
                // If we haven't done anything with this node in 'interval' seconds
-                cur_ts.saturating_sub(latest_contact_time) >= (interval * 1000000u64)
+                cur_ts.saturating_sub(latest_contact_time) >= interval_us
            }
        }
    }
@@ -603,7 +603,7 @@ impl BucketEntryInner {
        // If this entry needs a keepalive (like a relay node),
        // then we should ping it regularly to keep our association alive
        if needs_keepalive {
-            return self.needs_constant_ping(cur_ts, KEEPALIVE_PING_INTERVAL_SECS as u64);
+            return self.needs_constant_ping(cur_ts, TimestampDuration::new(KEEPALIVE_PING_INTERVAL_SECS as u64 * 1000000u64));
        }
 
        // If we don't have node status for this node, then we should ping it to get some node status
@@ -636,8 +636,8 @@ impl BucketEntryInner {
                    latest_contact_time.saturating_sub(start_of_reliable_time);
 
                retry_falloff_log(
-                    reliable_last,
-                    reliable_cur,
+                    reliable_last.as_u64(),
+                    reliable_cur.as_u64(),
                    RELIABLE_PING_INTERVAL_START_SECS as u64 * 1_000_000u64,
                    RELIABLE_PING_INTERVAL_MAX_SECS as u64 * 1_000_000u64,
                    RELIABLE_PING_INTERVAL_MULTIPLIER,
@@ -647,7 +647,7 @@ impl BucketEntryInner {
            }
            BucketEntryState::Unreliable => {
                // If we are in an unreliable state, we need a ping every UNRELIABLE_PING_INTERVAL_SECS seconds
-                self.needs_constant_ping(cur_ts, UNRELIABLE_PING_INTERVAL_SECS as u64)
+                self.needs_constant_ping(cur_ts, TimestampDuration::new(UNRELIABLE_PING_INTERVAL_SECS as u64 * 1000000u64))
            }
            BucketEntryState::Dead => false,
        }
@@ -673,7 +673,7 @@ impl BucketEntryInner {
            {
                format!(
                    "{}s ago",
-                    timestamp_to_secs(cur_ts.saturating_sub(first_consecutive_seen_ts))
+                    timestamp_to_secs(cur_ts.saturating_sub(first_consecutive_seen_ts).as_u64())
                )
            } else {
                "never".to_owned()
@@ -681,7 +681,7 @@ impl BucketEntryInner {
        let last_seen_ts_str = if let Some(last_seen_ts) = self.peer_stats.rpc_stats.last_seen_ts {
            format!(
                "{}s ago",
-                timestamp_to_secs(cur_ts.saturating_sub(last_seen_ts))
+                timestamp_to_secs(cur_ts.saturating_sub(last_seen_ts).as_u64())
            )
        } else {
            "never".to_owned()
@@ -698,7 +698,7 @@ impl BucketEntryInner {
    ////////////////////////////////////////////////////////////////
    /// Called when rpc processor things happen
 
-    pub(super) fn question_sent(&mut self, ts: Timestamp, bytes: u64, expects_answer: bool) {
+    pub(super) fn question_sent(&mut self, ts: Timestamp, bytes: ByteCount, expects_answer: bool) {
        self.transfer_stats_accounting.add_up(bytes);
        self.peer_stats.rpc_stats.messages_sent += 1;
        self.peer_stats.rpc_stats.failed_to_send = 0;
@@ -707,7 +707,7 @@ impl BucketEntryInner {
            self.peer_stats.rpc_stats.last_question_ts = Some(ts);
        }
    }
-    pub(super) fn question_rcvd(&mut self, ts: Timestamp, bytes: u64) {
+    pub(super) fn question_rcvd(&mut self, ts: Timestamp, bytes: ByteCount) {
        self.transfer_stats_accounting.add_down(bytes);
        self.peer_stats.rpc_stats.messages_rcvd += 1;
        self.touch_last_seen(ts);
@@ -755,12 +755,12 @@ impl BucketEntry {
            updated_since_last_network_change: false,
            last_connections: BTreeMap::new(),
            local_network: BucketEntryLocalNetwork {
-                last_seen_our_node_info_ts: 0,
+                last_seen_our_node_info_ts: Timestamp::new(0u64),
                signed_node_info: None,
                node_status: None,
            },
            public_internet: BucketEntryPublicInternet {
-                last_seen_our_node_info_ts: 0,
+                last_seen_our_node_info_ts: Timestamp::new(0u64),
                signed_node_info: None,
                node_status: None,
            },
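Throughout the bucket entry logic, elapsed-time checks now compare the result of Timestamp::saturating_sub, which yields a TimestampDuration, against a TimestampDuration interval instead of mixing raw u64 microseconds. A small self-contained sketch of that relationship, with simplified stand-ins for the veilid_core types (not the actual definitions):

#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
struct Timestamp(u64); // absolute time, microseconds
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
struct TimestampDuration(u64); // elapsed time, microseconds

impl Timestamp {
    const fn new(v: u64) -> Self {
        Timestamp(v)
    }
    // Subtracting two absolute times yields a duration, never another Timestamp.
    fn saturating_sub(self, other: Timestamp) -> TimestampDuration {
        TimestampDuration(self.0.saturating_sub(other.0))
    }
}

impl TimestampDuration {
    const fn new(v: u64) -> Self {
        TimestampDuration(v)
    }
}

fn needs_constant_ping(cur_ts: Timestamp, last_contact: Timestamp, interval_us: TimestampDuration) -> bool {
    // Type-checked: an interval can only be compared against an elapsed duration.
    cur_ts.saturating_sub(last_contact) >= interval_us
}

fn main() {
    let now = Timestamp::new(10_000_000);
    let last = Timestamp::new(2_000_000);
    let keepalive = TimestampDuration::new(5_000_000);
    assert!(needs_constant_ping(now, last, keepalive));
}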
@@ -402,7 +402,7 @@ impl RoutingTable {
     }
 
     /// Return our current node info timestamp
-    pub fn get_own_node_info_ts(&self, routing_domain: RoutingDomain) -> Option<u64> {
+    pub fn get_own_node_info_ts(&self, routing_domain: RoutingDomain) -> Option<Timestamp> {
         self.inner.read().get_own_node_info_ts(routing_domain)
     }
 
@@ -7,7 +7,7 @@ use rkyv::{
 /// The size of the remote private route cache
 const REMOTE_PRIVATE_ROUTE_CACHE_SIZE: usize = 1024;
 /// Remote private route cache entries expire in 5 minutes if they haven't been used
-const REMOTE_PRIVATE_ROUTE_CACHE_EXPIRY: TimestampDuration = 300_000_000u64.into();
+const REMOTE_PRIVATE_ROUTE_CACHE_EXPIRY: TimestampDuration = TimestampDuration::new(300_000_000u64);
 /// Amount of time a route can remain idle before it gets tested
 const ROUTE_MIN_IDLE_TIME_MS: u32 = 30_000;
 
@@ -39,16 +39,16 @@ pub struct RouteStats {
     #[with(Skip)]
     pub questions_lost: u32,
     /// Timestamp of when the route was created
-    pub created_ts: u64,
+    pub created_ts: Timestamp,
     /// Timestamp of when the route was last checked for validity
     #[with(Skip)]
-    pub last_tested_ts: Option<u64>,
+    pub last_tested_ts: Option<Timestamp>,
     /// Timestamp of when the route was last sent to
     #[with(Skip)]
-    pub last_sent_ts: Option<u64>,
+    pub last_sent_ts: Option<Timestamp>,
     /// Timestamp of when the route was last received over
     #[with(Skip)]
-    pub last_received_ts: Option<u64>,
+    pub last_received_ts: Option<Timestamp>,
     /// Transfers up and down
     pub transfer_stats_down_up: TransferStatsDownUp,
     /// Latency stats
@@ -63,7 +63,7 @@ pub struct RouteStats {
 
 impl RouteStats {
     /// Make new route stats
-    pub fn new(created_ts: u64) -> Self {
+    pub fn new(created_ts: Timestamp) -> Self {
         Self {
             created_ts,
             ..Default::default()
@@ -143,7 +143,9 @@ impl RouteStats {
         // Has the route been tested within the idle time we'd want to check things?
         // (also if we've received successfully over the route, this will get set)
         if let Some(last_tested_ts) = self.last_tested_ts {
-            if cur_ts.saturating_sub(last_tested_ts) > (ROUTE_MIN_IDLE_TIME_MS as u64 * 1000u64) {
+            if cur_ts.saturating_sub(last_tested_ts)
+                > TimestampDuration::new(ROUTE_MIN_IDLE_TIME_MS as u64 * 1000u64)
+            {
                 return true;
             }
         } else {
@@ -210,9 +212,9 @@ pub struct RemotePrivateRouteInfo {
     // The private route itself
     private_route: Option<PrivateRoute>,
     /// Did this remote private route see our node info due to no safety route in use
-    last_seen_our_node_info_ts: u64,
+    last_seen_our_node_info_ts: Timestamp,
     /// Last time this remote private route was requested for any reason (cache expiration)
-    last_touched_ts: u64,
+    last_touched_ts: Timestamp,
     /// Stats
     stats: RouteStats,
 }
@@ -1630,7 +1632,7 @@ impl RouteSpecStore {
            .and_modify(|rpr| {
                if cur_ts.saturating_sub(rpr.last_touched_ts) >= REMOTE_PRIVATE_ROUTE_CACHE_EXPIRY {
                    // Start fresh if this had expired
-                    rpr.last_seen_our_node_info_ts = 0;
+                    rpr.last_seen_our_node_info_ts = Timestamp::new(0);
                    rpr.last_touched_ts = cur_ts;
                    rpr.stats = RouteStats::new(cur_ts);
                } else {
@@ -1641,7 +1643,7 @@ impl RouteSpecStore {
            .or_insert_with(|| RemotePrivateRouteInfo {
                // New remote private route cache entry
                private_route: Some(private_route),
-                last_seen_our_node_info_ts: 0,
+                last_seen_our_node_info_ts: Timestamp::new(0),
                last_touched_ts: cur_ts,
                stats: RouteStats::new(cur_ts),
            });
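The cache-expiry constant shows why the refactor leans on a const fn constructor rather than From/Into conversions: trait methods such as into() cannot be evaluated in a const initializer on stable Rust, which is presumably why the removed 300_000_000u64.into() form gives way to TimestampDuration::new. A minimal sketch of the distinction, using a simplified TimestampDuration stand-in:

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct TimestampDuration(u64);

impl TimestampDuration {
    // A const fn constructor can be evaluated at compile time...
    const fn new(v: u64) -> Self {
        TimestampDuration(v)
    }
}

impl From<u64> for TimestampDuration {
    fn from(v: u64) -> Self {
        TimestampDuration(v)
    }
}

// ...so this works in a const item:
const CACHE_EXPIRY: TimestampDuration = TimestampDuration::new(300_000_000);

// This would not compile on stable Rust: trait methods are not const-callable.
// const CACHE_EXPIRY_BAD: TimestampDuration = 300_000_000u64.into();

fn main() {
    // Outside a const context, either form works.
    let at_runtime: TimestampDuration = 300_000_000u64.into();
    assert_eq!(at_runtime, CACHE_EXPIRY);
}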
@@ -262,7 +262,7 @@ impl RoutingTableInner {
     }
 
     /// Return our current node info timestamp
-    pub fn get_own_node_info_ts(&self, routing_domain: RoutingDomain) -> Option<u64> {
+    pub fn get_own_node_info_ts(&self, routing_domain: RoutingDomain) -> Option<Timestamp> {
         self.with_routing_domain(routing_domain, |rdd| {
             if !rdd.common().has_valid_own_node_info() {
                 None
@@ -440,7 +440,7 @@ impl RoutingTableInner {
 
     pub fn with_entries<T, F: FnMut(&RoutingTableInner, DHTKey, Arc<BucketEntry>) -> Option<T>>(
         &self,
-        cur_ts: u64,
+        cur_ts: Timestamp,
         min_state: BucketEntryState,
         mut f: F,
     ) -> Option<T> {
@@ -812,7 +812,7 @@ impl RoutingTableInner {
     pub fn find_peers_with_sort_and_filter<C, T, O>(
         &self,
         node_count: usize,
-        cur_ts: u64,
+        cur_ts: Timestamp,
         mut filters: VecDeque<RoutingTableEntryFilter>,
         mut compare: C,
         mut transform: T,
@@ -56,12 +56,12 @@ impl TransferStatsAccounting {
 
         self.current_transfer = TransferCount::default();
 
-        transfer_stats.down.maximum = 0;
-        transfer_stats.up.maximum = 0;
-        transfer_stats.down.minimum = u64::MAX;
-        transfer_stats.up.minimum = u64::MAX;
-        transfer_stats.down.average = 0;
-        transfer_stats.up.average = 0;
+        transfer_stats.down.maximum = 0.into();
+        transfer_stats.up.maximum = 0.into();
+        transfer_stats.down.minimum = u64::MAX.into();
+        transfer_stats.up.minimum = u64::MAX.into();
+        transfer_stats.down.average = 0.into();
+        transfer_stats.up.average = 0.into();
         for xfer in &self.rolling_transfers {
             let bpsd = xfer.down * 1000u64 / dur_ms;
             let bpsu = xfer.up * 1000u64 / dur_ms;
@@ -97,9 +97,9 @@ impl LatencyStatsAccounting {
         self.rolling_latencies.push_back(latency);
 
         let mut ls = LatencyStats {
-            fastest: u64::MAX,
-            average: 0,
-            slowest: 0,
+            fastest: u64::MAX.into(),
+            average: 0.into(),
+            slowest: 0.into(),
         };
         for rl in &self.rolling_latencies {
             ls.fastest.min_assign(*rl);
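In the stats accounting the reset values are written as 0.into() and u64::MAX.into(), which relies on a From<u64> conversion into the wrapper type (presumably provided on the underlying AlignedU64): the fields keep their units in the type while the literals stay plain. A minimal self-contained sketch of that pattern, with a simplified stand-in for the ByteCount wrapper and a hypothetical TransferStats struct:

#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
struct ByteCount(u64);

impl From<u64> for ByteCount {
    fn from(v: u64) -> Self {
        ByteCount(v)
    }
}

#[derive(Default, Debug)]
struct TransferStats {
    maximum: ByteCount,
    minimum: ByteCount,
    average: ByteCount,
}

fn reset(stats: &mut TransferStats) {
    // The integer literals infer u64 here because only From<u64> is provided.
    stats.maximum = 0.into();
    stats.minimum = u64::MAX.into();
    stats.average = 0.into();
}

fn main() {
    let mut stats = TransferStats::default();
    reset(&mut stats);
    println!("{:?}", stats);
}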
@@ -18,7 +18,7 @@ impl RoutingTable {
            .set_routine(move |s, l, t| {
                Box::pin(
                    this.clone()
-                        .rolling_transfers_task_routine(s, l, t)
+                        .rolling_transfers_task_routine(s, Timestamp::new(l), Timestamp::new(t))
                        .instrument(trace_span!(
                            parent: None,
                            "RoutingTable rolling transfers task routine"
@@ -35,7 +35,7 @@ impl RoutingTable {
            .set_routine(move |s, l, t| {
                Box::pin(
                    this.clone()
-                        .kick_buckets_task_routine(s, l, t)
+                        .kick_buckets_task_routine(s, Timestamp::new(l), Timestamp::new(t))
                        .instrument(trace_span!(parent: None, "kick buckets task routine")),
                )
            });
@@ -80,7 +80,7 @@ impl RoutingTable {
            .set_routine(move |s, l, t| {
                Box::pin(
                    this.clone()
-                        .ping_validator_task_routine(s, l, t)
+                        .ping_validator_task_routine(s, Timestamp::new(l), Timestamp::new(t))
                        .instrument(trace_span!(parent: None, "ping validator task routine")),
                )
            });
@@ -94,7 +94,7 @@ impl RoutingTable {
            .set_routine(move |s, l, t| {
                Box::pin(
                    this.clone()
-                        .relay_management_task_routine(s, l, t)
+                        .relay_management_task_routine(s, Timestamp::new(l), Timestamp::new(t))
                        .instrument(trace_span!(parent: None, "relay management task routine")),
                )
            });
@@ -108,7 +108,11 @@ impl RoutingTable {
            .set_routine(move |s, l, t| {
                Box::pin(
                    this.clone()
-                        .private_route_management_task_routine(s, l, t)
+                        .private_route_management_task_routine(
+                            s,
+                            Timestamp::new(l),
+                            Timestamp::new(t),
+                        )
                        .instrument(trace_span!(
                            parent: None,
                            "private route management task routine"
@@ -91,7 +91,7 @@ impl RoutingTable {
     #[instrument(level = "trace", skip(self), err)]
     fn ping_validator_local_network(
         &self,
-        cur_ts: u64,
+        cur_ts: Timestamp,
         unord: &mut FuturesUnordered<
             SendPinBoxFuture<Result<NetworkResult<Answer<Option<SenderInfo>>>, RPCError>>,
         >,
@@ -122,8 +122,8 @@ impl RoutingTable {
     pub(crate) async fn ping_validator_task_routine(
         self,
         stop_token: StopToken,
-        _last_ts: u64,
-        cur_ts: u64,
+        _last_ts: Timestamp,
+        cur_ts: Timestamp,
     ) -> EyreResult<()> {
         let mut unord = FuturesUnordered::new();
 
@@ -9,7 +9,7 @@ impl RPCOperationCancelTunnelQ {
     pub fn decode(
         reader: &veilid_capnp::operation_cancel_tunnel_q::Reader,
     ) -> Result<RPCOperationCancelTunnelQ, RPCError> {
-        let id = reader.get_id();
+        let id = TunnelId::new(reader.get_id());
 
         Ok(RPCOperationCancelTunnelQ { id })
     }
@@ -17,7 +17,7 @@ impl RPCOperationCancelTunnelQ {
         &self,
         builder: &mut veilid_capnp::operation_cancel_tunnel_q::Builder,
     ) -> Result<(), RPCError> {
-        builder.set_id(self.id);
+        builder.set_id(self.id.as_u64());
 
         Ok(())
     }
@@ -35,7 +35,7 @@ impl RPCOperationCancelTunnelA {
     ) -> Result<RPCOperationCancelTunnelA, RPCError> {
         match reader.which().map_err(RPCError::protocol)? {
             veilid_capnp::operation_cancel_tunnel_a::Which::Tunnel(r) => {
-                Ok(RPCOperationCancelTunnelA::Tunnel(r))
+                Ok(RPCOperationCancelTunnelA::Tunnel(TunnelId::new(r)))
             }
             veilid_capnp::operation_cancel_tunnel_a::Which::Error(r) => {
                 let tunnel_error = decode_tunnel_error(r.map_err(RPCError::protocol)?);
@@ -49,7 +49,7 @@ impl RPCOperationCancelTunnelA {
     ) -> Result<(), RPCError> {
         match self {
             RPCOperationCancelTunnelA::Tunnel(p) => {
-                builder.set_tunnel(*p);
+                builder.set_tunnel(p.as_u64());
             }
             RPCOperationCancelTunnelA::Error(e) => {
                 builder.set_error(encode_tunnel_error(*e));
@@ -12,7 +12,7 @@ impl RPCOperationCompleteTunnelQ {
     pub fn decode(
         reader: &veilid_capnp::operation_complete_tunnel_q::Reader,
     ) -> Result<RPCOperationCompleteTunnelQ, RPCError> {
-        let id = reader.get_id();
+        let id = TunnelId::new(reader.get_id());
         let local_mode = match reader.get_local_mode().map_err(RPCError::protocol)? {
             veilid_capnp::TunnelEndpointMode::Raw => TunnelMode::Raw,
             veilid_capnp::TunnelEndpointMode::Turn => TunnelMode::Turn,
@@ -32,7 +32,7 @@ impl RPCOperationCompleteTunnelQ {
         &self,
         builder: &mut veilid_capnp::operation_complete_tunnel_q::Builder,
     ) -> Result<(), RPCError> {
-        builder.set_id(self.id);
+        builder.set_id(self.id.as_u64());
         builder.set_local_mode(match self.local_mode {
             TunnelMode::Raw => veilid_capnp::TunnelEndpointMode::Raw,
             TunnelMode::Turn => veilid_capnp::TunnelEndpointMode::Turn,
@@ -11,7 +11,7 @@ impl RPCOperationStartTunnelQ {
     pub fn decode(
         reader: &veilid_capnp::operation_start_tunnel_q::Reader,
     ) -> Result<RPCOperationStartTunnelQ, RPCError> {
-        let id = reader.get_id();
+        let id = TunnelId::new(reader.get_id());
         let local_mode = match reader.get_local_mode().map_err(RPCError::protocol)? {
             veilid_capnp::TunnelEndpointMode::Raw => TunnelMode::Raw,
             veilid_capnp::TunnelEndpointMode::Turn => TunnelMode::Turn,
@@ -28,7 +28,7 @@ impl RPCOperationStartTunnelQ {
         &self,
         builder: &mut veilid_capnp::operation_start_tunnel_q::Builder,
     ) -> Result<(), RPCError> {
-        builder.set_id(self.id);
+        builder.set_id(self.id.as_u64());
         builder.set_local_mode(match self.local_mode {
             TunnelMode::Raw => veilid_capnp::TunnelEndpointMode::Raw,
             TunnelMode::Turn => veilid_capnp::TunnelEndpointMode::Turn,
@@ -58,8 +58,8 @@ pub fn encode_full_tunnel(
     full_tunnel: &FullTunnel,
     builder: &mut veilid_capnp::full_tunnel::Builder,
 ) -> Result<(), RPCError> {
-    builder.set_id(full_tunnel.id);
-    builder.set_timeout(full_tunnel.timeout);
+    builder.set_id(full_tunnel.id.as_u64());
+    builder.set_timeout(full_tunnel.timeout.as_u64());
     let mut l_builder = builder.reborrow().init_local();
     encode_tunnel_endpoint(&full_tunnel.local, &mut l_builder)?;
     let mut r_builder = builder.reborrow().init_remote();
@@ -70,8 +70,8 @@ pub fn encode_full_tunnel(
 pub fn decode_full_tunnel(
     reader: &veilid_capnp::full_tunnel::Reader,
 ) -> Result<FullTunnel, RPCError> {
-    let id = reader.get_id();
-    let timeout = reader.get_timeout();
+    let id = TunnelId::new(reader.get_id());
+    let timeout = TimestampDuration::new(reader.get_timeout());
     let l_reader = reader.get_local().map_err(RPCError::protocol)?;
     let local = decode_tunnel_endpoint(&l_reader)?;
     let r_reader = reader.get_remote().map_err(RPCError::protocol)?;
@@ -89,8 +89,8 @@ pub fn encode_partial_tunnel(
     partial_tunnel: &PartialTunnel,
     builder: &mut veilid_capnp::partial_tunnel::Builder,
 ) -> Result<(), RPCError> {
-    builder.set_id(partial_tunnel.id);
-    builder.set_timeout(partial_tunnel.timeout);
+    builder.set_id(partial_tunnel.id.as_u64());
+    builder.set_timeout(partial_tunnel.timeout.as_u64());
     let mut l_builder = builder.reborrow().init_local();
     encode_tunnel_endpoint(&partial_tunnel.local, &mut l_builder)?;
     Ok(())
@@ -99,8 +99,8 @@ pub fn encode_partial_tunnel(
 pub fn decode_partial_tunnel(
     reader: &veilid_capnp::partial_tunnel::Reader,
 ) -> Result<PartialTunnel, RPCError> {
-    let id = reader.get_id();
-    let timeout = reader.get_timeout();
+    let id = TunnelId::new(reader.get_id());
+    let timeout = TimestampDuration::new(reader.get_timeout());
     let l_reader = reader.get_local().map_err(RPCError::protocol)?;
     let local = decode_tunnel_endpoint(&l_reader)?;
 
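The Cap'n Proto wire format still carries these fields as plain u64, so every encoder unwraps with as_u64() and every decoder re-wraps with the typed constructor. A minimal round-trip sketch of that boundary, using simplified stand-ins for TunnelId and for the builder/reader (not the generated veilid_capnp API):

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct TunnelId(u64);

impl TunnelId {
    const fn new(v: u64) -> Self {
        TunnelId(v)
    }
    fn as_u64(self) -> u64 {
        self.0
    }
}

// Stand-in for a generated capnp message: on the wire it is just a u64 field.
#[derive(Default)]
struct WireTunnelQ {
    id: u64,
}

fn encode(id: TunnelId, msg: &mut WireTunnelQ) {
    // Unwrap exactly once at the serialization boundary.
    msg.id = id.as_u64();
}

fn decode(msg: &WireTunnelQ) -> TunnelId {
    // Re-wrap immediately so the rest of the code never sees a bare u64.
    TunnelId::new(msg.id)
}

fn main() {
    let mut msg = WireTunnelQ::default();
    let id = TunnelId::new(0x1234_5678_9abc_def0);
    encode(id, &mut msg);
    assert_eq!(decode(&msg), id);
}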
@@ -254,7 +254,7 @@ impl RPCProcessor {
         // set up channel
         let mut concurrency = c.network.rpc.concurrency;
         let queue_size = c.network.rpc.queue_size;
-        let timeout = ms_to_us(c.network.rpc.timeout_ms);
+        let timeout_us = TimestampDuration::new(ms_to_us(c.network.rpc.timeout_ms));
         let max_route_hop_count = c.network.rpc.max_route_hop_count as usize;
         if concurrency == 0 {
             concurrency = get_concurrency() / 2;
@@ -265,7 +265,7 @@ impl RPCProcessor {
         let validate_dial_info_receipt_time_ms = c.network.dht.validate_dial_info_receipt_time_ms;
 
         RPCProcessorUnlockedInner {
-            timeout_us: timeout,
+            timeout_us,
             queue_size,
             concurrency,
             max_route_hop_count,
@@ -879,8 +879,8 @@ impl RPCProcessor {
         let rss = self.routing_table.route_spec_store();
 
         // Get latency for all local routes
-        let mut total_local_latency = 0u64;
-        let total_latency = recv_ts.saturating_sub(send_ts);
+        let mut total_local_latency = TimestampDuration::new(0u64);
+        let total_latency: TimestampDuration = recv_ts.saturating_sub(send_ts);
 
         // If safety route was used, record route there
         if let Some(sr_pubkey) = &safety_route {
@@ -932,12 +932,12 @@ impl RPCProcessor {
            if let Some(sr_pubkey) = &safety_route {
                let rss = self.routing_table.route_spec_store();
                rss.with_route_stats(send_ts, sr_pubkey, |s| {
-                    s.record_latency(total_latency / 2);
+                    s.record_latency(total_latency / 2u64);
                });
            }
            if let Some(pr_pubkey) = &reply_private_route {
                rss.with_route_stats(send_ts, pr_pubkey, |s| {
-                    s.record_latency(total_latency / 2);
+                    s.record_latency(total_latency / 2u64);
                });
            }
        }
@@ -1365,7 +1365,7 @@ impl RPCProcessor {
                routing_domain,
            }),
            timestamp: get_aligned_timestamp(),
-            body_len: body.len() as u64,
+            body_len: ByteCount::new(body.len() as u64),
        },
        data: RPCMessageData { contents: body },
    };
@@ -89,6 +89,12 @@ impl<Rhs: Into<u64>> core::ops::Mul<Rhs> for AlignedU64 {
     }
 }
 
+impl<Rhs: Into<u64>> core::ops::MulAssign<Rhs> for AlignedU64 {
+    fn mul_assign(&mut self, rhs: Rhs) {
+        self.0 *= rhs.into();
+    }
+}
+
 impl<Rhs: Into<u64>> core::ops::Div<Rhs> for AlignedU64 {
     type Output = Self;
 
@@ -97,6 +103,12 @@ impl<Rhs: Into<u64>> core::ops::Div<Rhs> for AlignedU64 {
     }
 }
 
+impl<Rhs: Into<u64>> core::ops::DivAssign<Rhs> for AlignedU64 {
+    fn div_assign(&mut self, rhs: Rhs) {
+        self.0 /= rhs.into();
+    }
+}
+
 impl AlignedU64 {
     pub const fn new(v: u64) -> Self {
         Self(v)
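The core of this commit is the AlignedU64 wrapper that appears to back the Timestamp, TimestampDuration, ByteCount, OperationId, and TunnelId types used throughout the diff: a 64-bit value with a const constructor, an explicit as_u64() escape hatch, and arithmetic operators taking any Rhs: Into<u64>, here extended with MulAssign and DivAssign. A compact standalone sketch of that pattern (a simplified stand-in, not the actual veilid_core definition):

use core::ops::{Div, DivAssign, Mul, MulAssign};

// Simplified stand-in for the AlignedU64 pattern behind the typed wrappers.
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
struct AlignedU64(u64);

impl AlignedU64 {
    pub const fn new(v: u64) -> Self {
        Self(v)
    }
    pub fn as_u64(self) -> u64 {
        self.0
    }
    pub fn saturating_sub(self, rhs: Self) -> Self {
        Self(self.0.saturating_sub(rhs.0))
    }
}

impl<Rhs: Into<u64>> Mul<Rhs> for AlignedU64 {
    type Output = Self;
    fn mul(self, rhs: Rhs) -> Self {
        Self(self.0 * rhs.into())
    }
}

impl<Rhs: Into<u64>> MulAssign<Rhs> for AlignedU64 {
    fn mul_assign(&mut self, rhs: Rhs) {
        self.0 *= rhs.into();
    }
}

impl<Rhs: Into<u64>> Div<Rhs> for AlignedU64 {
    type Output = Self;
    fn div(self, rhs: Rhs) -> Self {
        Self(self.0 / rhs.into())
    }
}

impl<Rhs: Into<u64>> DivAssign<Rhs> for AlignedU64 {
    fn div_assign(&mut self, rhs: Rhs) {
        self.0 /= rhs.into();
    }
}

fn main() {
    // e.g. halving a latency, as in `total_latency / 2u64` above
    let latency = AlignedU64::new(10_000);
    assert_eq!((latency / 2u64).as_u64(), 5_000);

    let mut bytes = AlignedU64::new(8);
    bytes *= 4u64;
    assert_eq!(bytes.as_u64(), 32);
}

The Rhs: Into<u64> bound is also why the latency division in the RPC processor becomes total_latency / 2u64: a plain 2 would be inferred as i32, which does not implement Into<u64>.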
@@ -1961,7 +1961,7 @@ impl SignedRelayedNodeInfo {
         sig_bytes.append(&mut builder_to_vec(ri_msg).map_err(VeilidAPIError::internal)?);
 
         // Add timestamp to signature
-        sig_bytes.append(&mut timestamp.to_le_bytes().to_vec());
+        sig_bytes.append(&mut timestamp.as_u64().to_le_bytes().to_vec());
 
         Ok(sig_bytes)
     }
@@ -261,7 +261,7 @@ impl veilid_server::Server for VeilidServerImpl {
     ) -> Promise<(), ::capnp::Error> {
         trace!("VeilidServerImpl::app_call_reply");
 
-        let id = pry!(params.get()).get_id();
+        let id = OperationId::new(pry!(params.get()).get_id());
         let message = pry!(pry!(params.get()).get_message()).to_owned();
 
         let veilid_api = self.veilid_api.clone();