mirror of https://gitlab.com/veilid/veilid.git
synced 2024-11-22 08:56:58 -06:00

remove lease manager, add network class to infoq, other config stuff

This commit is contained in:
parent 53cd521ba8
commit a33473d8ea

Cargo.lock (generated, 12 lines changed)
@@ -269,17 +269,6 @@ dependencies = [
 "winapi",
]

[[package]]
name = "async-recursion"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2cda8f4bcc10624c4e85bc66b3f452cca98cfa5ca002dc83a16aad2367641bea"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "async-std"
version = "1.10.0"

@@ -4187,7 +4176,6 @@ dependencies = [
 "android_logger",
 "anyhow",
 "async-lock",
 "async-recursion",
 "async-std",
 "async-tls",
 "async-tungstenite 0.17.1",
external/hashlink (vendored, 2 lines changed)

@@ -1 +1 @@
Subproject commit 9841db4f9a8b4d6bd46af78927cd88e70befd3f4
Subproject commit c8da3a58485c850f4029a58de99b1af83112ba8a
@@ -34,7 +34,6 @@ directories = "^4"
once_cell = "^1"
json = "^0"
flume = { version = "^0", features = ["async"] }
async-recursion = "^1"

ed25519-dalek = { version = "^1", default_features = false, features = ["alloc", "u64_backend"] }
x25519-dalek = { package = "x25519-dalek-ng", version = "^1", default_features = false, features = ["u64_backend"] }
@@ -163,23 +163,28 @@ struct ValueData {
##############################

struct OperationInfoQ {
    nodeInfo @0 :NodeInfo;              # node info update about the infoq sender
}

enum NetworkClass {
    server @0;                          # S = Device with public IP and no UDP firewall
    mapped @1;                          # M = Device with portmap behind any NAT
    fullConeNAT @2;                     # F = Device without portmap behind full-cone NAT
    addressRestrictedNAT @3;            # R1 = Device without portmap behind address-only restricted NAT
    portRestrictedNAT @4;               # R2 = Device without portmap behind address-and-port restricted NAT
    outboundOnly @5;                    # O = Outbound only
    webApp @6;                          # W = PWA in either normal or tor web browser
    invalid @7;                         # X = Invalid
}

struct NodeInfo {
    canRoute @0 :Bool;
    networkClass @0 :NetworkClass;
    willRoute @1 :Bool;

    canTunnel @2 :Bool;
    willTunnel @3 :Bool;

    canSignalLease @4 :Bool;
    willSignalLease @5 :Bool;

    canRelayLease @6 :Bool;
    willRelayLease @7 :Bool;

    canValidateDialInfo @8 :Bool;
    willValidateDialInfo @9 :Bool;
    willTunnel @2 :Bool;
    willSignal @3 :Bool;
    willRelay @4 :Bool;
    willValidateDialInfo @5 :Bool;
}

struct SenderInfo {

@@ -368,8 +373,7 @@ struct Operation {

    respondTo :union {
        none @1 :Void;                  # no response is desired
        sender @2 :DialInfo;            # (Optional) envelope sender node id to be used for reply
                                        # possibly through a relay if the request arrived that way
        sender @2 :DialInfo;            # (Optional) the 'best' envelope-sender dial info to be used for reply (others may exist via findNodeQ)
        privateRoute @3 :PrivateRoute;  # embedded private route to be used for reply
    }
@@ -1,7 +1,6 @@
use super::*;

use crate::intf::*;
use crate::network_manager::*;
use crate::routing_table::*;
use crate::*;

@@ -178,7 +177,7 @@ impl Network {
        routing_table.register_dial_info(
            external1_dial_info,
            DialInfoOrigin::Discovered,
            Some(NetworkClass::FullNAT),
            Some(NetworkClass::FullConeNAT),
        );

        // No more retries
@@ -1,194 +0,0 @@
use crate::*;
use network_manager::*;
use routing_table::*;
use xx::*;

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum LeaseKind {
    Signal,
    Relay,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum RelayMode {
    Disabled,
    Inbound,
    Full,
}

pub struct LeaseDetails {}

pub struct LeaseManagerInner {
    network_manager: NetworkManager,
    max_server_signal_leases: usize,
    max_server_relay_leases: usize,
    max_client_signal_leases: usize,
    max_client_relay_leases: usize,
    // server_signal_leases: BTreeMap< //xxx :how will these be accounted for?
    client_relay_mode: RelayMode,
}

#[derive(Clone)]
pub struct LeaseManager {
    inner: Arc<Mutex<LeaseManagerInner>>,
}

impl LeaseManager {
    fn new_inner(network_manager: NetworkManager) -> LeaseManagerInner {
        LeaseManagerInner {
            network_manager,
            max_server_signal_leases: 1,
            max_server_relay_leases: 1,
            max_client_signal_leases: 1,
            max_client_relay_leases: 1,
            client_relay_mode: RelayMode::Disabled,
        }
    }

    pub fn new(network_manager: NetworkManager) -> Self {
        Self {
            inner: Arc::new(Mutex::new(Self::new_inner(network_manager))),
        }
    }

    pub fn network_manager(&self) -> NetworkManager {
        self.inner.lock().network_manager.clone()
    }

    pub async fn startup(&self) -> Result<(), String> {
        trace!("startup lease manager");
        // Retrieve config
        {
            let mut inner = self.inner.lock();
            let config = inner.network_manager.config();
            let c = config.get();
            inner.max_server_signal_leases = c.network.leases.max_server_signal_leases as usize;
            inner.max_server_relay_leases = c.network.leases.max_server_relay_leases as usize;
            inner.max_client_signal_leases = c.network.leases.max_client_signal_leases as usize;
            inner.max_client_relay_leases = c.network.leases.max_client_relay_leases as usize;
        }

        Ok(())
    }

    pub async fn tick(&self) -> Result<(), String> {
        //
        Ok(())
    }

    pub async fn shutdown(&self) {
        let network_manager = self.network_manager();
        *self.inner.lock() = Self::new_inner(network_manager);
    }

    ////////////////////////////////
    // Client-side

    // xxx: this should automatically get set when a lease is obtained and reset when it is released or lost or expires
    // pub fn client_set_relay_mode(&self, relay_mode: RelayMode) {
    //     self.inner.lock().client_relay_mode = relay_mode;
    // }

    pub fn client_get_relay_mode(&self) -> RelayMode {
        self.inner.lock().client_relay_mode
    }

    pub fn client_is_relay_peer_addr(&self, _peer_addr: PeerAddress) -> bool {
        error!("unimplemented");
        false
    }

    pub async fn client_request_lease(&self) -> Result<(), String> {
        Ok(())
    }

    ////////////////////////////////
    // Server-side

    // Signal leases
    pub fn server_has_valid_signal_lease(&self, _recipient_id: &DHTKey) -> Option<NodeRef> {
        error!("unimplemented");
        None
    }

    pub fn server_can_provide_signal_lease(&self) -> bool {
        let inner = self.inner.lock();
        if inner.max_server_signal_leases == 0 {
            return false;
        }
        if let Some(network_class) = inner.network_manager.get_network_class() {
            match network_class {
                NetworkClass::Server => true,
                NetworkClass::Mapped => true,
                NetworkClass::FullNAT => true,
                NetworkClass::AddressRestrictedNAT => false,
                NetworkClass::PortRestrictedNAT => false,
                NetworkClass::OutboundOnly => false,
                NetworkClass::WebApp => false,
                NetworkClass::TorWebApp => false,
                NetworkClass::Invalid => false,
            }
        } else {
            false
        }
    }

    pub fn server_will_provide_signal_lease(&self) -> bool {
        if !self.server_can_provide_signal_lease() {
            return false;
        }
        let inner = self.inner.lock();
        if inner.max_server_signal_leases == 0 {
            return false;
        }
        // xxx: check total number of signal leases active...
        // xxx: depends on who is asking?
        // signaling requires inbound ability, so check to see if we have public dial info
        let routing_table = inner.network_manager.routing_table();
        if !routing_table.has_global_dial_info() {
            return false;
        }

        true
    }

    // Relay leases
    pub fn server_has_valid_relay_lease(&self, _recipient_id: &DHTKey) -> Option<NodeRef> {
        error!("unimplemented");
        None
    }

    pub fn server_can_provide_relay_lease(&self) -> bool {
        let inner = self.inner.lock();
        if inner.max_server_signal_leases == 0 {
            return false;
        }
        if let Some(network_class) = inner.network_manager.get_network_class() {
            match network_class {
                NetworkClass::Server => true,
                NetworkClass::Mapped => true,
                NetworkClass::FullNAT => true,
                NetworkClass::AddressRestrictedNAT => false,
                NetworkClass::PortRestrictedNAT => false,
                NetworkClass::OutboundOnly => false,
                NetworkClass::WebApp => false,
                NetworkClass::TorWebApp => false,
                NetworkClass::Invalid => false,
            }
        } else {
            false
        }
        // xxx: also depends on network strength / bandwidth availability?
    }

    pub fn server_will_provide_relay_lease(&self) -> bool {
        if !self.server_can_provide_relay_lease() {
            return false;
        }
        let inner = self.inner.lock();
        if inner.max_server_relay_leases == 0 {
            return false;
        }
        // xxx: check total number of signal leases active...
        // xxx: depends on who is asking?
        // relaying requires inbound ability, so check to see if we have public dial info
        let routing_table = inner.network_manager.routing_table();
        if !routing_table.has_global_dial_info() {
            return false;
        }
        true
    }
}
@@ -12,7 +12,6 @@ mod connection_table;
mod core_context;
mod dht;
mod intf;
mod lease_manager;
mod network_connection;
mod network_manager;
mod receipt_manager;
@@ -59,7 +59,7 @@ impl DummyNetworkConnection {
pub struct NetworkConnectionStats {
    last_message_sent_time: Option<u64>,
    last_message_recv_time: Option<u64>,
    established_time: u64,
    _established_time: u64,
}

#[derive(Debug)]

@@ -92,7 +92,7 @@ impl NetworkConnection {
            stats: NetworkConnectionStats {
                last_message_sent_time: None,
                last_message_recv_time: None,
                established_time: intf::get_timestamp(),
                _established_time: intf::get_timestamp(),
            },
        }
    }
@@ -3,7 +3,6 @@ use connection_manager::*;
use dht::*;
use hashlink::LruCache;
use intf::*;
use lease_manager::*;
use receipt_manager::*;
use routing_table::*;
use rpc_processor::RPCProcessor;

@@ -15,32 +14,6 @@ pub const MAX_MESSAGE_SIZE: usize = MAX_ENVELOPE_SIZE;
pub const IPADDR_TABLE_SIZE: usize = 1024;
pub const IPADDR_MAX_INACTIVE_DURATION_US: u64 = 300_000_000u64; // 5 minutes

#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub enum NetworkClass {
    Server = 0,               // S = Device with public IP and no UDP firewall
    Mapped = 1,               // M = Device with portmap behind any NAT
    FullNAT = 2,              // F = Device without portmap behind full-cone NAT
    AddressRestrictedNAT = 3, // R1 = Device without portmap behind address-only restricted NAT
    PortRestrictedNAT = 4,    // R2 = Device without portmap behind address-and-port restricted NAT
    OutboundOnly = 5,         // O = Outbound only
    WebApp = 6,               // W = PWA in normal web browser
    TorWebApp = 7,            // T = PWA in Tor browser
    Invalid = 8,              // I = Invalid network class, unreachable or can not send packets
}

impl NetworkClass {
    pub fn inbound_capable(&self) -> bool {
        matches!(
            self,
            Self::Server
                | Self::Mapped
                | Self::FullNAT
                | Self::AddressRestrictedNAT
                | Self::PortRestrictedNAT
        )
    }
}

#[derive(Copy, Clone, Debug, Default)]
pub struct ProtocolConfig {
    pub udp_enabled: bool,

@@ -78,7 +51,6 @@ struct NetworkComponents {
    net: Network,
    connection_manager: ConnectionManager,
    rpc_processor: RPCProcessor,
    lease_manager: LeaseManager,
    receipt_manager: ReceiptManager,
}

@@ -114,12 +86,17 @@ impl Default for NetworkManagerStats {
        }
    }
}

struct ClientWhitelistEntry {
    last_seen: u64,
}

// The mutable state of the network manager
struct NetworkManagerInner {
    routing_table: Option<RoutingTable>,
    components: Option<NetworkComponents>,
    network_class: Option<NetworkClass>,
    stats: NetworkManagerStats,
    client_whitelist: LruCache<key::DHTKey, ClientWhitelistEntry>,
}

struct NetworkManagerUnlockedInner {

@@ -143,6 +120,7 @@ impl NetworkManager {
            components: None,
            network_class: None,
            stats: NetworkManagerStats::default(),
            client_whitelist: LruCache::new_unbounded(),
        }
    }
    fn new_unlocked_inner(_config: VeilidConfig) -> NetworkManagerUnlockedInner {

@@ -195,15 +173,6 @@ impl NetworkManager {
            .rpc_processor
            .clone()
    }
    pub fn lease_manager(&self) -> LeaseManager {
        self.inner
            .lock()
            .components
            .as_ref()
            .unwrap()
            .lease_manager
            .clone()
    }
    pub fn receipt_manager(&self) -> ReceiptManager {
        self.inner
            .lock()

@@ -250,19 +219,16 @@ impl NetworkManager {
        let net = Network::new(self.clone());
        let connection_manager = ConnectionManager::new(self.clone());
        let rpc_processor = RPCProcessor::new(self.clone());
        let lease_manager = LeaseManager::new(self.clone());
        let receipt_manager = ReceiptManager::new(self.clone());
        self.inner.lock().components = Some(NetworkComponents {
            net: net.clone(),
            connection_manager: connection_manager.clone(),
            rpc_processor: rpc_processor.clone(),
            lease_manager: lease_manager.clone(),
            receipt_manager: receipt_manager.clone(),
        });

        // Start network components
        rpc_processor.startup().await?;
        lease_manager.startup().await?;
        receipt_manager.startup().await?;
        net.startup().await?;
        connection_manager.startup().await;

@@ -289,7 +255,6 @@ impl NetworkManager {
        components.connection_manager.shutdown().await;
        components.net.shutdown().await;
        components.receipt_manager.shutdown().await;
        components.lease_manager.shutdown().await;
        components.rpc_processor.shutdown().await;
    }
@@ -301,14 +266,54 @@ impl NetworkManager {
        trace!("NetworkManager::shutdown end");
    }

    pub fn update_client_whitelist(&self, client: key::DHTKey) {
        let mut inner = self.inner.lock();
        match inner.client_whitelist.entry(client) {
            hashlink::lru_cache::Entry::Occupied(mut entry) => {
                entry.get_mut().last_seen = intf::get_timestamp()
            }
            hashlink::lru_cache::Entry::Vacant(entry) => {
                entry.insert(ClientWhitelistEntry {
                    last_seen: intf::get_timestamp(),
                });
            }
        }
    }

    pub fn check_client_whitelist(&self, client: key::DHTKey) -> bool {
        let mut inner = self.inner.lock();

        match inner.client_whitelist.entry(client) {
            hashlink::lru_cache::Entry::Occupied(mut entry) => {
                entry.get_mut().last_seen = intf::get_timestamp();
                true
            }
            hashlink::lru_cache::Entry::Vacant(_) => false,
        }
    }

    pub fn purge_client_whitelist(&self) {
        let timeout_ms = self.config.get().network.client_whitelist_timeout_ms;
        let mut inner = self.inner.lock();
        let cutoff_timestamp = intf::get_timestamp() - ((timeout_ms as u64) * 1000u64);
        // Remove clients from the whitelist that haven't been seen since our whitelist timeout
        while inner
            .client_whitelist
            .peek_lru()
            .map(|v| v.1.last_seen < cutoff_timestamp)
            .unwrap_or_default()
        {
            inner.client_whitelist.remove_lru();
        }
    }

    pub async fn tick(&self) -> Result<(), String> {
        let (routing_table, net, lease_manager, receipt_manager) = {
        let (routing_table, net, receipt_manager) = {
            let inner = self.inner.lock();
            let components = inner.components.as_ref().unwrap();
            (
                inner.routing_table.as_ref().unwrap().clone(),
                components.net.clone(),
                components.lease_manager.clone(),
                components.receipt_manager.clone(),
            )
        };

@@ -326,12 +331,12 @@ impl NetworkManager {
        // Run the low level network tick
        net.tick().await?;

        // Run the lease manager tick
        lease_manager.tick().await?;

        // Run the receipt manager tick
        receipt_manager.tick().await?;

        // Purge the client whitelist
        self.purge_client_whitelist();

        Ok(())
    }
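A note on units in purge_client_whitelist() above: the cutoff multiplies the configured client_whitelist_timeout_ms by 1000 because intf::get_timestamp() appears to report microseconds (compare IPADDR_MAX_INACTIVE_DURATION_US = 300_000_000u64 for five minutes earlier in this file). A small illustrative calculation, assuming the 300_000 ms default introduced by the config changes later in this commit:

    // Illustrative only: 300_000 ms * 1000 = 300_000_000 us, i.e. five minutes of retention.
    let timeout_ms: u64 = 300_000;
    let cutoff_us: u64 = intf::get_timestamp() - timeout_ms * 1000u64;
    // Whitelist entries whose last_seen is older than cutoff_us get evicted from the LRU.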
@@ -344,6 +349,26 @@ impl NetworkManager {
        }
    }

    // Get our node's capabilities
    pub fn generate_node_info(&self) -> NodeInfo {
        let network_class = self.get_network_class().unwrap_or(NetworkClass::Invalid);

        let will_route = network_class.can_relay(); // xxx: eventually this may have more criteria added
        let will_tunnel = network_class.can_relay(); // xxx: we may want to restrict by battery life and network bandwidth at some point
        let will_signal = network_class.can_signal();
        let will_relay = network_class.can_relay();
        let will_validate_dial_info = network_class.can_validate_dial_info();

        NodeInfo {
            network_class,
            will_route,
            will_tunnel,
            will_signal,
            will_relay,
            will_validate_dial_info,
        }
    }

    // Return what protocols we have enabled
    pub fn get_protocol_config(&self) -> Option<ProtocolConfig> {
        if let Some(components) = &self.inner.lock().components {

@@ -514,61 +539,19 @@ impl NetworkManager {
            return Ok(true);
        }

        // Decode envelope header
        // Decode envelope header (may fail signature validation)
        let envelope =
            Envelope::from_data(data).map_err(|_| "envelope failed to decode".to_owned())?;

        // Get routing table and rpc processor
        let (routing_table, lease_manager, rpc) = {
        let (routing_table, rpc) = {
            let inner = self.inner.lock();
            (
                inner.routing_table.as_ref().unwrap().clone(),
                inner.components.as_ref().unwrap().lease_manager.clone(),
                inner.components.as_ref().unwrap().rpc_processor.clone(),
            )
        };

        // Peek at header and see if we need to send this to a relay lease
        // If the recipient id is not our node id, then it needs relaying
        let sender_id = envelope.get_sender_id();
        let recipient_id = envelope.get_recipient_id();
        if recipient_id != routing_table.node_id() {
            // Ensure a lease exists for this node before we relay it
            let relay_nr = if let Some(lease_nr) =
                lease_manager.server_has_valid_relay_lease(&recipient_id)
            {
                // Inbound lease
                lease_nr
            } else if let Some(lease_nr) = lease_manager.server_has_valid_relay_lease(&sender_id) {
                // Resolve the node to send this to
                rpc.resolve_node(recipient_id, Some(lease_nr.clone())).await.map_err(|e| {
                    format!(
                        "failed to resolve recipient node for relay, dropping outbound relayed packet...: {:?}",
                        e
                    )
                })?
            } else {
                return Err("received envelope not intended for this node".to_owned());
            };

            // Re-send the packet to the leased node
            self.net()
                .send_data(relay_nr, data.to_vec())
                .await
                .map_err(|e| format!("failed to forward envelope: {}", e))?;
            // Inform caller that we dealt with the envelope, but did not process it locally
            return Ok(false);
        }

        // DH to get decryption key (cached)
        let node_id_secret = routing_table.node_id_secret();

        // Decrypt the envelope body
        // xxx: punish nodes that send messages that fail to decrypt eventually
        let body = envelope
            .decrypt_body(self.crypto(), data, &node_id_secret)
            .map_err(|_| "failed to decrypt envelope body".to_owned())?;

        // Get timestamp range
        let (tsbehind, tsahead) = {
            let c = self.config.get();

@@ -598,6 +581,74 @@ impl NetworkManager {
            }
        }

        // Peek at header and see if we need to relay this
        // If the recipient id is not our node id, then it needs relaying
        let sender_id = envelope.get_sender_id();
        let recipient_id = envelope.get_recipient_id();
        if recipient_id != routing_table.node_id() {
            // See if the source node is allowed to resolve nodes
            // This is a costly operation, so only outbound-relay permitted
            // nodes are allowed to do this, for example PWA users

            let relay_nr = if self.check_client_whitelist(sender_id) {
                // Cache the envelope information in the routing table
                // let source_noderef = routing_table
                //     .register_node_with_existing_connection(envelope.get_sender_id(), descriptor, ts)
                //     .map_err(|e| format!("node id registration failed: {}", e))?;
                // source_noderef.operate(|e| e.set_min_max_version(envelope.get_min_max_version()));

                // If the sender is in the client whitelist, allow a full resolve_node,
                // which effectively lets the client use our routing table
                rpc.resolve_node(recipient_id).await.map_err(|e| {
                    format!(
                        "failed to resolve recipient node for relay, dropping outbound relayed packet...: {:?}",
                        e
                    )
                }).map_err(logthru_net!())?
            } else {
                // If this is not a node in the client whitelist, only allow inbound relay
                // which only performs a lightweight lookup before passing the packet back out

                // See if we have the node in our routing table
                // We should, because relays are chosen by nodes that have established connectivity and
                // should be mutually in each others routing tables. The node needing the relay will be
                // pinging this node regularly to keep itself in the routing table
                if let Some(nr) = routing_table.lookup_node_ref(recipient_id) {
                    // ensure we have dial_info for the entry already,
                    if !nr.operate(|e| e.dial_infos().is_empty()) {
                        nr
                    } else {
                        return Err(format!(
                            "Inbound relay asked for recipient with no dial info: {}",
                            recipient_id
                        ));
                    }
                } else {
                    return Err(format!(
                        "Inbound relay asked for recipient not in routing table: {}",
                        recipient_id
                    ));
                }
            };

            // Re-send the packet to the leased node
            self.net()
                .send_data(relay_nr, data.to_vec())
                .await
                .map_err(|e| format!("failed to forward envelope: {}", e))?;
            // Inform caller that we dealt with the envelope, but did not process it locally
            return Ok(false);
        }

        // DH to get decryption key (cached)
        let node_id_secret = routing_table.node_id_secret();

        // Decrypt the envelope body
        // xxx: punish nodes that send messages that fail to decrypt eventually
        let body = envelope
            .decrypt_body(self.crypto(), data, &node_id_secret)
            .map_err(|_| "failed to decrypt envelope body".to_owned())?;

        // Cache the envelope information in the routing table
        let source_noderef = routing_table
            .register_node_with_existing_connection(envelope.get_sender_id(), descriptor, ts)
@@ -1,5 +1,6 @@
mod address;
mod dial_info;
mod network_class;
mod node_dial_info;
mod node_info;
mod nonce;

@@ -11,6 +12,7 @@ mod socket_address;

pub use address::*;
pub use dial_info::*;
pub use network_class::*;
pub use node_dial_info::*;
pub use node_info::*;
pub use nonce::*;
veilid-core/src/rpc_processor/coders/network_class.rs (new file, 27 lines)

@@ -0,0 +1,27 @@
use crate::*;

pub fn encode_network_class(network_class: NetworkClass) -> veilid_capnp::NetworkClass {
    match network_class {
        NetworkClass::Server => veilid_capnp::NetworkClass::Server,
        NetworkClass::Mapped => veilid_capnp::NetworkClass::Mapped,
        NetworkClass::FullConeNAT => veilid_capnp::NetworkClass::FullConeNAT,
        NetworkClass::AddressRestrictedNAT => veilid_capnp::NetworkClass::AddressRestrictedNAT,
        NetworkClass::PortRestrictedNAT => veilid_capnp::NetworkClass::PortRestrictedNAT,
        NetworkClass::OutboundOnly => veilid_capnp::NetworkClass::OutboundOnly,
        NetworkClass::WebApp => veilid_capnp::NetworkClass::WebApp,
        NetworkClass::Invalid => veilid_capnp::NetworkClass::Invalid,
    }
}

pub fn decode_network_class(network_class: veilid_capnp::NetworkClass) -> NetworkClass {
    match network_class {
        veilid_capnp::NetworkClass::Server => NetworkClass::Server,
        veilid_capnp::NetworkClass::Mapped => NetworkClass::Mapped,
        veilid_capnp::NetworkClass::FullConeNAT => NetworkClass::FullConeNAT,
        veilid_capnp::NetworkClass::AddressRestrictedNAT => NetworkClass::AddressRestrictedNAT,
        veilid_capnp::NetworkClass::PortRestrictedNAT => NetworkClass::PortRestrictedNAT,
        veilid_capnp::NetworkClass::OutboundOnly => NetworkClass::OutboundOnly,
        veilid_capnp::NetworkClass::WebApp => NetworkClass::WebApp,
        veilid_capnp::NetworkClass::Invalid => NetworkClass::Invalid,
    }
}
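These two coders are meant to be exact inverses of each other. A minimal round-trip sketch, illustrative only, assuming the one-to-one mapping above and the PartialEq derive on NetworkClass added elsewhere in this commit:

    // Illustrative only: a class should survive a trip through the capnp wire enum and back.
    let nc = NetworkClass::PortRestrictedNAT;
    assert_eq!(decode_network_class(encode_network_class(nc)), nc);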
@@ -5,19 +5,11 @@ pub fn encode_node_info(
    node_info: &NodeInfo,
    builder: &mut veilid_capnp::node_info::Builder,
) -> Result<(), RPCError> {
    builder.set_can_route(node_info.can_route);
    builder.set_network_class(encode_network_class(node_info.network_class));
    builder.set_will_route(node_info.will_route);

    builder.set_can_tunnel(node_info.can_tunnel);
    builder.set_will_tunnel(node_info.will_tunnel);

    builder.set_can_signal_lease(node_info.can_signal_lease);
    builder.set_will_signal_lease(node_info.will_signal_lease);

    builder.set_can_relay_lease(node_info.can_relay_lease);
    builder.set_will_relay_lease(node_info.will_relay_lease);

    builder.set_can_validate_dial_info(node_info.can_validate_dial_info);
    builder.set_will_signal(node_info.will_signal);
    builder.set_will_relay(node_info.will_relay);
    builder.set_will_validate_dial_info(node_info.will_validate_dial_info);

    Ok(())

@@ -25,15 +17,16 @@ pub fn encode_node_info(

pub fn decode_node_info(reader: &veilid_capnp::node_info::Reader) -> Result<NodeInfo, RPCError> {
    Ok(NodeInfo {
        can_route: reader.reborrow().get_can_route(),
        network_class: decode_network_class(
            reader
                .reborrow()
                .get_network_class()
                .map_err(map_error_capnp_notinschema!())?,
        ),
        will_route: reader.reborrow().get_will_route(),
        can_tunnel: reader.reborrow().get_can_tunnel(),
        will_tunnel: reader.reborrow().get_will_tunnel(),
        can_signal_lease: reader.reborrow().get_can_signal_lease(),
        will_signal_lease: reader.reborrow().get_will_signal_lease(),
        can_relay_lease: reader.reborrow().get_can_relay_lease(),
        will_relay_lease: reader.reborrow().get_will_relay_lease(),
        can_validate_dial_info: reader.reborrow().get_can_validate_dial_info(),
        will_signal: reader.reborrow().get_will_signal(),
        will_relay: reader.reborrow().get_will_relay(),
        will_validate_dial_info: reader.reborrow().get_will_validate_dial_info(),
    })
}
@@ -13,7 +13,6 @@ use capnp::message::ReaderSegments;
use coders::*;
use core::convert::{TryFrom, TryInto};
use core::fmt;
use lease_manager::*;
use network_manager::*;
use receipt_manager::*;
use routing_table::*;

@@ -92,6 +91,7 @@ struct RPCMessage {
struct RPCMessageReader {
    header: RPCMessageHeader,
    reader: capnp::message::Reader<RPCMessageData>,
    opt_sender_nr: Option<NodeRef>,
}

fn builder_to_vec<'a, T>(builder: capnp::message::Builder<T>) -> Result<Vec<u8>, RPCError>

@@ -264,7 +264,6 @@ impl RPCProcessor {
    pub fn resolve_node(
        &self,
        node_id: key::DHTKey,
        lease_holder: Option<NodeRef>,
    ) -> SystemPinBoxFuture<Result<NodeRef, RPCError>> {
        let this = self.clone();
        Box::pin(async move {

@@ -279,26 +278,6 @@ impl RPCProcessor {
                }
            }

            // If not, if we are resolving on behalf of a lease holder, ask them for their routing table around the node first
            if let Some(lhnr) = lease_holder {
                let fna = this
                    .clone()
                    .rpc_call_find_node(
                        Destination::Direct(lhnr.clone()),
                        node_id,
                        None,
                        RespondTo::Sender(None),
                    )
                    .await?;
                if let Ok(nrefs) = routing_table.register_find_node_answer(fna) {
                    for nr in nrefs {
                        if !nr.operate(|e| e.dial_infos().is_empty()) {
                            return Ok(nr);
                        }
                    }
                }
            }

            // If nobody knows where this node is, ask the DHT for it
            let (count, fanout, timeout) = {
                let c = this.config.get();

@@ -541,7 +520,7 @@ impl RPCProcessor {
        let node_ref = match out_noderef {
            None => {
                // resolve node
                self.resolve_node(out_node_id, None)
                self.resolve_node(out_node_id)
                    .await
                    .map_err(logthru_rpc!(error))?
            }

@@ -739,7 +718,7 @@ impl RPCProcessor {
        let node_ref = match out_noderef {
            None => {
                // resolve node
                self.resolve_node(out_node_id, None).await?
                self.resolve_node(out_node_id).await?
            }
            Some(nr) => {
                // got the node in the routing table already

@@ -768,8 +747,8 @@ impl RPCProcessor {
        Ok(())
    }

    fn wants_answer(&self, request: &veilid_capnp::operation::Reader) -> Result<bool, RPCError> {
        match request.get_respond_to().which() {
    fn wants_answer(&self, operation: &veilid_capnp::operation::Reader) -> Result<bool, RPCError> {
        match operation.get_respond_to().which() {
            Ok(veilid_capnp::operation::respond_to::None(_)) => Ok(false),
            Ok(veilid_capnp::operation::respond_to::Sender(_)) => Ok(true),
            Ok(veilid_capnp::operation::respond_to::PrivateRoute(_)) => Ok(true),
@@ -777,72 +756,24 @@ impl RPCProcessor {
        }
    }

    fn can_validate_dial_info(&self) -> bool {
        let nman = self.network_manager();
        if let Some(nc) = nman.get_network_class() {
            match nc {
                NetworkClass::Server => true,
                NetworkClass::Mapped => true,
                NetworkClass::FullNAT => true,
                NetworkClass::AddressRestrictedNAT => false,
                NetworkClass::PortRestrictedNAT => false,
                NetworkClass::OutboundOnly => false,
                NetworkClass::WebApp => false,
                NetworkClass::TorWebApp => false,
                NetworkClass::Invalid => false,
            }
    fn get_respond_to_sender_dial_info(
        &self,
        operation: &veilid_capnp::operation::Reader,
    ) -> Result<Option<DialInfo>, RPCError> {
        if let veilid_capnp::operation::respond_to::Sender(Ok(sender_di_reader)) = operation
            .get_respond_to()
            .which()
            .map_err(map_error_capnp_notinschema!())?
        {
            // Sender DialInfo was specified, update our routing table with it
            Ok(Some(decode_dial_info(&sender_di_reader)?))
        } else {
            false
            Ok(None)
        }
    }

    fn will_validate_dial_info(&self) -> bool {
        if !self.can_validate_dial_info() {
            return false;
        }

        // only accept info redirects if we aren't using a relay lease
        // which means our dial info refers to our own actual ip address and not some other node
        let nman = self.network_manager();
        let lman = nman.lease_manager();
        if lman.client_get_relay_mode() != RelayMode::Disabled {
            return false;
        }
        // xxx: bandwidth limiting here, don't commit to doing info redirects if our network quality sucks

        true
    }
    //////////////////////////////////////////////////////////////////////

    fn generate_node_info(&self) -> NodeInfo {
        let nman = self.network_manager();
        let lman = nman.lease_manager();

        let can_route = false; // xxx: until we implement this we dont have accounting for it
        let will_route = false;
        let can_tunnel = false; // xxx: until we implement this we dont have accounting for it
        let will_tunnel = false;
        let can_signal_lease = lman.server_can_provide_signal_lease();
        let will_signal_lease = lman.server_will_provide_signal_lease();
        let can_relay_lease = lman.server_can_provide_relay_lease();
        let will_relay_lease = lman.server_will_provide_relay_lease();
        let can_validate_dial_info = self.can_validate_dial_info();
        let will_validate_dial_info = self.will_validate_dial_info();

        NodeInfo {
            can_route,
            will_route,
            can_tunnel,
            will_tunnel,
            can_signal_lease,
            will_signal_lease,
            can_relay_lease,
            will_relay_lease,
            can_validate_dial_info,
            will_validate_dial_info,
        }
    }

    fn generate_sender_info(&self, rpcreader: &RPCMessageReader) -> SenderInfo {
        let socket_address = rpcreader
            .header

@@ -865,6 +796,27 @@ impl RPCProcessor {
            return Ok(());
        }

        // get InfoQ reader
        let iq_reader = match operation.get_detail().which() {
            Ok(veilid_capnp::operation::detail::Which::InfoQ(Ok(x))) => x,
            _ => panic!("invalid operation type in process_info_q"),
        };

        // Parse out fields
        let node_info = decode_node_info(
            &iq_reader
                .get_node_info()
                .map_err(map_error_internal!("no valid node info"))?,
        )?;

        // add node information for the requesting node to our routing table
        if let Some(sender_nr) = rpcreader.opt_sender_nr.clone() {
            // Update latest node info in routing table for the infoq sender
            sender_nr.operate(|e| {
                e.update_node_info(node_info);
            });
        }

        // Send info answer
        let mut reply_msg = ::capnp::message::Builder::new_default();
        let mut answer = reply_msg.init_root::<veilid_capnp::operation::Builder>();

@@ -874,7 +826,7 @@ impl RPCProcessor {
        let detail = answer.reborrow().init_detail();
        let mut info_a = detail.init_info_a();
        // Add node info
        let node_info = self.generate_node_info();
        let node_info = self.network_manager().generate_node_info();
        let mut nib = info_a.reborrow().init_node_info();
        encode_node_info(&node_info, &mut nib)?;
        // Add sender info
@@ -1189,14 +1141,9 @@ impl RPCProcessor {
    //////////////////////////////////////////////////////////////////////
    async fn process_rpc_message_version_0(&self, msg: RPCMessage) -> Result<(), RPCError> {
        let reader = capnp::message::Reader::new(msg.data, Default::default());
        let rpcreader = RPCMessageReader {
            header: msg.header,
            reader,
        };

        let mut opt_sender_nr: Option<NodeRef> = None;
        let which = {
            let operation = rpcreader
                .reader
            let operation = reader
                .get_root::<veilid_capnp::operation::Reader>()
                .map_err(map_error_capnp_error!())
                .map_err(logthru_rpc!())?;

@@ -1237,41 +1184,35 @@ impl RPCProcessor {
            // Accounting for questions we receive
            if is_q {
                // See if we have some Sender DialInfo to incorporate
                let opt_sender_nr =
                    if let veilid_capnp::operation::respond_to::Sender(Ok(sender_di_reader)) =
                        operation
                            .get_respond_to()
                            .which()
                            .map_err(map_error_capnp_notinschema!())?
                    {
                opt_sender_nr =
                    if let Some(sender_di) = self.get_respond_to_sender_dial_info(&operation)? {
                        // Sender DialInfo was specified, update our routing table with it
                        let sender_di = decode_dial_info(&sender_di_reader)?;
                        let nr = self
                            .routing_table()
                            .update_node_with_single_dial_info(
                                rpcreader.header.envelope.get_sender_id(),
                                msg.header.envelope.get_sender_id(),
                                &sender_di,
                            )
                            .map_err(RPCError::Internal)?;
                        Some(nr)
                    } else {
                        // look up sender node, in case it's different than our peer due to relaying
                        self.routing_table()
                            .lookup_node_ref(rpcreader.header.envelope.get_sender_id())
                            .lookup_node_ref(msg.header.envelope.get_sender_id())
                    };

                // look up sender node, in case it's different than our peer due to relaying
                if let Some(sender_nr) = opt_sender_nr {
                if let Some(sender_nr) = opt_sender_nr.clone() {
                    if which == 0u32 {
                        self.routing_table().stats_ping_rcvd(
                            sender_nr,
                            rpcreader.header.timestamp,
                            rpcreader.header.body_len,
                            msg.header.timestamp,
                            msg.header.body_len,
                        );
                    } else {
                        self.routing_table().stats_question_rcvd(
                            sender_nr,
                            rpcreader.header.timestamp,
                            rpcreader.header.body_len,
                            msg.header.timestamp,
                            msg.header.body_len,
                        );
                    }
                }

@@ -1280,6 +1221,12 @@ impl RPCProcessor {
            which
        };

        let rpcreader = RPCMessageReader {
            header: msg.header,
            reader,
            opt_sender_nr,
        };

        match which {
            0 => self.process_info_q(rpcreader).await, // InfoQ
            1 => self.process_answer(rpcreader).await, // InfoA

@@ -1430,7 +1377,10 @@ impl RPCProcessor {
            self.get_respond_to_sender(peer.clone())
                .encode(&mut respond_to)?;
            let detail = question.reborrow().init_detail();
            detail.init_info_q();
            let mut iqb = detail.init_info_q();
            let mut node_info_builder = iqb.reborrow().init_node_info();
            let node_info = self.network_manager().generate_node_info();
            encode_node_info(&node_info, &mut node_info_builder)?;

            info_q_msg.into_reader()
        };
@@ -188,6 +188,7 @@ fn config_callback(key: String) -> ConfigCallbackReturn {
        "network.max_connections" => Ok(Box::new(16u32)),
        "network.connection_initial_timeout_ms" => Ok(Box::new(2_000u32)),
        "network.connection_inactivity_timeout_ms" => Ok(Box::new(60_000u32)),
        "network.client_whitelist_timeout_ms" => Ok(Box::new(300_000u32)),
        "network.node_id" => Ok(Box::new(dht::key::DHTKey::default())),
        "network.node_id_secret" => Ok(Box::new(dht::key::DHTKeySecret::default())),
        "network.bootstrap" => Ok(Box::new(Vec::<String>::new())),

@@ -298,6 +299,7 @@ pub async fn test_config() {
    assert_eq!(inner.network.max_connections, 16);
    assert_eq!(inner.network.connection_initial_timeout_ms, 2_000u32);
    assert_eq!(inner.network.connection_inactivity_timeout_ms, 60_000u32);
    assert_eq!(inner.network.client_whitelist_timeout_ms, 300_000u32);
    assert!(!inner.network.node_id.valid);
    assert!(!inner.network.node_id_secret.valid);
    assert_eq!(inner.network.bootstrap, Vec::<String>::new());
@@ -231,17 +231,62 @@ pub struct SenderInfo {
    pub socket_address: Option<SocketAddress>,
}

#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize)]
pub enum NetworkClass {
    Server = 0,               // S = Device with public IP and no UDP firewall
    Mapped = 1,               // M = Device with portmap behind any NAT
    FullConeNAT = 2,          // F = Device without portmap behind full-cone NAT
    AddressRestrictedNAT = 3, // R1 = Device without portmap behind address-only restricted NAT
    PortRestrictedNAT = 4,    // R2 = Device without portmap behind address-and-port restricted NAT
    OutboundOnly = 5,         // O = Outbound only
    WebApp = 6,               // W = PWA
    Invalid = 7,              // I = Invalid network class, unreachable or can not send packets
}

impl NetworkClass {
    pub fn inbound_capable(&self) -> bool {
        matches!(
            self,
            Self::Server
                | Self::Mapped
                | Self::FullConeNAT
                | Self::AddressRestrictedNAT
                | Self::PortRestrictedNAT
        )
    }
    pub fn inbound_requires_signal(&self) -> bool {
        matches!(self, Self::AddressRestrictedNAT | Self::PortRestrictedNAT)
    }
    pub fn dialinfo_requires_keepalive(&self) -> bool {
        matches!(
            self,
            Self::FullConeNAT | Self::AddressRestrictedNAT | Self::PortRestrictedNAT
        )
    }
    pub fn can_signal(&self) -> bool {
        self.inbound_capable() && !self.inbound_requires_signal()
    }
    pub fn can_relay(&self) -> bool {
        matches!(self, Self::Server | Self::Mapped | Self::FullConeNAT)
    }
    pub fn can_validate_dial_info(&self) -> bool {
        matches!(self, Self::Server | Self::Mapped | Self::FullConeNAT)
    }
}

impl Default for NetworkClass {
    fn default() -> Self {
        Self::Invalid
    }
}

#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct NodeInfo {
    pub can_route: bool,
    pub network_class: NetworkClass,
    pub will_route: bool,
    pub can_tunnel: bool,
    pub will_tunnel: bool,
    pub can_signal_lease: bool,
    pub will_signal_lease: bool,
    pub can_relay_lease: bool,
    pub will_relay_lease: bool,
    pub can_validate_dial_info: bool,
    pub will_signal: bool,
    pub will_relay: bool,
    pub will_validate_dial_info: bool,
}
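The will_* flags advertised in NodeInfo are not set independently; they are derived from the network class by generate_node_info() in the network manager, shown earlier in this diff. A minimal sketch of that derivation, illustrative only and assuming the post-change NodeInfo fields match the ones that generate_node_info() constructs:

    // Illustrative only: mirrors generate_node_info() from this commit.
    let network_class = NetworkClass::FullConeNAT;
    let node_info = NodeInfo {
        network_class,
        will_route: network_class.can_relay(),
        will_tunnel: network_class.can_relay(),
        will_signal: network_class.can_signal(),
        will_relay: network_class.can_relay(),
        will_validate_dial_info: network_class.can_validate_dial_info(),
    };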
@@ -138,6 +138,7 @@ pub struct VeilidConfigNetwork {
    pub max_connections: u32,
    pub connection_initial_timeout_ms: u32,
    pub connection_inactivity_timeout_ms: u32,
    pub client_whitelist_timeout_ms: u32,
    pub node_id: key::DHTKey,
    pub node_id_secret: key::DHTKeySecret,
    pub bootstrap: Vec<String>,

@@ -294,6 +295,7 @@ impl VeilidConfig {
        get_config!(inner.network.max_connections);
        get_config!(inner.network.connection_initial_timeout_ms);
        get_config!(inner.network.connection_inactivity_timeout_ms);
        get_config!(inner.network.client_whitelist_timeout_ms);
        get_config!(inner.network.bootstrap);
        get_config!(inner.network.routing_table.limit_over_attached);
        get_config!(inner.network.routing_table.limit_fully_attached);
@@ -86,7 +86,6 @@ cfg_if! {

// pub use bump_port::*;
pub use async_peek_stream::*;
pub use async_recursion::async_recursion;
pub use clone_stream::*;
pub use eventual::*;
pub use eventual_base::{EventualCommon, EventualResolvedFuture};
@@ -41,6 +41,7 @@ Future<VeilidConfig> getDefaultVeilidConfig() async {
        maxConnections: 16,
        connectionInitialTimeoutMs: 2000,
        connectionInactivityTimeoutMs: 60000,
        clientWhitelistTimeoutMs: 300000,
        nodeId: "",
        nodeIdSecret: "",
        bootstrap: [],
@@ -552,6 +552,7 @@ class VeilidConfigNetwork {
    int maxConnections;
    int connectionInitialTimeoutMs;
    int connectionInactivityTimeoutMs;
    int clientWhitelistTimeoutMs;
    String nodeId;
    String nodeIdSecret;
    List<String> bootstrap;

@@ -571,6 +572,7 @@ class VeilidConfigNetwork {
        required this.maxConnections,
        required this.connectionInitialTimeoutMs,
        required this.connectionInactivityTimeoutMs,
        required this.clientWhitelistTimeoutMs,
        required this.nodeId,
        required this.nodeIdSecret,
        required this.bootstrap,

@@ -592,6 +594,7 @@ class VeilidConfigNetwork {
        'max_connections': maxConnections,
        'connection_initial_timeout_ms': connectionInitialTimeoutMs,
        'connection_inactivity_timeout_ms': connectionInactivityTimeoutMs,
        'client_whitelist_timeout_ms': clientWhitelistTimeoutMs,
        'node_id': nodeId,
        'node_id_secret': nodeIdSecret,
        'bootstrap': bootstrap,

@@ -614,6 +617,7 @@ class VeilidConfigNetwork {
        connectionInitialTimeoutMs = json['connection_initial_timeout_ms'],
        connectionInactivityTimeoutMs =
            json['connection_inactivity_timeout_ms'],
        clientWhitelistTimeoutMs = json['client_whitelist_timeout_ms'],
        nodeId = json['node_id'],
        nodeIdSecret = json['node_id_secret'],
        bootstrap = json['bootstrap'],
@@ -51,6 +51,7 @@ core:
        max_connections: 16
        connection_initial_timeout_ms: 2000
        connection_inactivity_timeout_ms: 60000
        client_whitelist_timeout_ms: 300000
        node_id: ''
        node_id_secret: ''
        bootstrap: []

@@ -540,6 +541,7 @@ pub struct Network {
    pub max_connections: u32,
    pub connection_initial_timeout_ms: u32,
    pub connection_inactivity_timeout_ms: u32,
    pub client_whitelist_timeout_ms: u32,
    pub node_id: veilid_core::DHTKey,
    pub node_id_secret: veilid_core::DHTKeySecret,
    pub bootstrap: Vec<ParsedNodeDialInfo>,

@@ -815,6 +817,9 @@ impl Settings {
            "network.connection_inactivity_timeout_ms" => Ok(Box::new(
                inner.core.network.connection_inactivity_timeout_ms,
            )),
            "network.client_whitelist_timeout_ms" => {
                Ok(Box::new(inner.core.network.client_whitelist_timeout_ms))
            }
            "network.node_id" => Ok(Box::new(inner.core.network.node_id)),
            "network.node_id_secret" => Ok(Box::new(inner.core.network.node_id_secret)),
            "network.bootstrap" => Ok(Box::new(

@@ -1168,6 +1173,7 @@ mod tests {
        assert_eq!(s.core.network.max_connections, 16);
        assert_eq!(s.core.network.connection_initial_timeout_ms, 2_000u32);
        assert_eq!(s.core.network.connection_inactivity_timeout_ms, 60_000u32);
        assert_eq!(s.core.network.client_whitelist_timeout_ms, 300_000u32);
        assert_eq!(s.core.network.node_id, veilid_core::DHTKey::default());
        assert_eq!(
            s.core.network.node_id_secret,