setvalue pre-empt

Christien Rioux 2024-05-20 20:25:46 -04:00
parent 6590b76263
commit 908bb48f8c
7 changed files with 476 additions and 262 deletions
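The main change in this commit: outbound_set_value no longer returns a single OutboundSetValueResult. It now hands back a flume::Receiver over which the spawned fanout task first sends partial results (used for pre-emptive partial updates) and finally the completed result. The snippet below is an illustrative sketch of that producer/consumer shape and is not part of the commit; SetValueResult and Kind are made-up stand-ins, only the flume calls are real, and the real code awaits recv_async() rather than blocking.

use std::thread;

#[derive(Debug)]
enum Kind { Partial, Finished }

#[derive(Debug)]
struct SetValueResult { kind: Kind, consensus: usize }

fn main() {
    // Channel handed back by the outbound call, as in the real code.
    let (out_tx, out_rx) = flume::unbounded::<SetValueResult>();

    // Stand-in for the spawned fanout task: a partial result first, then the final one.
    thread::spawn(move || {
        out_tx.send(SetValueResult { kind: Kind::Partial, consensus: 1 }).ok();
        out_tx.send(SetValueResult { kind: Kind::Finished, consensus: 3 }).ok();
    });

    // The caller only waits for the first result (the real code awaits recv_async()).
    let first = out_rx.recv().expect("fanout task dropped the channel");
    println!("first result: {:?}", first);

    // If that first result was partial, the rest is drained later
    // (in the background, in the real code).
    if matches!(first.kind, Kind::Partial) {
        for res in out_rx.iter() {
            println!("deferred result: {:?}", res);
            if matches!(res.kind, Kind::Finished) {
                break;
            }
        }
    }
}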

View File

@@ -10,11 +10,12 @@ struct OutboundGetValueContext {
pub descriptor: Option<Arc<SignedValueDescriptor>>,
/// The parsed schema from the descriptor if we have one
pub schema: Option<DHTSchema>,
/// If we should send a partial update with the current contetx
/// If we should send a partial update with the current context
pub send_partial_update: bool,
}
/// The result of the outbound_get_value operation
#[derive(Clone, Debug)]
pub(super) struct OutboundGetValueResult {
/// Fanout result
pub fanout_result: FanoutResult,
@@ -91,11 +92,11 @@ impl StorageManager {
)
.await?
);
let mut ctx = context.lock();
// Keep the descriptor if we got one. If we had a last_descriptor it will
// already be validated by rpc_call_get_value
if let Some(descriptor) = gva.answer.descriptor {
let mut ctx = context.lock();
if ctx.descriptor.is_none() && ctx.schema.is_none() {
let schema = match descriptor.schema() {
Ok(v) => v,
@@ -109,69 +110,73 @@ impl StorageManager {
}
// Keep the value if we got one and it is newer and it passes schema validation
if let Some(value) = gva.answer.value {
log_dht!(debug "Got value back: len={} seq={}", value.value_data().data().len(), value.value_data().seq());
let mut ctx = context.lock();
let Some(value) = gva.answer.value else {
// Return peers if we have some
log_network_result!(debug "GetValue returned no value, fanout call returned peers {}", gva.answer.peers.len());
// Ensure we have a schema and descriptor
let (Some(descriptor), Some(schema)) = (&ctx.descriptor, &ctx.schema)
else {
// Got a value but no descriptor for it
// Move to the next node
return Ok(NetworkResult::invalid_message(
"Got value with no descriptor",
));
};
return Ok(NetworkResult::value(gva.answer.peers))
};
// Validate with schema
if !schema.check_subkey_value_data(
descriptor.owner(),
subkey,
value.value_data(),
) {
// Validation failed, ignore this value
// Move to the next node
return Ok(NetworkResult::invalid_message(format!(
"Schema validation failed on subkey {}",
subkey
)));
}
log_dht!(debug "GetValue got value back: len={} seq={}", value.value_data().data().len(), value.value_data().seq());
// If we have a prior value, see if this is a newer sequence number
if let Some(prior_value) = &ctx.value {
let prior_seq = prior_value.value_data().seq();
let new_seq = value.value_data().seq();
// Ensure we have a schema and descriptor
let (Some(descriptor), Some(schema)) = (&ctx.descriptor, &ctx.schema)
else {
// Got a value but no descriptor for it
// Move to the next node
return Ok(NetworkResult::invalid_message(
"Got value with no descriptor",
));
};
if new_seq == prior_seq {
// If sequence number is the same, the data should be the same
if prior_value.value_data() != value.value_data() {
// Move to the next node
return Ok(NetworkResult::invalid_message(
"value data mismatch",
));
}
// Increase the consensus count for the existing value
ctx.value_nodes.push(next_node);
} else if new_seq > prior_seq {
// If the sequence number is greater, start over with the new value
ctx.value = Some(Arc::new(value));
// One node has shown us this value so far
ctx.value_nodes = vec![next_node];
// Send an update since the value changed
ctx.send_partial_update = true;
} else {
// If the sequence number is older, ignore it
// Validate with schema
if !schema.check_subkey_value_data(
descriptor.owner(),
subkey,
value.value_data(),
) {
// Validation failed, ignore this value
// Move to the next node
return Ok(NetworkResult::invalid_message(format!(
"Schema validation failed on subkey {}",
subkey
)));
}
// If we have a prior value, see if this is a newer sequence number
if let Some(prior_value) = &ctx.value {
let prior_seq = prior_value.value_data().seq();
let new_seq = value.value_data().seq();
if new_seq == prior_seq {
// If sequence number is the same, the data should be the same
if prior_value.value_data() != value.value_data() {
// Move to the next node
return Ok(NetworkResult::invalid_message(
"value data mismatch",
));
}
} else {
// If we have no prior value, keep it
// Increase the consensus count for the existing value
ctx.value_nodes.push(next_node);
} else if new_seq > prior_seq {
// If the sequence number is greater, start over with the new value
ctx.value = Some(Arc::new(value));
// One node has shown us this value so far
ctx.value_nodes = vec![next_node];
// Send an update since the value changed
ctx.send_partial_update = true;
} else {
// If the sequence number is older, ignore it
}
} else {
// If we have no prior value, keep it
ctx.value = Some(Arc::new(value));
// One node has shown us this value so far
ctx.value_nodes = vec![next_node];
// Send an update since the value changed
ctx.send_partial_update = true;
}
// Return peers if we have some
log_network_result!(debug "GetValue fanout call returned peers {}", gva.answer.peers.len());

View File

@@ -557,7 +557,7 @@ impl StorageManager {
log_stor!(debug "Writing subkey to the network: {}:{} len={}", key, subkey, signed_value_data.value_data().data().len() );
// Use the safety selection we opened the record with
let result = match self
let res_rx = match self
.outbound_set_value(
rpc_processor,
key,
@@ -577,36 +577,39 @@
}
};
// Regain the lock after network access
let mut inner = self.lock().await?;
// Wait for the first result
let Ok(result) = res_rx.recv_async().await else {
apibail_internal!("failed to receive results");
};
let result = result?;
let partial = result.fanout_result.kind.is_partial();
// Report on fanout result offline
let was_offline = self.check_fanout_set_offline(key, subkey, &result.fanout_result);
if was_offline {
// Failed to write, try again later
inner.add_offline_subkey_write(key, subkey, safety_selection);
// Process the returned result
let out = self
.process_outbound_set_value_result(
key,
subkey,
signed_value_data.value_data().clone(),
safety_selection,
result,
)
.await?;
// If there's more to process, do it in the background
if partial {
let mut inner = self.lock().await?;
self.process_deferred_outbound_set_value_result_inner(
&mut inner,
res_rx,
key,
subkey,
out.clone()
.unwrap_or_else(|| signed_value_data.value_data().clone()),
safety_selection,
);
}
// Keep the list of nodes that returned a value for later reference
inner.process_fanout_results(key, core::iter::once((subkey, &result.fanout_result)), true);
// Return the new value if it differs from what was asked to set
if result.signed_value_data.value_data() != signed_value_data.value_data() {
// Record the newer value and send an update since it is different from what we just set
inner
.handle_set_local_value(
key,
subkey,
result.signed_value_data.clone(),
WatchUpdateMode::UpdateAll,
)
.await?;
return Ok(Some(result.signed_value_data.value_data().clone()));
}
// If the original value was set, return None
Ok(None)
Ok(out)
}
/// Create, update or cancel an outbound watch to a DHT value

View File

@@ -10,9 +10,12 @@ struct OutboundSetValueContext {
pub missed_since_last_set: usize,
/// The parsed schema from the descriptor if we have one
pub schema: DHTSchema,
/// If we should send a partial update with the current context
pub send_partial_update: bool,
}
/// The result of the outbound_set_value operation
#[derive(Clone, Debug)]
pub(super) struct OutboundSetValueResult {
/// Fanout result
pub fanout_result: FanoutResult,
@@ -30,7 +33,7 @@ impl StorageManager {
safety_selection: SafetySelection,
value: Arc<SignedValueData>,
descriptor: Arc<SignedValueDescriptor>,
) -> VeilidAPIResult<OutboundSetValueResult> {
) -> VeilidAPIResult<flume::Receiver<VeilidAPIResult<OutboundSetValueResult>>> {
let routing_table = rpc_processor.routing_table();
// Get the DHT parameters for 'SetValue'
@@ -50,6 +53,9 @@ impl StorageManager {
inner.get_value_nodes(key)?.unwrap_or_default()
};
// Make the return channel
let (out_tx, out_rx) = flume::unbounded::<VeilidAPIResult<OutboundSetValueResult>>();
// Make do-set-value answer context
let schema = descriptor.schema()?;
let context = Arc::new(Mutex::new(OutboundSetValueContext {
@@ -57,156 +63,330 @@ impl StorageManager {
value_nodes: vec![],
missed_since_last_set: 0,
schema,
send_partial_update: false,
}));
// Routine to call to generate fanout
let call_routine = |next_node: NodeRef| {
let rpc_processor = rpc_processor.clone();
let call_routine = {
let context = context.clone();
let descriptor = descriptor.clone();
async move {
let send_descriptor = true; // xxx check if next_node needs the descriptor or not
let rpc_processor = rpc_processor.clone();
// get most recent value to send
let value = {
let ctx = context.lock();
ctx.value.clone()
};
move |next_node: NodeRef| {
let rpc_processor = rpc_processor.clone();
let context = context.clone();
let descriptor = descriptor.clone();
async move {
let send_descriptor = true; // xxx check if next_node needs the descriptor or not
// send across the wire
let sva = network_result_try!(
rpc_processor
.clone()
.rpc_call_set_value(
Destination::direct(next_node.clone()).with_safety(safety_selection),
key,
subkey,
(*value).clone(),
(*descriptor).clone(),
send_descriptor,
)
.await?
);
// get most recent value to send
let value = {
let ctx = context.lock();
ctx.value.clone()
};
// If the node was close enough to possibly set the value
let mut ctx = context.lock();
if !sva.answer.set {
ctx.missed_since_last_set += 1;
// send across the wire
let sva = network_result_try!(
rpc_processor
.clone()
.rpc_call_set_value(
Destination::direct(next_node.clone())
.with_safety(safety_selection),
key,
subkey,
(*value).clone(),
(*descriptor).clone(),
send_descriptor,
)
.await?
);
// Return peers if we have some
log_network_result!(debug "SetValue missed: {}, fanout call returned peers {}", ctx.missed_since_last_set, sva.answer.peers.len());
return Ok(NetworkResult::value(sva.answer.peers));
}
// If the node was close enough to possibly set the value
let mut ctx = context.lock();
if !sva.answer.set {
ctx.missed_since_last_set += 1;
// See if we got a value back
let Some(value) = sva.answer.value else {
// No newer value was found and returned, so increase our consensus count
ctx.value_nodes.push(next_node);
// Return peers if we have some
log_network_result!(debug "SetValue missed: {}, fanout call returned peers {}", ctx.missed_since_last_set, sva.answer.peers.len());
return Ok(NetworkResult::value(sva.answer.peers));
}
// See if we got a value back
let Some(value) = sva.answer.value else {
// No newer value was found and returned, so increase our consensus count
ctx.value_nodes.push(next_node);
ctx.missed_since_last_set = 0;
// Send an update since it was set
if ctx.value_nodes.len() == 1 {
ctx.send_partial_update = true;
}
// Return peers if we have some
log_network_result!(debug "SetValue returned no value, fanout call returned peers {}", sva.answer.peers.len());
return Ok(NetworkResult::value(sva.answer.peers));
};
// Keep the value if we got one and it is newer and it passes schema validation
log_dht!(debug "SetValue got value back: len={} seq={}", value.value_data().data().len(), value.value_data().seq());
// Validate with schema
if !ctx.schema.check_subkey_value_data(
descriptor.owner(),
subkey,
value.value_data(),
) {
// Validation failed, ignore this value and pretend we never saw this node
return Ok(NetworkResult::invalid_message(format!(
"Schema validation failed on subkey {}",
subkey
)));
}
// If we got a value back it should be different than the one we are setting
// But in the case of a benign bug, we can just move to the next node
if ctx.value.value_data() == value.value_data() {
ctx.value_nodes.push(next_node);
ctx.missed_since_last_set = 0;
// Send an update since it was set
if ctx.value_nodes.len() == 1 {
ctx.send_partial_update = true;
}
return Ok(NetworkResult::value(sva.answer.peers));
}
// We have a prior value, ensure this is a newer sequence number
let prior_seq = ctx.value.value_data().seq();
let new_seq = value.value_data().seq();
if new_seq < prior_seq {
// If the sequence number is older, the node should not have returned a value here.
// Skip this node and its closer list because it is misbehaving
// Ignore this value and pretend we never saw this node
return Ok(NetworkResult::invalid_message("Sequence number is older"));
}
// If the sequence number is greater or equal, keep it
// even if the sequence number is the same, accept all conflicts in an attempt to resolve them
ctx.value = Arc::new(value);
// One node has shown us this value so far
ctx.value_nodes = vec![next_node];
ctx.missed_since_last_set = 0;
// Send an update since the value changed
ctx.send_partial_update = true;
// Return peers if we have some
log_network_result!(debug "SetValue returned no value, fanout call returned peers {}", sva.answer.peers.len());
return Ok(NetworkResult::value(sva.answer.peers));
};
// Keep the value if we got one and it is newer and it passes schema validation
log_dht!(debug "SetValue got value back: len={} seq={}", value.value_data().data().len(), value.value_data().seq());
// Validate with schema
if !ctx.schema.check_subkey_value_data(
descriptor.owner(),
subkey,
value.value_data(),
) {
// Validation failed, ignore this value and pretend we never saw this node
return Ok(NetworkResult::invalid_message("Schema validation failed"));
Ok(NetworkResult::value(sva.answer.peers))
}
// If we got a value back it should be different than the one we are setting
// But in the case of a benign bug, we can just move to the next node
if ctx.value.value_data() == value.value_data() {
ctx.value_nodes.push(next_node);
ctx.missed_since_last_set = 0;
return Ok(NetworkResult::value(sva.answer.peers));
}
// We have a prior value, ensure this is a newer sequence number
let prior_seq = ctx.value.value_data().seq();
let new_seq = value.value_data().seq();
if new_seq < prior_seq {
// If the sequence number is older, the node should not have returned a value here.
// Skip this node and its closer list because it is misbehaving
// Ignore this value and pretend we never saw this node
return Ok(NetworkResult::invalid_message("Sequence number is older"));
}
// If the sequence number is greater or equal, keep it
// even if the sequence number is the same, accept all conflicts in an attempt to resolve them
ctx.value = Arc::new(value);
// One node has shown us this value so far
ctx.value_nodes = vec![next_node];
ctx.missed_since_last_set = 0;
Ok(NetworkResult::value(sva.answer.peers))
}
};
// Routine to call to check if we're done at each step
let check_done = |_closest_nodes: &[NodeRef]| {
let check_done = {
let context = context.clone();
let out_tx = out_tx.clone();
move |_closest_nodes: &[NodeRef]| {
let mut ctx = context.lock();
// send partial update if desired
if ctx.send_partial_update {
ctx.send_partial_update = false;
// return partial result
let fanout_result = FanoutResult {
kind: FanoutResultKind::Partial,
value_nodes: ctx.value_nodes.clone(),
};
let out = OutboundSetValueResult {
fanout_result,
signed_value_data: ctx.value.clone(),
};
log_dht!(debug "Sending partial SetValue result: {:?}", out);
if let Err(e) = out_tx.send(Ok(out)) {
log_dht!(debug "Sending partial SetValue result failed: {}", e);
}
}
// If we have reached sufficient consensus, return done
if ctx.value_nodes.len() >= consensus_count {
return Some(());
}
// If we have missed more than our consensus count since our last set, return done
// This keeps the traversal from searching too many nodes when we aren't converging
// Only do this if we have gotten at least half our desired sets.
if ctx.value_nodes.len() >= ((consensus_count + 1) / 2)
&& ctx.missed_since_last_set >= consensus_count
{
return Some(());
}
None
}
};
// Call the fanout in a spawned task
spawn(Box::pin(async move {
let fanout_call = FanoutCall::new(
routing_table.clone(),
key,
key_count,
fanout,
timeout_us,
capability_fanout_node_info_filter(vec![CAP_DHT]),
call_routine,
check_done,
);
let kind = match fanout_call.run(init_fanout_queue).await {
// If we don't finish in the timeout (too much time passed checking for consensus)
TimeoutOr::Timeout => FanoutResultKind::Timeout,
// If we finished with or without consensus (enough nodes returning the same value)
TimeoutOr::Value(Ok(Some(()))) => FanoutResultKind::Finished,
// If we ran out of nodes before getting consensus
TimeoutOr::Value(Ok(None)) => FanoutResultKind::Exhausted,
// Failed
TimeoutOr::Value(Err(e)) => {
// If we finished with an error, return that
log_dht!(debug "SetValue fanout error: {}", e);
if let Err(e) = out_tx.send(Err(e.into())) {
log_dht!(debug "Sending SetValue fanout error failed: {}", e);
}
return;
}
};
let ctx = context.lock();
let fanout_result = FanoutResult {
kind,
value_nodes: ctx.value_nodes.clone(),
};
log_network_result!(debug "SetValue Fanout: {:?}", fanout_result);
// If we have reached sufficient consensus, return done
if ctx.value_nodes.len() >= consensus_count {
return Some(());
if let Err(e) = out_tx.send(Ok(OutboundSetValueResult {
fanout_result,
signed_value_data: ctx.value.clone(),
})) {
log_dht!(debug "Sending SetValue result failed: {}", e);
}
// If we have missed more than our consensus count since our last set, return done
// This keeps the traversal from searching too many nodes when we aren't converging
// Only do this if we have gotten at least half our desired sets.
if ctx.value_nodes.len() >= ((consensus_count + 1) / 2)
&& ctx.missed_since_last_set >= consensus_count
{
return Some(());
}
None
};
}))
.detach();
// Call the fanout
let fanout_call = FanoutCall::new(
routing_table.clone(),
key,
key_count,
fanout,
timeout_us,
capability_fanout_node_info_filter(vec![CAP_DHT]),
call_routine,
check_done,
);
let kind = match fanout_call.run(init_fanout_queue).await {
// If we don't finish in the timeout (too much time passed checking for consensus)
TimeoutOr::Timeout => FanoutResultKind::Timeout,
// If we finished with or without consensus (enough nodes returning the same value)
TimeoutOr::Value(Ok(Some(()))) => FanoutResultKind::Finished,
// If we ran out of nodes before getting consensus
TimeoutOr::Value(Ok(None)) => FanoutResultKind::Exhausted,
// Failed
TimeoutOr::Value(Err(e)) => {
// If we finished with an error, return that
log_dht!(debug "SetValue Fanout Error: {}", e);
return Err(e.into());
}
};
let ctx = context.lock();
let fanout_result = FanoutResult {
kind,
value_nodes: ctx.value_nodes.clone(),
};
log_network_result!(debug "SetValue Fanout: {:?}", fanout_result);
Ok(OutboundSetValueResult {
fanout_result,
signed_value_data: ctx.value.clone(),
})
Ok(out_rx)
}
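The check_done closure above stops the fanout on either of two conditions: full consensus, or at least half the desired consensus combined with too many consecutive missed sets (a sign the traversal is no longer converging). A small, self-contained sketch of that predicate with the context fields passed as plain integers (should_stop is an invented name, not part of the commit; consensus_count plays the role of the 'SetValue' consensus parameter):

fn should_stop(value_nodes: usize, missed_since_last_set: usize, consensus_count: usize) -> bool {
    // Reached full consensus: done.
    if value_nodes >= consensus_count {
        return true;
    }
    // At least half the desired consensus, but the fanout has stopped converging: stop early.
    value_nodes >= (consensus_count + 1) / 2 && missed_since_last_set >= consensus_count
}

fn main() {
    assert!(should_stop(4, 0, 4));   // full consensus
    assert!(should_stop(2, 4, 4));   // half consensus and stalled
    assert!(!should_stop(1, 4, 4));  // stalled but below half consensus
    assert!(!should_stop(3, 1, 4));  // still converging
}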
pub(super) fn process_deferred_outbound_set_value_result_inner(
&self, inner: &mut StorageManagerInner,
res_rx: flume::Receiver<Result<set_value::OutboundSetValueResult, VeilidAPIError>>,
key: TypedKey, subkey: ValueSubkey, last_value_data: ValueData, safety_selection: SafetySelection,
) {
let this = self.clone();
let last_value_data = Arc::new(Mutex::new(last_value_data));
inner.process_deferred_results(
res_rx,
Box::new(
move |result: VeilidAPIResult<set_value::OutboundSetValueResult>| -> SendPinBoxFuture<bool> {
let this = this.clone();
let last_value_data = last_value_data.clone();
Box::pin(async move {
let result = match result {
Ok(v) => v,
Err(e) => {
log_rtab!(debug "Deferred fanout error: {}", e);
return false;
}
};
let is_partial = result.fanout_result.kind.is_partial();
let lvd = last_value_data.lock().clone();
let value_data = match this.process_outbound_set_value_result(key, subkey, lvd, safety_selection, result).await {
Ok(Some(v)) => v,
Ok(None) => {
return is_partial;
}
Err(e) => {
log_rtab!(debug "Deferred fanout error: {}", e);
return false;
}
};
if is_partial {
// If more partial results show up, don't send an update until we're done
return true;
}
// If we processed the final result, possibly send an update
// if the sequence number changed since our first partial update
// Send with a max count as this is not attached to any watch
let changed = {
let mut lvd = last_value_data.lock();
if lvd.seq() != value_data.seq() {
*lvd = value_data.clone();
true
} else {
false
}
};
if changed {
if let Err(e) = this.update_callback_value_change(key, ValueSubkeyRangeSet::single(subkey), u32::MAX, Some(value_data)).await {
log_rtab!(debug "Failed sending deferred fanout value change: {}", e);
}
}
// Return done
false
})
},
),
);
}
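The deferred handler above only fires an extra value-change callback when the final result carries a different sequence number than the value it last reported. A tiny illustrative sketch of that bookkeeping, with ValueData reduced to its sequence number (LastReported is an invented name, not part of the commit):

struct LastReported { seq: u32 }

impl LastReported {
    // Returns true (and records the new sequence) only when the sequence number changed,
    // mirroring the lvd.seq() != value_data.seq() comparison above.
    fn changed(&mut self, new_seq: u32) -> bool {
        if self.seq != new_seq {
            self.seq = new_seq;
            true
        } else {
            false
        }
    }
}

fn main() {
    let mut last = LastReported { seq: 1 };
    assert!(last.changed(2));  // a newer value arrived after the partial update: notify
    assert!(!last.changed(2)); // the final result matches what was already reported: stay quiet
}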
pub(super) async fn process_outbound_set_value_result(&self, key: TypedKey, subkey: ValueSubkey, last_value_data: ValueData, safety_selection: SafetySelection, result: set_value::OutboundSetValueResult) -> Result<Option<ValueData>, VeilidAPIError> {
// Regain the lock after network access
let mut inner = self.lock().await?;
// Report on fanout result offline
let was_offline = self.check_fanout_set_offline(key, subkey, &result.fanout_result);
if was_offline {
// Failed to write, try again later
inner.add_offline_subkey_write(key, subkey, safety_selection);
}
// Keep the list of nodes that returned a value for later reference
inner.process_fanout_results(key, core::iter::once((subkey, &result.fanout_result)), true);
// Return the new value if it differs from what was asked to set
if result.signed_value_data.value_data() != &last_value_data {
// Record the newer value and send an update since it is different from what we just set
inner
.handle_set_local_value(
key,
subkey,
result.signed_value_data.clone(),
WatchUpdateMode::UpdateAll,
)
.await?;
return Ok(Some(result.signed_value_data.value_data().clone()));
}
// If the original value was set, return None
Ok(None)
}
/// Handle a received 'Set Value' query
/// Returns None if the value passed in was set

View File

@@ -68,24 +68,50 @@ impl StorageManager {
)
.await;
match osvres {
Ok(result) => {
let was_offline =
self.check_fanout_set_offline(*key, subkey, &result.fanout_result);
if !was_offline {
if let Some(update_callback) = opt_update_callback.clone() {
// Send valuechange with dead count and no subkeys
update_callback(VeilidUpdate::ValueChange(Box::new(
VeilidValueChange {
key: *key,
subkeys: ValueSubkeyRangeSet::single(subkey),
count: u32::MAX,
value: Some(result.signed_value_data.value_data().clone()),
},
)));
Ok(res_rx) => {
while let Ok(res) = res_rx.recv_async().await {
match res {
Ok(result) => {
let partial = result.fanout_result.kind.is_partial();
// Skip partial results in offline subkey write mode
if partial {
continue;
}
// Process non-partial setvalue result
let was_offline = self.check_fanout_set_offline(
*key,
subkey,
&result.fanout_result,
);
if !was_offline {
if let Some(update_callback) = opt_update_callback.clone() {
// Send valuechange with dead count and no subkeys
update_callback(VeilidUpdate::ValueChange(Box::new(
VeilidValueChange {
key: *key,
subkeys: ValueSubkeyRangeSet::single(subkey),
count: u32::MAX,
value: Some(
result
.signed_value_data
.value_data()
.clone(),
),
},
)));
}
written_subkeys.insert(subkey);
};
fanout_results.push((subkey, result.fanout_result));
break;
}
Err(e) => {
log_stor!(debug "failed to get offline subkey write result: {}:{} {}", key, subkey, e);
break;
}
}
written_subkeys.insert(subkey);
};
fanout_results.push((subkey, result.fanout_result));
}
}
Err(e) => {
log_stor!(debug "failed to write offline subkey: {}:{} {}", key, subkey, e);

View File

@@ -21,10 +21,10 @@ packages:
dependency: transitive
description:
name: async_tools
sha256: "972f68ab663724d86260a31e363c1355ff493308441b872bf4e7b8adc67c832c"
sha256: e783ac6ed5645c86da34240389bb3a000fc5e3ae6589c6a482eb24ece7217681
url: "https://pub.dev"
source: hosted
version: "0.1.0"
version: "0.1.1"
boolean_selector:
dependency: transitive
description:
@@ -85,10 +85,10 @@ packages:
dependency: "direct main"
description:
name: cupertino_icons
sha256: d57953e10f9f8327ce64a508a355f0b1ec902193f66288e8cb5070e7c47eeb2d
sha256: ba631d1c7f7bef6b729a622b7b752645a2d076dba9976925b8f25725a30e1ee6
url: "https://pub.dev"
source: hosted
version: "1.0.6"
version: "1.0.8"
equatable:
dependency: transitive
description:
@@ -187,34 +187,34 @@ packages:
dependency: transitive
description:
name: json_annotation
sha256: b10a7b2ff83d83c777edba3c6a0f97045ddadd56c944e1a23a3fdf43a1bf4467
sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1"
url: "https://pub.dev"
source: hosted
version: "4.8.1"
version: "4.9.0"
leak_tracker:
dependency: transitive
description:
name: leak_tracker
sha256: "78eb209deea09858f5269f5a5b02be4049535f568c07b275096836f01ea323fa"
sha256: "7f0df31977cb2c0b88585095d168e689669a2cc9b97c309665e3386f3e9d341a"
url: "https://pub.dev"
source: hosted
version: "10.0.0"
version: "10.0.4"
leak_tracker_flutter_testing:
dependency: transitive
description:
name: leak_tracker_flutter_testing
sha256: b46c5e37c19120a8a01918cfaf293547f47269f7cb4b0058f21531c2465d6ef0
sha256: "06e98f569d004c1315b991ded39924b21af84cf14cc94791b8aea337d25b57f8"
url: "https://pub.dev"
source: hosted
version: "2.0.1"
version: "3.0.3"
leak_tracker_testing:
dependency: transitive
description:
name: leak_tracker_testing
sha256: a597f72a664dbd293f3bfc51f9ba69816f84dcd403cdac7066cb3f6003f3ab47
sha256: "6ba465d5d76e67ddf503e1161d1f4a6bc42306f9d66ca1e8f079a47290fb06d3"
url: "https://pub.dev"
source: hosted
version: "2.0.1"
version: "3.0.1"
lint_hard:
dependency: "direct dev"
description:
@@ -259,10 +259,10 @@ packages:
dependency: transitive
description:
name: meta
sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04
sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136"
url: "https://pub.dev"
source: hosted
version: "1.11.0"
version: "1.12.0"
path:
dependency: "direct main"
description:
@@ -275,26 +275,26 @@ packages:
dependency: "direct main"
description:
name: path_provider
sha256: b27217933eeeba8ff24845c34003b003b2b22151de3c908d0e679e8fe1aa078b
sha256: c9e7d3a4cd1410877472158bee69963a4579f78b68c65a2b7d40d1a7a88bb161
url: "https://pub.dev"
source: hosted
version: "2.1.2"
version: "2.1.3"
path_provider_android:
dependency: transitive
description:
name: path_provider_android
sha256: "477184d672607c0a3bf68fbbf601805f92ef79c82b64b4d6eb318cbca4c48668"
sha256: a248d8146ee5983446bf03ed5ea8f6533129a12b11f12057ad1b4a67a2b3b41d
url: "https://pub.dev"
source: hosted
version: "2.2.2"
version: "2.2.4"
path_provider_foundation:
dependency: transitive
description:
name: path_provider_foundation
sha256: "5a7999be66e000916500be4f15a3633ebceb8302719b47b9cc49ce924125350f"
sha256: f234384a3fdd67f989b4d54a5d73ca2a6c422fa55ae694381ae0f4375cd1ea16
url: "https://pub.dev"
source: hosted
version: "2.3.2"
version: "2.4.0"
path_provider_linux:
dependency: transitive
description:
@@ -424,10 +424,10 @@ packages:
dependency: transitive
description:
name: test_api
sha256: "5c2f730018264d276c20e4f1503fd1308dfbbae39ec8ee63c5236311ac06954b"
sha256: "9955ae474176f7ac8ee4e989dadfb411a58c30415bcfb648fa04b2b8a03afa7f"
url: "https://pub.dev"
source: hosted
version: "0.6.1"
version: "0.7.0"
typed_data:
dependency: transitive
description:
@@ -462,10 +462,10 @@ packages:
dependency: transitive
description:
name: vm_service
sha256: b3d56ff4341b8f182b96aceb2fa20e3dcb336b9f867bc0eafc0de10f1048e957
sha256: "3923c89304b715fb1eb6423f017651664a03bf5f4b29983627c4da791f74a4ec"
url: "https://pub.dev"
source: hosted
version: "13.0.0"
version: "14.2.1"
webdriver:
dependency: transitive
description:
@@ -478,10 +478,10 @@ packages:
dependency: transitive
description:
name: win32
sha256: "8cb58b45c47dcb42ab3651533626161d6b67a2921917d8d429791f76972b3480"
sha256: a79dbe579cb51ecd6d30b17e0cae4e0ea15e2c0e66f69ad4198f22a6789e94f4
url: "https://pub.dev"
source: hosted
version: "5.3.0"
version: "5.5.1"
xdg_directories:
dependency: transitive
description:
@@ -507,5 +507,5 @@ packages:
source: hosted
version: "0.0.6"
sdks:
dart: ">=3.3.4 <4.0.0"
dart: ">=3.4.0 <4.0.0"
flutter: ">=3.19.1"

View File

@@ -31,9 +31,9 @@ dependencies:
# The following adds the Cupertino Icons font to your application.
# Use with the CupertinoIcons class for iOS style icons.
cupertino_icons: ^1.0.6
cupertino_icons: ^1.0.8
loggy: ^2.0.3
path_provider: ^2.1.2
path_provider: ^2.1.3
path: ^1.9.0
xterm: ^4.0.0
flutter_acrylic: ^1.1.3

View File

@@ -21,18 +21,18 @@ dependencies:
flutter_web_plugins:
sdk: flutter
freezed_annotation: ^2.4.1
json_annotation: ^4.8.1
json_annotation: ^4.9.0
path: ^1.9.0
path_provider: ^2.1.2
path_provider: ^2.1.3
system_info2: ^4.0.0
system_info_plus: ^0.0.5
dev_dependencies:
build_runner: ^2.4.8
build_runner: ^2.4.10
flutter_test:
sdk: flutter
freezed: ^2.4.7
json_serializable: ^6.7.1
freezed: ^2.5.2
json_serializable: ^6.8.0
lint_hard: ^4.0.0
# The following section is specific to Flutter.