// ant_core/data/client/mod.rs
1//! Client operations for the Autonomi network.
2//!
3//! Provides high-level APIs for storing and retrieving data
4//! on the Autonomi decentralized network.
5
6pub mod adaptive;
7pub mod batch;
8pub mod cache;
9pub mod chunk;
10pub mod data;
11pub mod file;
12pub mod merkle;
13pub mod payment;
14pub(crate) mod peer_cache;
15pub mod quote;
16
17use crate::data::client::adaptive::{AdaptiveConfig, AdaptiveController, ChannelStart, Outcome};
18use crate::data::client::cache::ChunkCache;
19use crate::data::error::{Error, Result};
20use crate::data::network::Network;
21use ant_protocol::evm::Wallet;
22use ant_protocol::transport::{MultiAddr, P2PNode, PeerId};
23use ant_protocol::{XorName, CLOSE_GROUP_SIZE};
24use std::path::PathBuf;
25use std::sync::atomic::{AtomicU64, Ordering};
26use std::sync::Arc;
27use tracing::debug;
28
29/// Classify a `data::error::Error` into a controller `Outcome`.
30///
31/// Capacity signals (Timeout / NetworkError) drive the controller
32/// down; application errors do not. The mapping is conservative:
33/// anything that COULD be transport-related is treated as a network
34/// signal, because under-classifying a real network failure as
35/// "application error" makes the controller blind to genuine stress.
36///
37/// Mapping policy:
38/// - `Timeout` -> `Timeout` (per-op deadline elapsed)
39/// - `Network`, `InsufficientPeers`, `Io` -> `NetworkError` (transport
40/// layer reported failure)
41/// - `Protocol`, `Storage` -> `NetworkError` (these wrap remote errors
42/// that frequently include peer disconnects mid-stream — under
43/// network stress these are how transport failures surface)
44/// - `PartialUpload` -> `NetworkError` (literal capacity signal: some
45/// chunks could not be stored)
46/// - `AlreadyStored`, `Encryption`, `Crypto`, `Payment`,
47/// `Serialization`, `InvalidData`, `SignatureVerification`,
48/// `Config`, `InsufficientDiskSpace`, `CostEstimationInconclusive`
49/// -> `ApplicationError` (would happen on a perfectly healthy link)
50pub(crate) fn classify_error(err: &Error) -> Outcome {
51 match err {
52 Error::Timeout(_) => Outcome::Timeout,
53 Error::Network(_)
54 | Error::InsufficientPeers(_)
55 | Error::Io(_)
56 | Error::Protocol(_)
57 | Error::Storage(_)
58 | Error::PartialUpload { .. } => Outcome::NetworkError,
59 Error::AlreadyStored
60 | Error::Encryption(_)
61 | Error::Crypto(_)
62 | Error::Payment(_)
63 | Error::Serialization(_)
64 | Error::InvalidData(_)
65 | Error::SignatureVerification(_)
66 | Error::Config(_)
67 | Error::InsufficientDiskSpace(_)
68 | Error::CostEstimationInconclusive(_) => Outcome::ApplicationError,
69 }
70}
71
/// Default timeout for lightweight network operations (quotes, DHT lookups) in seconds.
const DEFAULT_QUOTE_TIMEOUT_SECS: u64 = 10;

/// Default timeout for chunk store operations in seconds.
///
/// Chunk PUTs transfer multi-MB payloads to multiple peers. On residential
/// connections with limited upload bandwidth, the quote timeout (10 s)
/// is far too short — a 4 MB chunk at 1 Mbps takes ~32 s just for the data
/// transfer, before accounting for QUIC slow-start and NAT traversal overhead.
/// 120 s covers that worst case with headroom while still bounding a wedged
/// PUT. (Previously this was 10 s — equal to the quote timeout — which
/// contradicted both this rationale and the `ClientConfig::store_timeout_secs`
/// doc requiring it to be larger than `quote_timeout_secs`.)
const DEFAULT_STORE_TIMEOUT_SECS: u64 = 120;

/// Default quote concurrency: high because quoting is pure network I/O
/// (DHT lookups + small request/response messages) with no CPU-bound work.
const DEFAULT_QUOTE_CONCURRENCY: usize = 32;

/// Default store concurrency: moderate because each chunk PUT sends ~4MB
/// to 7 close-group peers. At 8 concurrent stores, ~225MB of outbound
/// traffic can be in flight. Users on fast connections can increase this
/// with --store-concurrency; users on slow connections can decrease it.
const DEFAULT_STORE_CONCURRENCY: usize = 8;
92
/// Configuration for the Autonomi client.
///
/// Construct via `ClientConfig::default()` and override individual fields;
/// static timeouts live here while fan-out is sized by the adaptive
/// controller (see the `adaptive` field).
#[derive(Debug, Clone)]
pub struct ClientConfig {
    /// Per-op timeout for lightweight network operations (quotes,
    /// DHT lookups), in seconds. The adaptive controller does NOT
    /// currently size timeouts; this remains a static knob.
    pub quote_timeout_secs: u64,
    /// Per-op timeout for chunk store (PUT) operations, in seconds.
    /// Should be larger than `quote_timeout_secs` because chunk PUTs
    /// transfer multi-MB payloads. The adaptive controller does NOT
    /// currently size timeouts; this remains a static knob.
    pub store_timeout_secs: u64,
    /// Number of closest peers to consider for routing.
    pub close_group_size: usize,
    /// **Deprecated.** Pre-adaptive ceiling for quote concurrency.
    ///
    /// The adaptive controller now sizes quote fan-out from observed
    /// signals. This field, when non-zero and smaller than the
    /// controller's per-channel default, clamps the **quote channel
    /// only** (it does NOT bleed into store or fetch). Will be removed
    /// in a future release.
    pub quote_concurrency: usize,
    /// **Deprecated.** Pre-adaptive ceiling for store concurrency.
    ///
    /// The adaptive controller now sizes store fan-out from observed
    /// signals. This field, when non-zero and smaller than the
    /// controller's per-channel default, clamps the **store channel
    /// only** (it does NOT bleed into quote or fetch). Will be removed
    /// in a future release.
    pub store_concurrency: usize,
    /// Adaptive controller configuration. Defaults are tuned to match
    /// or exceed the prior static behavior — disabling adaptation
    /// (`adaptive.enabled = false`) reverts to the controller's
    /// `initial` values without re-evaluation.
    pub adaptive: AdaptiveConfig,
    /// Allow loopback (`127.0.0.1`) connections in the saorsa-transport
    /// layer. Set to `true` only for devnet / local testing. Production
    /// peers on the public Autonomi network reject the QUIC handshake
    /// variant produced when this is `true`, so the default is `false`.
    ///
    /// This mirrors the `--allow-loopback` flag in `ant-cli`, which already
    /// defaults to `false` and threads through to the same
    /// `CoreNodeConfig::builder().local(...)` call.
    pub allow_loopback: bool,
    /// Bind a dual-stack IPv6 socket (`true`) or an IPv4-only socket
    /// (`false`). Defaults to `true`, matching the CLI default.
    ///
    /// Set to `false` only when running on hosts without a working IPv6
    /// stack, to avoid advertising unreachable v6 addresses to the DHT
    /// (which causes slow connects and junk DHT address records). This
    /// mirrors the `--ipv4-only` flag in `ant-cli`.
    pub ipv6: bool,
}
146
147impl Default for ClientConfig {
148 fn default() -> Self {
149 Self {
150 quote_timeout_secs: DEFAULT_QUOTE_TIMEOUT_SECS,
151 store_timeout_secs: DEFAULT_STORE_TIMEOUT_SECS,
152 close_group_size: CLOSE_GROUP_SIZE,
153 quote_concurrency: DEFAULT_QUOTE_CONCURRENCY,
154 store_concurrency: DEFAULT_STORE_CONCURRENCY,
155 adaptive: AdaptiveConfig::default(),
156 allow_loopback: false,
157 ipv6: true,
158 }
159 }
160}
161
162/// Build the adaptive controller for a `Client`. Loads any persisted
163/// snapshot, clamps cold-start values into the deprecated-flag bounds
164/// **per channel** (so a pin on `--store-concurrency` does NOT bleed
165/// into the fetch / quote channels), and returns the persistence path
166/// so callers can save back at shutdown.
167fn build_controller(config: &ClientConfig) -> (AdaptiveController, Option<PathBuf>) {
168 let mut adaptive_cfg = config.adaptive.clone();
169
170 // Per-channel ceilings: each legacy field is interpreted as a cap
171 // for ONLY its matching channel. The fetch channel has no
172 // pre-existing legacy field; it always uses the controller's
173 // default ceiling.
174 //
175 // The legacy fields are non-zero by ClientConfig::default(), but
176 // we honor them as bounds only when they would actually CONSTRAIN
177 // the controller — i.e. when smaller than the per-channel default
178 // max. A default ClientConfig must not silently lower the
179 // controller's ceilings.
180 // A value equal to the historic legacy default is treated as
181 // "not pinned by the user" — without this, every default
182 // ClientConfig would silently lower the controller's per-channel
183 // ceilings to the prior static values (32/8) and the controller
184 // could never grow above them.
185 let user_quote_max = config.quote_concurrency;
186 let user_store_max = config.store_concurrency;
187 let quote_pinned = user_quote_max > 0 && user_quote_max != DEFAULT_QUOTE_CONCURRENCY;
188 let store_pinned = user_store_max > 0 && user_store_max != DEFAULT_STORE_CONCURRENCY;
189 if quote_pinned && user_quote_max < adaptive_cfg.max.quote {
190 adaptive_cfg.max.quote = user_quote_max;
191 }
192 if store_pinned && user_store_max < adaptive_cfg.max.store {
193 adaptive_cfg.max.store = user_store_max;
194 }
195
196 // Cold-start values: matched to the prior static defaults. If the
197 // legacy field caps the channel below the cold-start, lower the
198 // start to match — never start above the channel's max.
199 let mut start = ChannelStart::default();
200 start.quote = start.quote.min(adaptive_cfg.max.quote);
201 start.store = start.store.min(adaptive_cfg.max.store);
202 start.fetch = start.fetch.min(adaptive_cfg.max.fetch);
203
204 let adaptive_enabled = adaptive_cfg.enabled;
205 let controller = AdaptiveController::new(start, adaptive_cfg);
206 // Skip disk warm-start entirely when adaptation is disabled —
207 // fixed-concurrency mode means the user wants exactly the cold
208 // start, no surprises from prior runs. (warm_start is also a
209 // no-op when disabled, but skipping the load avoids file I/O
210 // and the path-resolution side effects.)
211 let persist_path = if adaptive_enabled {
212 let p = adaptive::default_persist_path();
213 if let Some(ref path) = p {
214 if let Some(snap) = adaptive::load_snapshot(path) {
215 debug!(path = %path.display(), "adaptive: warm-start from disk");
216 controller.warm_start(snap);
217 }
218 }
219 p
220 } else {
221 // Even with adaptation off, persist_path is computed so
222 // explicit save_adaptive_snapshot() calls still work — but
223 // the controller currently never moves, so saving the cold
224 // start is harmless.
225 adaptive::default_persist_path()
226 };
227
228 // Note: self_encryption's `STREAM_DECRYPT_BATCH_SIZE` is a
229 // `LazyLock<usize>` populated from the env var at first access
230 // and frozen for the process lifetime. Setting the env var from
231 // Rust would require `std::env::set_var`, which is `unsafe`
232 // since Rust 1.80 (it races against concurrent reads in any
233 // other thread); per project policy, `unsafe` is banned.
234 //
235 // The adaptive controller still drives fan-out *inside* each
236 // batch — we re-read `controller.fetch.current()` in the
237 // `streaming_decrypt` callback. The upstream batch size only
238 // controls how many chunks `self_encryption` asks us for at a
239 // time (default 10). For larger batch sizes export
240 // `STREAM_DECRYPT_BATCH_SIZE` before launching the process.
241
242 (controller, persist_path)
243}
244
/// Client for the Autonomi decentralized network.
///
/// Provides high-level APIs for storing and retrieving chunks
/// and files on the network. Construct via `Client::from_node` or
/// `Client::connect`; payment features additionally require
/// `with_wallet` / `with_evm_network`.
pub struct Client {
    // Static knobs (timeouts, close group size, legacy concurrency pins).
    config: ClientConfig,
    // Network layer wrapping the underlying P2P node.
    network: Network,
    // Optional wallet for payment operations; `None` until `with_wallet`.
    wallet: Option<Arc<Wallet>>,
    // Optional EVM network for approvals/contract calls; populated by
    // `with_evm_network` or derived from the wallet in `with_wallet`.
    evm_network: Option<ant_protocol::evm::Network>,
    // Local cache of fetched/stored chunks.
    chunk_cache: ChunkCache,
    // Monotonic counter backing `next_request_id()`.
    next_request_id: AtomicU64,
    /// Adaptive concurrency controller: replaces the static
    /// quote/store concurrency knobs. See `adaptive` module.
    controller: AdaptiveController,
    /// Path the controller persists its snapshot to. `None` disables
    /// persistence (useful for tests / non-disk environments).
    persist_path: Option<PathBuf>,
}
263
264impl Client {
265 /// Create a client connected to the given P2P node.
266 #[must_use]
267 pub fn from_node(node: Arc<P2PNode>, config: ClientConfig) -> Self {
268 let network = Network::from_node(node);
269 let (controller, persist_path) = build_controller(&config);
270 Self {
271 config,
272 network,
273 wallet: None,
274 evm_network: None,
275 chunk_cache: ChunkCache::default(),
276 next_request_id: AtomicU64::new(1),
277 controller,
278 persist_path,
279 }
280 }
281
282 /// Create a client connected to bootstrap peers.
283 ///
284 /// Threads `config.allow_loopback` and `config.ipv6` through to
285 /// `Network::new`, which controls the saorsa-transport `local` and
286 /// `ipv6` flags on the underlying `CoreNodeConfig`. See
287 /// `ClientConfig::allow_loopback` and `ClientConfig::ipv6` for details.
288 ///
289 /// # Errors
290 ///
291 /// Returns an error if the P2P node cannot be created or bootstrapping fails.
292 pub async fn connect(
293 bootstrap_peers: &[std::net::SocketAddr],
294 config: ClientConfig,
295 ) -> Result<Self> {
296 debug!(
297 "Connecting to Autonomi network with {} bootstrap peers (allow_loopback={}, ipv6={})",
298 bootstrap_peers.len(),
299 config.allow_loopback,
300 config.ipv6,
301 );
302 let network = Network::new(bootstrap_peers, config.allow_loopback, config.ipv6).await?;
303 let (controller, persist_path) = build_controller(&config);
304 Ok(Self {
305 config,
306 network,
307 wallet: None,
308 evm_network: None,
309 chunk_cache: ChunkCache::default(),
310 next_request_id: AtomicU64::new(1),
311 controller,
312 persist_path,
313 })
314 }
315
316 /// Set the wallet for payment operations.
317 ///
318 /// Also populates the EVM network from the wallet so that
319 /// token approvals work without a separate `with_evm_network` call.
320 #[must_use]
321 pub fn with_wallet(mut self, wallet: Wallet) -> Self {
322 self.evm_network = Some(wallet.network().clone());
323 self.wallet = Some(Arc::new(wallet));
324 self
325 }
326
327 /// Set the EVM network without requiring a wallet.
328 ///
329 /// This enables token approval and contract interactions
330 /// for external-signer flows where the private key lives outside Rust.
331 #[must_use]
332 pub fn with_evm_network(mut self, network: ant_protocol::evm::Network) -> Self {
333 self.evm_network = Some(network);
334 self
335 }
336
337 /// Get the EVM network, falling back to the wallet's network if available.
338 ///
339 /// # Errors
340 ///
341 /// Returns an error if neither `with_evm_network` nor `with_wallet` was called.
342 pub(crate) fn require_evm_network(&self) -> Result<&ant_protocol::evm::Network> {
343 if let Some(ref net) = self.evm_network {
344 return Ok(net);
345 }
346 if let Some(ref wallet) = self.wallet {
347 return Ok(wallet.network());
348 }
349 Err(Error::Payment(
350 "EVM network not configured — call with_evm_network() or with_wallet() first"
351 .to_string(),
352 ))
353 }
354
355 /// Get the client configuration.
356 #[must_use]
357 pub fn config(&self) -> &ClientConfig {
358 &self.config
359 }
360
361 /// Get a mutable reference to the client configuration.
362 pub fn config_mut(&mut self) -> &mut ClientConfig {
363 &mut self.config
364 }
365
366 /// Get a reference to the network layer.
367 #[must_use]
368 pub fn network(&self) -> &Network {
369 &self.network
370 }
371
372 /// Get the wallet, if configured.
373 #[must_use]
374 pub fn wallet(&self) -> Option<&Arc<Wallet>> {
375 self.wallet.as_ref()
376 }
377
378 /// Get a reference to the chunk cache.
379 #[must_use]
380 pub fn chunk_cache(&self) -> &ChunkCache {
381 &self.chunk_cache
382 }
383
384 /// Adaptive concurrency controller. Hot loops read
385 /// `controller().<channel>.current()` to size their fan-out and
386 /// call `.observe(...)` on each completion.
387 #[must_use]
388 pub fn controller(&self) -> &AdaptiveController {
389 &self.controller
390 }
391
392 /// Persist the current adaptive snapshot to disk so the next
393 /// `Client::connect` warm-starts at the learned values instead of
394 /// cold defaults. Best effort — failures log and are discarded.
395 /// Idempotent. Safe to call from a Drop impl or an explicit
396 /// shutdown hook.
397 pub fn save_adaptive_snapshot(&self) {
398 if let Some(ref path) = self.persist_path {
399 adaptive::save_snapshot(path, self.controller.snapshot());
400 }
401 }
402
403 /// Get the next request ID for protocol messages.
404 pub(crate) fn next_request_id(&self) -> u64 {
405 self.next_request_id.fetch_add(1, Ordering::Relaxed)
406 }
407
408 /// Return all peers in the close group for a target address.
409 ///
410 /// Queries the DHT for the closest peers by XOR distance.
411 /// Returns each peer paired with its known network addresses.
412 pub(crate) async fn close_group_peers(
413 &self,
414 target: &XorName,
415 ) -> Result<Vec<(PeerId, Vec<MultiAddr>)>> {
416 let peers = self
417 .network()
418 .find_closest_peers(target, self.config().close_group_size)
419 .await?;
420
421 if peers.is_empty() {
422 return Err(Error::InsufficientPeers(
423 "DHT returned no peers for target address".to_string(),
424 ));
425 }
426 Ok(peers)
427 }
428}
429
430/// Persist the adaptive snapshot when the `Client` is dropped, so any
431/// caller — CLI, daemon, library user, integration test — gets
432/// warm-start carry-over for free without remembering to call
433/// `save_adaptive_snapshot()` explicitly. Best effort, sync `std::fs`,
434/// no panic risk on a poisoned mutex (the inner helper handles it).
435///
436/// We deliberately write SYNCHRONOUSLY (not via `spawn_blocking`)
437/// because Drop runs during process shutdown / runtime teardown,
438/// when fire-and-forget background tasks can be dropped before they
439/// complete and the snapshot is silently lost. A small synchronous
440/// stall on a tokio worker (typically <1ms for a local-disk JSON
441/// write of ~50 bytes) is the right tradeoff for guaranteed
442/// persistence — BOUNDED by `DROP_SAVE_TIMEOUT` so a stalled
443/// network-mounted data dir cannot block process shutdown.
444const DROP_SAVE_TIMEOUT: std::time::Duration = std::time::Duration::from_millis(500);
445
446impl Drop for Client {
447 fn drop(&mut self) {
448 let Some(path) = self.persist_path.clone() else {
449 return;
450 };
451 let snap = self.controller.snapshot();
452 adaptive::save_snapshot_with_timeout(path, snap, DROP_SAVE_TIMEOUT);
453 }
454}
455
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    /// Cover EVERY variant of `data::error::Error`. Build an instance of
    /// each, classify it, and assert the resulting `Outcome` matches the
    /// only sensible mapping. If a future commit adds a new error variant
    /// without updating `classify_error`, this test fails to ensure the
    /// adaptive controller always sees correct capacity signals.
    ///
    /// Mapping policy (mirrors `classify_error` doc):
    /// - `Timeout` -> `Outcome::Timeout`
    /// - `Network`, `InsufficientPeers`, `Io`, `Protocol`, `Storage`,
    ///   `PartialUpload` -> `Outcome::NetworkError` (transport-related
    ///   or literal capacity failure)
    /// - everything else -> `Outcome::ApplicationError` (would happen
    ///   on a perfectly healthy network)
    #[test]
    fn classify_error_covers_all_variants() {
        // One (input, expected) pair per Error variant — 17 total.
        let cases: Vec<(Error, Outcome)> = vec![
            (Error::Timeout("t".to_string()), Outcome::Timeout),
            (Error::Network("n".to_string()), Outcome::NetworkError),
            (
                Error::InsufficientPeers("p".to_string()),
                Outcome::NetworkError,
            ),
            (Error::Storage("s".to_string()), Outcome::NetworkError),
            (Error::Payment("p".to_string()), Outcome::ApplicationError),
            (Error::Protocol("p".to_string()), Outcome::NetworkError),
            (
                Error::InvalidData("d".to_string()),
                Outcome::ApplicationError,
            ),
            (
                Error::Serialization("s".to_string()),
                Outcome::ApplicationError,
            ),
            (Error::Crypto("c".to_string()), Outcome::ApplicationError),
            (
                Error::Io(std::io::Error::other("io")),
                Outcome::NetworkError,
            ),
            (Error::Config("c".to_string()), Outcome::ApplicationError),
            (
                Error::SignatureVerification("s".to_string()),
                Outcome::ApplicationError,
            ),
            (
                Error::Encryption("e".to_string()),
                Outcome::ApplicationError,
            ),
            (Error::AlreadyStored, Outcome::ApplicationError),
            (
                Error::InsufficientDiskSpace("d".to_string()),
                Outcome::ApplicationError,
            ),
            (
                Error::CostEstimationInconclusive("c".to_string()),
                Outcome::ApplicationError,
            ),
            (
                // Empty PartialUpload payload — only the variant matters
                // for classification, not its contents.
                Error::PartialUpload {
                    stored: vec![],
                    stored_count: 0,
                    failed: vec![],
                    failed_count: 0,
                    total_chunks: 0,
                    reason: "r".to_string(),
                },
                Outcome::NetworkError,
            ),
        ];
        for (err, expected) in &cases {
            let got = classify_error(err);
            assert_eq!(
                got, *expected,
                "classify_error({err:?}) = {got:?}, expected {expected:?}",
            );
        }
    }

    /// C4 fix guard: pinning the legacy `quote_concurrency` /
    /// `store_concurrency` ClientConfig fields must clamp ONLY the
    /// matching channel's max in the resulting controller. The fetch
    /// (download) channel must keep its full default ceiling.
    #[test]
    fn legacy_concurrency_pin_does_not_bleed_across_channels() {
        // 4 and 2 differ from the legacy defaults (32/8), so both count
        // as user pins for build_controller.
        let cfg = ClientConfig {
            quote_concurrency: 4,
            store_concurrency: 2,
            ..ClientConfig::default()
        };
        let (controller, _) = build_controller(&cfg);
        // The store/quote caps must be clamped to the user's pin.
        assert_eq!(controller.config.max.quote, 4, "quote pin not respected");
        assert_eq!(controller.config.max.store, 2, "store pin not respected");
        // The fetch cap must NOT have been lowered — that's the
        // regression C4 was about.
        let default_fetch_max = adaptive::ChannelMax::default().fetch;
        assert_eq!(
            controller.config.max.fetch, default_fetch_max,
            "fetch cap was lowered by store/quote pin (C4 regression)"
        );
        // Cold-start values must respect the lowered ceilings.
        assert!(
            controller.quote.current() <= 4,
            "quote start exceeds its cap"
        );
        assert!(
            controller.store.current() <= 2,
            "store start exceeds its cap"
        );
    }

    /// Default ClientConfig must NOT silently lower the controller's
    /// per-channel ceilings — the adaptive defaults give every channel
    /// real headroom to grow. This guards against future commits
    /// re-introducing a global clamp.
    #[test]
    fn default_client_config_does_not_clamp_controller_max() {
        let cfg = ClientConfig::default();
        let (controller, _) = build_controller(&cfg);
        let defaults = adaptive::ChannelMax::default();
        // The legacy fields default to 32/8 (the prior static knobs),
        // both of which are <= the per-channel adaptive defaults
        // (128/64). build_controller must keep the larger, not clobber
        // with the legacy values.
        assert_eq!(controller.config.max.quote, defaults.quote);
        assert_eq!(controller.config.max.store, defaults.store);
        assert_eq!(controller.config.max.fetch, defaults.fetch);
        // Compile-time-ish guard: if a new variant is added to Error,
        // this exhaustive (no `_` arm) match fails to compile, forcing
        // an update to classify_error and its mapping test above. The
        // closure is never called; only its compilation matters.
        let _ = |e: &Error| match e {
            Error::Timeout(_)
            | Error::Network(_)
            | Error::InsufficientPeers(_)
            | Error::Storage(_)
            | Error::Payment(_)
            | Error::Protocol(_)
            | Error::InvalidData(_)
            | Error::Serialization(_)
            | Error::Crypto(_)
            | Error::Io(_)
            | Error::Config(_)
            | Error::SignatureVerification(_)
            | Error::Encryption(_)
            | Error::AlreadyStored
            | Error::InsufficientDiskSpace(_)
            | Error::CostEstimationInconclusive(_)
            | Error::PartialUpload { .. } => (),
        };
    }
}
609}