ant_core/data/client/mod.rs
1//! Client operations for the Autonomi network.
2//!
3//! Provides high-level APIs for storing and retrieving data
4//! on the Autonomi decentralized network.
5
6pub mod adaptive;
7pub mod batch;
8pub mod cache;
9pub mod chunk;
10pub mod data;
11pub mod file;
12pub mod merkle;
13pub mod payment;
14pub(crate) mod peer_cache;
15pub mod quote;
16
17use crate::data::client::adaptive::{AdaptiveConfig, AdaptiveController, ChannelStart, Outcome};
18use crate::data::client::cache::ChunkCache;
19use crate::data::error::{Error, Result};
20use crate::data::network::Network;
21use ant_protocol::evm::Wallet;
22use ant_protocol::transport::{MultiAddr, P2PNode, PeerId};
23use ant_protocol::{XorName, CLOSE_GROUP_SIZE};
24use std::path::PathBuf;
25use std::sync::atomic::{AtomicU64, Ordering};
26use std::sync::Arc;
27use tracing::debug;
28
/// Classify a `data::error::Error` into a controller `Outcome`.
///
/// Capacity signals (Timeout / NetworkError) drive the controller
/// down; application errors do not. The mapping is conservative:
/// anything that COULD be transport-related is treated as a network
/// signal, because under-classifying a real network failure as
/// "application error" makes the controller blind to genuine stress.
///
/// Mapping policy:
/// - `Timeout` -> `Timeout` (per-op deadline elapsed)
/// - `Network`, `InsufficientPeers`, `Io` -> `NetworkError` (transport
///   layer reported failure)
/// - `Protocol`, `Storage` -> `NetworkError` (these wrap remote errors
///   that frequently include peer disconnects mid-stream — under
///   network stress these are how transport failures surface)
/// - `PartialUpload` -> `NetworkError` (literal capacity signal: some
///   chunks could not be stored)
/// - `AlreadyStored`, `Encryption`, `Crypto`, `Payment`,
///   `Serialization`, `InvalidData`, `SignatureVerification`,
///   `Config`, `InsufficientDiskSpace`, `CostEstimationInconclusive`,
///   `BadQuoteBinding`
///   -> `ApplicationError` (would happen on a perfectly healthy link)
///
/// The match is deliberately exhaustive (no `_` arm): adding a new
/// `Error` variant forces a compile error here so the new variant
/// gets a conscious classification.
pub(crate) fn classify_error(err: &Error) -> Outcome {
    match err {
        Error::Timeout(_) => Outcome::Timeout,
        Error::Network(_)
        | Error::InsufficientPeers(_)
        | Error::Io(_)
        | Error::Protocol(_)
        | Error::Storage(_)
        | Error::PartialUpload { .. } => Outcome::NetworkError,
        Error::AlreadyStored
        | Error::Encryption(_)
        | Error::Crypto(_)
        | Error::Payment(_)
        | Error::Serialization(_)
        | Error::InvalidData(_)
        | Error::SignatureVerification(_)
        | Error::Config(_)
        | Error::InsufficientDiskSpace(_)
        | Error::CostEstimationInconclusive(_)
        | Error::BadQuoteBinding { .. } => Outcome::ApplicationError,
    }
}
72
/// Default timeout for lightweight network operations (quotes, DHT lookups) in seconds.
const DEFAULT_QUOTE_TIMEOUT_SECS: u64 = 10;

/// Default timeout for chunk store operations in seconds.
///
/// Chunk PUTs transfer multi-MB payloads to multiple peers. On residential
/// connections with limited upload bandwidth, the default quote timeout (10 s)
/// is far too short — a 4 MB chunk at 1 Mbps takes ~32 s just for the data
/// transfer, before accounting for QUIC slow-start and NAT traversal overhead.
/// 120 s gives that worst case roughly 4x headroom while still bounding a
/// truly stalled PUT. (Previously this was 10 s — identical to the quote
/// timeout — which contradicted both this rationale and the
/// `ClientConfig::store_timeout_secs` doc requiring it to be larger.)
const DEFAULT_STORE_TIMEOUT_SECS: u64 = 120;

/// Default quote concurrency: high because quoting is pure network I/O
/// (DHT lookups + small request/response messages) with no CPU-bound work.
const DEFAULT_QUOTE_CONCURRENCY: usize = 32;

/// Default store concurrency: moderate because each chunk PUT sends ~4MB
/// to 7 close-group peers. At 8 concurrent stores, ~225MB of outbound
/// traffic can be in flight. Users on fast connections can increase this
/// with --store-concurrency; users on slow connections can decrease it.
const DEFAULT_STORE_CONCURRENCY: usize = 8;
93
/// Configuration for the Autonomi client.
///
/// Construct via `ClientConfig::default()` and override individual
/// fields with struct-update syntax. Defaults come from the module
/// constants (`DEFAULT_*`) and `AdaptiveConfig::default()`.
#[derive(Debug, Clone)]
pub struct ClientConfig {
    /// Per-op timeout for lightweight network operations (quotes,
    /// DHT lookups), in seconds. The adaptive controller does NOT
    /// currently size timeouts; this remains a static knob.
    /// Default: `DEFAULT_QUOTE_TIMEOUT_SECS`.
    pub quote_timeout_secs: u64,
    /// Per-op timeout for chunk store (PUT) operations, in seconds.
    /// Should be larger than `quote_timeout_secs` because chunk PUTs
    /// transfer multi-MB payloads. The adaptive controller does NOT
    /// currently size timeouts; this remains a static knob.
    /// Default: `DEFAULT_STORE_TIMEOUT_SECS`.
    pub store_timeout_secs: u64,
    /// Number of closest peers to consider for routing.
    /// Default: `CLOSE_GROUP_SIZE` from `ant_protocol`.
    pub close_group_size: usize,
    /// **Deprecated.** Pre-adaptive ceiling for quote concurrency.
    ///
    /// The adaptive controller now sizes quote fan-out from observed
    /// signals. This field, when non-zero and smaller than the
    /// controller's per-channel default, clamps the **quote channel
    /// only** (it does NOT bleed into store or fetch). A value equal
    /// to the historic default (`DEFAULT_QUOTE_CONCURRENCY`) is
    /// treated as "not pinned" — see `build_controller`. Removed in a
    /// future release.
    pub quote_concurrency: usize,
    /// **Deprecated.** Pre-adaptive ceiling for store concurrency.
    ///
    /// The adaptive controller now sizes store fan-out from observed
    /// signals. This field, when non-zero and smaller than the
    /// controller's per-channel default, clamps the **store channel
    /// only** (it does NOT bleed into quote or fetch). A value equal
    /// to the historic default (`DEFAULT_STORE_CONCURRENCY`) is
    /// treated as "not pinned" — see `build_controller`. Removed in a
    /// future release.
    pub store_concurrency: usize,
    /// Adaptive controller configuration. Defaults are tuned to match
    /// or exceed the prior static behavior — disabling adaptation
    /// (`adaptive.enabled = false`) reverts to the controller's
    /// `initial` values without re-evaluation.
    pub adaptive: AdaptiveConfig,
    /// Allow loopback (`127.0.0.1`) connections in the saorsa-transport
    /// layer. Set to `true` only for devnet / local testing. Production
    /// peers on the public Autonomi network reject the QUIC handshake
    /// variant produced when this is `true`, so the default is `false`.
    ///
    /// This mirrors the `--allow-loopback` flag in `ant-cli`, which already
    /// defaults to `false` and threads through to the same
    /// `CoreNodeConfig::builder().local(...)` call.
    pub allow_loopback: bool,
    /// Bind a dual-stack IPv6 socket (`true`) or an IPv4-only socket
    /// (`false`). Defaults to `true`, matching the CLI default.
    ///
    /// Set to `false` only when running on hosts without a working IPv6
    /// stack, to avoid advertising unreachable v6 addresses to the DHT
    /// (which causes slow connects and junk DHT address records). This
    /// mirrors the `--ipv4-only` flag in `ant-cli`.
    pub ipv6: bool,
}
147
148impl Default for ClientConfig {
149 fn default() -> Self {
150 Self {
151 quote_timeout_secs: DEFAULT_QUOTE_TIMEOUT_SECS,
152 store_timeout_secs: DEFAULT_STORE_TIMEOUT_SECS,
153 close_group_size: CLOSE_GROUP_SIZE,
154 quote_concurrency: DEFAULT_QUOTE_CONCURRENCY,
155 store_concurrency: DEFAULT_STORE_CONCURRENCY,
156 adaptive: AdaptiveConfig::default(),
157 allow_loopback: false,
158 ipv6: true,
159 }
160 }
161}
162
163/// Build the adaptive controller for a `Client`. Loads any persisted
164/// snapshot, clamps cold-start values into the deprecated-flag bounds
165/// **per channel** (so a pin on `--store-concurrency` does NOT bleed
166/// into the fetch / quote channels), and returns the persistence path
167/// so callers can save back at shutdown.
168fn build_controller(config: &ClientConfig) -> (AdaptiveController, Option<PathBuf>) {
169 let mut adaptive_cfg = config.adaptive.clone();
170
171 // Per-channel ceilings: each legacy field is interpreted as a cap
172 // for ONLY its matching channel. The fetch channel has no
173 // pre-existing legacy field; it always uses the controller's
174 // default ceiling.
175 //
176 // The legacy fields are non-zero by ClientConfig::default(), but
177 // we honor them as bounds only when they would actually CONSTRAIN
178 // the controller — i.e. when smaller than the per-channel default
179 // max. A default ClientConfig must not silently lower the
180 // controller's ceilings.
181 // A value equal to the historic legacy default is treated as
182 // "not pinned by the user" — without this, every default
183 // ClientConfig would silently lower the controller's per-channel
184 // ceilings to the prior static values (32/8) and the controller
185 // could never grow above them.
186 let user_quote_max = config.quote_concurrency;
187 let user_store_max = config.store_concurrency;
188 let quote_pinned = user_quote_max > 0 && user_quote_max != DEFAULT_QUOTE_CONCURRENCY;
189 let store_pinned = user_store_max > 0 && user_store_max != DEFAULT_STORE_CONCURRENCY;
190 if quote_pinned && user_quote_max < adaptive_cfg.max.quote {
191 adaptive_cfg.max.quote = user_quote_max;
192 }
193 if store_pinned && user_store_max < adaptive_cfg.max.store {
194 adaptive_cfg.max.store = user_store_max;
195 }
196
197 // Cold-start values: matched to the prior static defaults. If the
198 // legacy field caps the channel below the cold-start, lower the
199 // start to match — never start above the channel's max.
200 let mut start = ChannelStart::default();
201 start.quote = start.quote.min(adaptive_cfg.max.quote);
202 start.store = start.store.min(adaptive_cfg.max.store);
203 start.fetch = start.fetch.min(adaptive_cfg.max.fetch);
204
205 let adaptive_enabled = adaptive_cfg.enabled;
206 let controller = AdaptiveController::new(start, adaptive_cfg);
207 // Skip disk warm-start entirely when adaptation is disabled —
208 // fixed-concurrency mode means the user wants exactly the cold
209 // start, no surprises from prior runs. (warm_start is also a
210 // no-op when disabled, but skipping the load avoids file I/O
211 // and the path-resolution side effects.)
212 let persist_path = if adaptive_enabled {
213 let p = adaptive::default_persist_path();
214 if let Some(ref path) = p {
215 if let Some(snap) = adaptive::load_snapshot(path) {
216 debug!(path = %path.display(), "adaptive: warm-start from disk");
217 controller.warm_start(snap);
218 }
219 }
220 p
221 } else {
222 // Even with adaptation off, persist_path is computed so
223 // explicit save_adaptive_snapshot() calls still work — but
224 // the controller currently never moves, so saving the cold
225 // start is harmless.
226 adaptive::default_persist_path()
227 };
228
229 // Note: self_encryption's `STREAM_DECRYPT_BATCH_SIZE` is a
230 // `LazyLock<usize>` populated from the env var at first access
231 // and frozen for the process lifetime. Setting the env var from
232 // Rust would require `std::env::set_var`, which is `unsafe`
233 // since Rust 1.80 (it races against concurrent reads in any
234 // other thread); per project policy, `unsafe` is banned.
235 //
236 // The adaptive controller still drives fan-out *inside* each
237 // batch — we re-read `controller.fetch.current()` in the
238 // `streaming_decrypt` callback. The upstream batch size only
239 // controls how many chunks `self_encryption` asks us for at a
240 // time (default 10). For larger batch sizes export
241 // `STREAM_DECRYPT_BATCH_SIZE` before launching the process.
242
243 (controller, persist_path)
244}
245
/// Client for the Autonomi decentralized network.
///
/// Provides high-level APIs for storing and retrieving chunks
/// and files on the network.
pub struct Client {
    /// Static knobs: timeouts, close-group size, legacy concurrency
    /// caps, adaptive settings. See [`ClientConfig`].
    config: ClientConfig,
    /// Transport / DHT layer all network requests go through.
    network: Network,
    /// Payment wallet; `None` until `with_wallet` is called.
    wallet: Option<Arc<Wallet>>,
    /// EVM network for token approvals / contract interactions; set by
    /// `with_evm_network` or derived from the wallet in `with_wallet`.
    evm_network: Option<ant_protocol::evm::Network>,
    /// Chunk cache (see the `cache` module for eviction semantics).
    chunk_cache: ChunkCache,
    /// Monotonic counter backing `next_request_id()`; starts at 1.
    next_request_id: AtomicU64,
    /// Adaptive concurrency controller: replaces the static
    /// quote/store concurrency knobs. See `adaptive` module.
    controller: AdaptiveController,
    /// Path the controller persists its snapshot to. `None` disables
    /// persistence (useful for tests / non-disk environments).
    persist_path: Option<PathBuf>,
}
264
265impl Client {
266 /// Create a client connected to the given P2P node.
267 #[must_use]
268 pub fn from_node(node: Arc<P2PNode>, config: ClientConfig) -> Self {
269 let network = Network::from_node(node);
270 let (controller, persist_path) = build_controller(&config);
271 Self {
272 config,
273 network,
274 wallet: None,
275 evm_network: None,
276 chunk_cache: ChunkCache::default(),
277 next_request_id: AtomicU64::new(1),
278 controller,
279 persist_path,
280 }
281 }
282
283 /// Create a client connected to bootstrap peers.
284 ///
285 /// Threads `config.allow_loopback` and `config.ipv6` through to
286 /// `Network::new`, which controls the saorsa-transport `local` and
287 /// `ipv6` flags on the underlying `CoreNodeConfig`. See
288 /// `ClientConfig::allow_loopback` and `ClientConfig::ipv6` for details.
289 ///
290 /// # Errors
291 ///
292 /// Returns an error if the P2P node cannot be created or bootstrapping fails.
293 pub async fn connect(
294 bootstrap_peers: &[std::net::SocketAddr],
295 config: ClientConfig,
296 ) -> Result<Self> {
297 debug!(
298 "Connecting to Autonomi network with {} bootstrap peers (allow_loopback={}, ipv6={})",
299 bootstrap_peers.len(),
300 config.allow_loopback,
301 config.ipv6,
302 );
303 let network = Network::new(bootstrap_peers, config.allow_loopback, config.ipv6).await?;
304 let (controller, persist_path) = build_controller(&config);
305 Ok(Self {
306 config,
307 network,
308 wallet: None,
309 evm_network: None,
310 chunk_cache: ChunkCache::default(),
311 next_request_id: AtomicU64::new(1),
312 controller,
313 persist_path,
314 })
315 }
316
317 /// Set the wallet for payment operations.
318 ///
319 /// Also populates the EVM network from the wallet so that
320 /// token approvals work without a separate `with_evm_network` call.
321 #[must_use]
322 pub fn with_wallet(mut self, wallet: Wallet) -> Self {
323 self.evm_network = Some(wallet.network().clone());
324 self.wallet = Some(Arc::new(wallet));
325 self
326 }
327
328 /// Set the EVM network without requiring a wallet.
329 ///
330 /// This enables token approval and contract interactions
331 /// for external-signer flows where the private key lives outside Rust.
332 #[must_use]
333 pub fn with_evm_network(mut self, network: ant_protocol::evm::Network) -> Self {
334 self.evm_network = Some(network);
335 self
336 }
337
338 /// Get the EVM network, falling back to the wallet's network if available.
339 ///
340 /// # Errors
341 ///
342 /// Returns an error if neither `with_evm_network` nor `with_wallet` was called.
343 pub(crate) fn require_evm_network(&self) -> Result<&ant_protocol::evm::Network> {
344 if let Some(ref net) = self.evm_network {
345 return Ok(net);
346 }
347 if let Some(ref wallet) = self.wallet {
348 return Ok(wallet.network());
349 }
350 Err(Error::Payment(
351 "EVM network not configured — call with_evm_network() or with_wallet() first"
352 .to_string(),
353 ))
354 }
355
356 /// Get the client configuration.
357 #[must_use]
358 pub fn config(&self) -> &ClientConfig {
359 &self.config
360 }
361
362 /// Get a mutable reference to the client configuration.
363 pub fn config_mut(&mut self) -> &mut ClientConfig {
364 &mut self.config
365 }
366
367 /// Get a reference to the network layer.
368 #[must_use]
369 pub fn network(&self) -> &Network {
370 &self.network
371 }
372
373 /// Get the wallet, if configured.
374 #[must_use]
375 pub fn wallet(&self) -> Option<&Arc<Wallet>> {
376 self.wallet.as_ref()
377 }
378
379 /// Get a reference to the chunk cache.
380 #[must_use]
381 pub fn chunk_cache(&self) -> &ChunkCache {
382 &self.chunk_cache
383 }
384
385 /// Adaptive concurrency controller. Hot loops read
386 /// `controller().<channel>.current()` to size their fan-out and
387 /// call `.observe(...)` on each completion.
388 #[must_use]
389 pub fn controller(&self) -> &AdaptiveController {
390 &self.controller
391 }
392
393 /// Persist the current adaptive snapshot to disk so the next
394 /// `Client::connect` warm-starts at the learned values instead of
395 /// cold defaults. Best effort — failures log and are discarded.
396 /// Idempotent. Safe to call from a Drop impl or an explicit
397 /// shutdown hook.
398 pub fn save_adaptive_snapshot(&self) {
399 if let Some(ref path) = self.persist_path {
400 adaptive::save_snapshot(path, self.controller.snapshot());
401 }
402 }
403
404 /// Get the next request ID for protocol messages.
405 pub(crate) fn next_request_id(&self) -> u64 {
406 self.next_request_id.fetch_add(1, Ordering::Relaxed)
407 }
408
409 /// Return all peers in the close group for a target address.
410 ///
411 /// Queries the DHT for the closest peers by XOR distance.
412 /// Returns each peer paired with its known network addresses.
413 pub(crate) async fn close_group_peers(
414 &self,
415 target: &XorName,
416 ) -> Result<Vec<(PeerId, Vec<MultiAddr>)>> {
417 let peers = self
418 .network()
419 .find_closest_peers(target, self.config().close_group_size)
420 .await?;
421
422 if peers.is_empty() {
423 return Err(Error::InsufficientPeers(
424 "DHT returned no peers for target address".to_string(),
425 ));
426 }
427 Ok(peers)
428 }
429}
430
/// Upper bound on the synchronous snapshot write performed in
/// `Client`'s `Drop` impl, so a stalled (e.g. network-mounted) data
/// dir cannot block process shutdown.
///
/// The Drop-time save persists the adaptive snapshot for every
/// caller — CLI, daemon, library user, integration test — giving
/// warm-start carry-over for free without remembering to call
/// `save_adaptive_snapshot()` explicitly.
///
/// We deliberately write SYNCHRONOUSLY (not via `spawn_blocking`)
/// because Drop runs during process shutdown / runtime teardown,
/// when fire-and-forget background tasks can be dropped before they
/// complete and the snapshot is silently lost. A small synchronous
/// stall on a tokio worker (typically <1ms for a local-disk JSON
/// write of ~50 bytes) is the right tradeoff for guaranteed
/// persistence. Best effort, sync `std::fs`; the inner helper is
/// expected to absorb I/O failures and a poisoned mutex rather than
/// panic — confirm in `adaptive::save_snapshot_with_timeout`.
const DROP_SAVE_TIMEOUT: std::time::Duration = std::time::Duration::from_millis(500);
446
447impl Drop for Client {
448 fn drop(&mut self) {
449 let Some(path) = self.persist_path.clone() else {
450 return;
451 };
452 let snap = self.controller.snapshot();
453 adaptive::save_snapshot_with_timeout(path, snap, DROP_SAVE_TIMEOUT);
454 }
455}
456
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    /// Exercise `classify_error` across the constructible variants of
    /// `data::error::Error` and assert each maps to the only sensible
    /// `Outcome`.
    ///
    /// NOTE(review): despite the "every variant" intent, there is NO
    /// case for `Error::BadQuoteBinding { .. }` — its field shape is
    /// not visible from this module, so a case can't be added here
    /// without checking the error definition. Add one (expected:
    /// `Outcome::ApplicationError`) once the fields are confirmed.
    /// Until then, the exhaustive `match` at the bottom of
    /// `default_client_config_does_not_clamp_controller_max` is what
    /// forces a compile error when a brand-new variant is added
    /// without updating `classify_error`.
    ///
    /// Mapping policy (mirrors `classify_error` doc):
    /// - `Timeout` -> `Outcome::Timeout`
    /// - `Network`, `InsufficientPeers`, `Io`, `Protocol`, `Storage`,
    ///   `PartialUpload` -> `Outcome::NetworkError` (transport-related
    ///   or literal capacity failure)
    /// - everything else -> `Outcome::ApplicationError` (would happen
    ///   on a perfectly healthy network)
    #[test]
    fn classify_error_covers_all_variants() {
        let cases: Vec<(Error, Outcome)> = vec![
            (Error::Timeout("t".to_string()), Outcome::Timeout),
            (Error::Network("n".to_string()), Outcome::NetworkError),
            (
                Error::InsufficientPeers("p".to_string()),
                Outcome::NetworkError,
            ),
            (Error::Storage("s".to_string()), Outcome::NetworkError),
            (Error::Payment("p".to_string()), Outcome::ApplicationError),
            (Error::Protocol("p".to_string()), Outcome::NetworkError),
            (
                Error::InvalidData("d".to_string()),
                Outcome::ApplicationError,
            ),
            (
                Error::Serialization("s".to_string()),
                Outcome::ApplicationError,
            ),
            (Error::Crypto("c".to_string()), Outcome::ApplicationError),
            (
                Error::Io(std::io::Error::other("io")),
                Outcome::NetworkError,
            ),
            (Error::Config("c".to_string()), Outcome::ApplicationError),
            (
                Error::SignatureVerification("s".to_string()),
                Outcome::ApplicationError,
            ),
            (
                Error::Encryption("e".to_string()),
                Outcome::ApplicationError,
            ),
            (Error::AlreadyStored, Outcome::ApplicationError),
            (
                Error::InsufficientDiskSpace("d".to_string()),
                Outcome::ApplicationError,
            ),
            (
                Error::CostEstimationInconclusive("c".to_string()),
                Outcome::ApplicationError,
            ),
            (
                Error::PartialUpload {
                    stored: vec![],
                    stored_count: 0,
                    failed: vec![],
                    failed_count: 0,
                    total_chunks: 0,
                    reason: "r".to_string(),
                },
                Outcome::NetworkError,
            ),
            // NOTE(review): Error::BadQuoteBinding { .. } intentionally
            // has no case yet — see the doc comment above.
        ];
        for (err, expected) in &cases {
            let got = classify_error(err);
            assert_eq!(
                got, *expected,
                "classify_error({err:?}) = {got:?}, expected {expected:?}",
            );
        }
    }

    /// C4 fix guard: pinning the legacy `quote_concurrency` /
    /// `store_concurrency` ClientConfig fields must clamp ONLY the
    /// matching channel's max in the resulting controller. The fetch
    /// (download) channel must keep its full default ceiling.
    #[test]
    fn legacy_concurrency_pin_does_not_bleed_across_channels() {
        let cfg = ClientConfig {
            quote_concurrency: 4,
            store_concurrency: 2,
            ..ClientConfig::default()
        };
        let (controller, _) = build_controller(&cfg);
        // The store/quote caps must be clamped to the user's pin.
        assert_eq!(controller.config.max.quote, 4, "quote pin not respected");
        assert_eq!(controller.config.max.store, 2, "store pin not respected");
        // The fetch cap must NOT have been lowered — that's the
        // regression C4 was about.
        let default_fetch_max = adaptive::ChannelMax::default().fetch;
        assert_eq!(
            controller.config.max.fetch, default_fetch_max,
            "fetch cap was lowered by store/quote pin (C4 regression)"
        );
        // Cold-start values must respect the lowered ceilings.
        assert!(
            controller.quote.current() <= 4,
            "quote start exceeds its cap"
        );
        assert!(
            controller.store.current() <= 2,
            "store start exceeds its cap"
        );
    }

    /// Default ClientConfig must NOT silently lower the controller's
    /// per-channel ceilings — the adaptive defaults give every channel
    /// real headroom to grow. This guards against future commits
    /// re-introducing a global clamp.
    #[test]
    fn default_client_config_does_not_clamp_controller_max() {
        let cfg = ClientConfig::default();
        let (controller, _) = build_controller(&cfg);
        let defaults = adaptive::ChannelMax::default();
        // The legacy fields default to the prior static knobs
        // (DEFAULT_QUOTE_CONCURRENCY / DEFAULT_STORE_CONCURRENCY),
        // which build_controller treats as "not pinned" — the
        // controller's own per-channel defaults must survive intact.
        assert_eq!(controller.config.max.quote, defaults.quote);
        assert_eq!(controller.config.max.store, defaults.store);
        assert_eq!(controller.config.max.fetch, defaults.fetch);
        // Compile-time-ish guard: if a new variant is added to Error,
        // this exhaustive match (no `_` arm) forces an update here,
        // which in turn reminds the author to update classify_error
        // and the case table in classify_error_covers_all_variants.
        let _ = |e: &Error| match e {
            Error::Timeout(_)
            | Error::Network(_)
            | Error::InsufficientPeers(_)
            | Error::Storage(_)
            | Error::Payment(_)
            | Error::Protocol(_)
            | Error::InvalidData(_)
            | Error::Serialization(_)
            | Error::Crypto(_)
            | Error::Io(_)
            | Error::Config(_)
            | Error::SignatureVerification(_)
            | Error::Encryption(_)
            | Error::AlreadyStored
            | Error::InsufficientDiskSpace(_)
            | Error::CostEstimationInconclusive(_)
            | Error::PartialUpload { .. }
            | Error::BadQuoteBinding { .. } => (),
        };
    }
}
611}