ant-core 0.2.3

Headless Rust library for the Autonomi network: data storage and retrieval with self-encryption and EVM payments, plus node lifecycle management.
Documentation
//! In-memory data operations using self-encryption.
//!
//! Upload and download raw byte data. Content is encrypted via
//! convergent encryption and stored as content-addressed chunks.
//! Use this when you already have data in memory (e.g., `Bytes`).
//! For file-based streaming uploads that avoid loading the entire
//! file into memory, see the `file` module.

use crate::data::client::adaptive::{observe_op, rebucketed_ordered};
use crate::data::client::batch::{PaymentIntent, PreparedChunk};
use crate::data::client::classify_error;
use crate::data::client::file::{ExternalPaymentInfo, PreparedUpload, Visibility};
use crate::data::client::merkle::PaymentMode;
use crate::data::client::Client;
use crate::data::error::{Error, Result};
use ant_protocol::{compute_address, DATA_TYPE_CHUNK};
use bytes::Bytes;
use futures::stream::StreamExt;
use self_encryption::{decrypt, encrypt, DataMap, EncryptedChunk};
use tracing::{debug, info};

/// Result of an in-memory data upload: the `DataMap` needed to retrieve the data.
#[derive(Debug, Clone)]
pub struct DataUploadResult {
    /// The data map containing chunk metadata for reconstruction.
    /// Pass it to [`Client::data_download`] to retrieve the content, or
    /// persist it on the network via [`Client::data_map_store`] to obtain
    /// a shareable address.
    pub data_map: DataMap,
    /// Number of chunks stored on the network.
    pub chunks_stored: usize,
    /// Which payment mode was actually used (not just requested).
    /// An `Auto` request resolves to `Merkle` or `Single` depending on
    /// chunk count and network conditions (see `data_upload_with_mode`).
    pub payment_mode_used: PaymentMode,
}

impl Client {
    /// Upload in-memory data to the network using self-encryption.
    ///
    /// The content is encrypted and split into chunks, each stored
    /// as a content-addressed chunk on the network. Returns a `DataMap`
    /// that can be used to retrieve and decrypt the data.
    ///
    /// # Errors
    ///
    /// Returns an error if encryption fails or any chunk cannot be stored.
    /// Upload in-memory data to the network using self-encryption.
    ///
    /// Encrypts the content via convergent encryption, splits it into
    /// content-addressed chunks, and stores every chunk via the wave-batch
    /// upload path. The returned `DataMap` is required to later retrieve
    /// and decrypt the data.
    ///
    /// # Errors
    ///
    /// Returns an error if encryption fails or any chunk cannot be stored.
    pub async fn data_upload(&self, content: Bytes) -> Result<DataUploadResult> {
        let content_len = content.len();
        debug!("Encrypting data ({content_len} bytes)");

        let (data_map, encrypted_chunks) = encrypt(content)
            .map_err(|e| Error::Encryption(format!("Failed to encrypt data: {e}")))?;

        info!("Data encrypted into {} chunks", encrypted_chunks.len());

        // Keep only the ciphertext payloads; the reconstruction metadata
        // lives in the DataMap.
        let payloads: Vec<Bytes> = encrypted_chunks
            .into_iter()
            .map(|encrypted| encrypted.content)
            .collect();

        let (stored_addresses, _storage_cost, _gas_cost) =
            self.batch_upload_chunks(payloads).await?;
        let chunks_stored = stored_addresses.len();

        info!("Data uploaded: {chunks_stored} chunks stored ({content_len} bytes original)");

        Ok(DataUploadResult {
            data_map,
            chunks_stored,
            payment_mode_used: PaymentMode::Single,
        })
    }

    /// Upload in-memory data with a specific payment mode.
    ///
    /// When `mode` is `Auto` and the chunk count >= threshold, or when `mode`
    /// is `Merkle`, this buffers all chunks and pays via a single merkle
    /// batch transaction. Otherwise falls back to per-chunk payment.
    ///
    /// # Errors
    ///
    /// Returns an error if encryption fails or any chunk cannot be stored.
    /// Upload in-memory data with a specific payment mode.
    ///
    /// When `mode` is `Auto` and the chunk count >= threshold, or when `mode`
    /// is `Merkle`, this buffers all chunks and pays via a single merkle
    /// batch transaction. Otherwise falls back to per-chunk payment.
    ///
    /// The `payment_mode_used` field on the returned result reports the path
    /// actually taken: an `Auto` request may resolve to `Single` even after
    /// merkle was attempted, if the network had too few peers for quoting.
    ///
    /// # Errors
    ///
    /// Returns an error if encryption fails or any chunk cannot be stored.
    pub async fn data_upload_with_mode(
        &self,
        content: Bytes,
        mode: PaymentMode,
    ) -> Result<DataUploadResult> {
        let content_len = content.len();
        debug!("Encrypting data ({content_len} bytes) with mode {mode:?}");

        let (data_map, encrypted_chunks) = encrypt(content)
            .map_err(|e| Error::Encryption(format!("Failed to encrypt data: {e}")))?;

        let chunk_count = encrypted_chunks.len();
        info!("Data encrypted into {chunk_count} chunks");

        // Keep only the ciphertext payloads; reconstruction metadata is in the DataMap.
        let chunk_contents: Vec<Bytes> = encrypted_chunks
            .into_iter()
            .map(|chunk| chunk.content)
            .collect();

        if self.should_use_merkle(chunk_count, mode) {
            // Merkle batch payment path
            info!("Using merkle batch payment for {chunk_count} chunks");

            // Content-derived addresses for every chunk; these feed both the
            // batch quote and the subsequent merkle upload.
            let addresses: Vec<[u8; 32]> =
                chunk_contents.iter().map(|c| compute_address(c)).collect();

            // Compute average chunk size for quoting
            // (max(1) guards the division; chunk_count should never be 0 here).
            let avg_size =
                chunk_contents.iter().map(bytes::Bytes::len).sum::<usize>() / chunk_count.max(1);
            let avg_size_u64 = u64::try_from(avg_size).unwrap_or(0);

            // Try merkle batch; in Auto mode, fall back to per-chunk on network issues.
            // Only `InsufficientPeers` triggers the fallback — any other error is
            // surfaced to the caller unchanged.
            let batch_result = match self
                .pay_for_merkle_batch(&addresses, DATA_TYPE_CHUNK, avg_size_u64)
                .await
            {
                Ok(result) => result,
                Err(Error::InsufficientPeers(ref msg)) if mode == PaymentMode::Auto => {
                    info!("Merkle needs more peers ({msg}), falling back to wave-batch");
                    // Note: `addresses` here shadows the outer vec with the
                    // addresses actually stored by the wave-batch path.
                    let (addresses, _sc, _gc) = self.batch_upload_chunks(chunk_contents).await?;
                    return Ok(DataUploadResult {
                        data_map,
                        chunks_stored: addresses.len(),
                        // Report the mode actually used, not the one requested.
                        payment_mode_used: PaymentMode::Single,
                    });
                }
                Err(e) => return Err(e),
            };

            let chunks_stored = self
                .merkle_upload_chunks(chunk_contents, addresses, &batch_result, None)
                .await?;

            info!("Data uploaded via merkle: {chunks_stored} chunks stored ({content_len} bytes)");
            Ok(DataUploadResult {
                data_map,
                chunks_stored,
                payment_mode_used: PaymentMode::Merkle,
            })
        } else {
            // Wave-based batch payment path (single EVM tx per wave).
            let (addresses, _sc, _gc) = self.batch_upload_chunks(chunk_contents).await?;

            info!(
                "Data uploaded: {} chunks stored ({content_len} bytes original)",
                addresses.len()
            );
            Ok(DataUploadResult {
                data_map,
                chunks_stored: addresses.len(),
                payment_mode_used: PaymentMode::Single,
            })
        }
    }

    /// Phase 1 of external-signer data upload: encrypt and collect quotes.
    ///
    /// Equivalent to [`Client::data_prepare_upload_with_visibility`] with
    /// [`Visibility::Private`] — see that method for details, including the
    /// follow-up call to [`Client::finalize_upload`] after the payment
    /// transaction has been signed and submitted externally.
    pub async fn data_prepare_upload(&self, content: Bytes) -> Result<PreparedUpload> {
        self.data_prepare_upload_with_visibility(content, Visibility::Private)
            .await
    }

    /// Phase 1 of external-signer data upload with explicit [`Visibility`] control.
    ///
    /// Encrypts in-memory data via self-encryption, then collects storage
    /// quotes for each chunk without making any on-chain payment. Returns
    /// a [`PreparedUpload`] containing the data map and a [`PaymentIntent`]
    /// with the payment details for external signing.
    ///
    /// When `visibility` is [`Visibility::Public`], the serialized `DataMap`
    /// is bundled into the payment batch as an additional chunk and its
    /// address is recorded on the returned [`PreparedUpload`]. After
    /// [`Client::finalize_upload`] succeeds, that address is surfaced via
    /// [`crate::data::client::file::FileUploadResult::data_map_address`] so
    /// the uploader can share a single address from which anyone can retrieve
    /// the data.
    ///
    /// Wave-batch payment only — the in-memory data path does not currently
    /// support merkle batching. Use [`Client::file_prepare_upload_with_visibility`]
    /// for merkle-eligible public uploads.
    ///
    /// After the caller signs and submits the payment transaction, call
    /// [`Client::finalize_upload`] with the tx hashes to complete storage.
    ///
    /// # Errors
    ///
    /// Returns an error if encryption fails, DataMap serialization fails
    /// (public only), or quote collection fails.
    pub async fn data_prepare_upload_with_visibility(
        &self,
        content: Bytes,
        visibility: Visibility,
    ) -> Result<PreparedUpload> {
        let content_len = content.len();
        debug!("Preparing data upload for external signing (visibility={visibility:?}, {content_len} bytes)");

        let (data_map, encrypted_chunks) = encrypt(content)
            .map_err(|e| Error::Encryption(format!("Failed to encrypt data: {e}")))?;

        let mut chunk_contents: Vec<Bytes> = encrypted_chunks
            .into_iter()
            .map(|chunk| chunk.content)
            .collect();

        info!("Data encrypted into {} chunks", chunk_contents.len());

        // For public uploads, bundle the serialized DataMap as an extra chunk
        // in the same payment batch. This lets the external signer pay for
        // the data chunks and the DataMap chunk in one flow, and lets the
        // finalize step return the DataMap's chunk address as the shareable
        // retrieval address.
        let data_map_address = match visibility {
            Visibility::Private => None,
            Visibility::Public => {
                let serialized = rmp_serde::to_vec(&data_map).map_err(|e| {
                    Error::Serialization(format!("Failed to serialize DataMap: {e}"))
                })?;
                let bytes = Bytes::from(serialized);
                let address = compute_address(&bytes);
                info!(
                    "Public upload: bundling DataMap chunk ({} bytes) at address {}",
                    bytes.len(),
                    hex::encode(address)
                );
                chunk_contents.push(bytes);
                Some(address)
            }
        };

        let chunk_count = chunk_contents.len();

        // Cap fan-out at the chunk count, but floor it at 1: if the adaptive
        // limiter ever reports 0, `buffer_unordered(0)` would never poll any
        // future and this stream would stall forever.
        let quote_limiter = self.controller().quote.clone();
        let quote_concurrency = quote_limiter.current().min(chunk_count).max(1);
        let results: Vec<Result<Option<PreparedChunk>>> = futures::stream::iter(chunk_contents)
            .map(|content| {
                let limiter = quote_limiter.clone();
                async move {
                    // observe_op feeds the outcome back to the adaptive limiter.
                    observe_op(
                        &limiter,
                        || async move { self.prepare_chunk_payment(content).await },
                        classify_error,
                    )
                    .await
                }
            })
            .buffer_unordered(quote_concurrency)
            .collect()
            .await;

        // Propagate the first error; `None` entries (already-paid chunks)
        // are simply skipped.
        let mut prepared_chunks = Vec::with_capacity(results.len());
        for result in results {
            if let Some(prepared) = result? {
                prepared_chunks.push(prepared);
            }
        }

        let payment_intent = PaymentIntent::from_prepared_chunks(&prepared_chunks);

        info!(
            "Data prepared for external signing: {} chunks, total {} atto ({content_len} bytes)",
            prepared_chunks.len(),
            payment_intent.total_amount,
        );

        Ok(PreparedUpload {
            data_map,
            payment_info: ExternalPaymentInfo::WaveBatch {
                prepared_chunks,
                payment_intent,
            },
            data_map_address,
        })
    }

    /// Store a `DataMap` on the network as a public chunk.
    ///
    /// The serialized `DataMap` is stored as a regular content-addressed chunk.
    /// Anyone who knows the returned address can retrieve and use the `DataMap`
    /// to download the original data.
    ///
    /// # Errors
    ///
    /// Returns an error if serialization or the chunk store fails.
    pub async fn data_map_store(&self, data_map: &DataMap) -> Result<[u8; 32]> {
        let serialized = rmp_serde::to_vec(data_map)
            .map_err(|e| Error::Serialization(format!("Failed to serialize DataMap: {e}")))?;

        info!(
            "Storing DataMap as public chunk ({} bytes serialized)",
            serialized.len()
        );

        self.chunk_put(Bytes::from(serialized)).await
    }

    /// Fetch a `DataMap` from the network by its chunk address.
    ///
    /// Retrieves the chunk at `address` and deserializes it as a `DataMap`.
    ///
    /// # Errors
    ///
    /// Returns an error if the chunk is not found or deserialization fails.
    pub async fn data_map_fetch(&self, address: &[u8; 32]) -> Result<DataMap> {
        let chunk = self.chunk_get(address).await?.ok_or_else(|| {
            Error::InvalidData(format!(
                "DataMap chunk not found at {}",
                hex::encode(address)
            ))
        })?;

        rmp_serde::from_slice(&chunk.content)
            .map_err(|e| Error::Serialization(format!("Failed to deserialize DataMap: {e}")))
    }

    /// Download and decrypt data from the network using its `DataMap`.
    ///
    /// Retrieves all chunks referenced by the data map, then decrypts
    /// and reassembles the original content. Fetches chunks concurrently;
    /// the fan-out is sized by the adaptive controller's `fetch` channel
    /// and ramps up under healthy conditions.
    ///
    /// # Errors
    ///
    /// Returns an error if any chunk cannot be retrieved or decryption fails.
    pub async fn data_download(&self, data_map: &DataMap) -> Result<Bytes> {
        let chunk_infos = data_map.infos();
        debug!("Downloading data ({} chunks)", chunk_infos.len());

        // Extract owned addresses to avoid HRTB lifetime issue with
        // stream::iter over references combined with async closures.
        // (The send_assertions test module below guards this invariant.)
        let addresses: Vec<[u8; 32]> = chunk_infos.iter().map(|info| info.dst_hash.0).collect();

        // Rolling rebucketing: re-reads the controller's fetch cap as
        // each slot frees, so a long download (e.g. 10 GB = ~2500
        // chunks) sees adaptive growth/decay mid-flight without batch
        // fences. Output is index-sorted so self_encryption decrypt
        // sees DataMap-ordered chunks.
        let fetch_limiter = self.controller().fetch.clone();
        let encrypted_chunks: Vec<EncryptedChunk> = rebucketed_ordered(
            &fetch_limiter,
            // Pair each address with its DataMap index so results can be
            // re-sorted after unordered completion.
            addresses.into_iter().enumerate(),
            |(idx, address)| {
                let limiter = fetch_limiter.clone();
                async move {
                    // observe_op reports success/failure back to the adaptive
                    // limiter; a `None` chunk (not found) is a hard error since
                    // every chunk is required for reconstruction.
                    let chunk = observe_op(
                        &limiter,
                        || async move { self.chunk_get(&address).await },
                        classify_error,
                    )
                    .await?
                    .ok_or_else(|| {
                        Error::InvalidData(format!(
                            "Missing chunk {} required for data reconstruction",
                            hex::encode(address)
                        ))
                    })?;
                    Ok::<_, Error>((
                        idx,
                        EncryptedChunk {
                            content: chunk.content,
                        },
                    ))
                }
            },
        )
        .await?;

        debug!(
            "All {} chunks retrieved, decrypting",
            encrypted_chunks.len()
        );

        let content = decrypt(data_map, &encrypted_chunks)
            .map_err(|e| Error::Encryption(format!("Failed to decrypt data: {e}")))?;

        info!("Data downloaded and decrypted ({} bytes)", content.len());

        Ok(content)
    }
}

/// Compile-time assertions that Client method futures are Send.
///
/// These methods are called from axum handlers and tokio::spawn contexts
/// that require Send + 'static. The async closures inside stream
/// combinators must not capture references with concrete lifetimes
/// (HRTB issue). If any of these checks fail, the stream closures
/// need restructuring to use owned values instead of references.
///
/// These functions are never run — they exist only so the compiler
/// proves the `Send` bound; hence the `todo!()` bodies and the
/// `dead_code`/`unreachable_code` allows.
#[cfg(test)]
mod send_assertions {
    use super::*;

    // Taking `&T` (rather than `T`) means the future need not be moved
    // or be `Unpin` for the bound to be checked.
    fn _assert_send<T: Send>(_: &T) {}

    #[allow(
        dead_code,
        unreachable_code,
        unused_variables,
        clippy::diverging_sub_expression
    )]
    async fn _data_download_is_send(client: &Client) {
        let dm: DataMap = todo!();
        let fut = client.data_download(&dm);
        _assert_send(&fut);
    }

    #[allow(dead_code, unreachable_code, clippy::diverging_sub_expression)]
    async fn _data_upload_is_send(client: &Client) {
        let fut = client.data_upload(Bytes::new());
        _assert_send(&fut);
    }

    #[allow(dead_code, unreachable_code, clippy::diverging_sub_expression)]
    async fn _data_upload_with_mode_is_send(client: &Client) {
        let fut = client.data_upload_with_mode(Bytes::new(), PaymentMode::Auto);
        _assert_send(&fut);
    }

    #[allow(dead_code, unreachable_code, clippy::diverging_sub_expression)]
    async fn _data_prepare_upload_is_send(client: &Client) {
        let fut = client.data_prepare_upload(Bytes::new());
        _assert_send(&fut);
    }

    #[allow(dead_code, unreachable_code, clippy::diverging_sub_expression)]
    async fn _data_prepare_upload_with_visibility_is_send(client: &Client) {
        let fut = client.data_prepare_upload_with_visibility(Bytes::new(), Visibility::Public);
        _assert_send(&fut);
    }
}