spark_rust/wallet/handlers/transfer.rs

1use crate::with_handler_lock;
2use crate::{
3    constants::spark::DEFAULT_TRANSFER_EXPIRY,
4    error::{SparkSdkError, ValidationError},
5    signer::traits::{derivation_path::SparkKeyType, SparkSigner},
6    wallet::{
7        internal_handlers::traits::leaves::LeavesInternalHandlers,
8        internal_handlers::traits::transfer::{LeafKeyTweak, TransferInternalHandlers},
9        leaf_manager::SparkNodeStatus,
10    },
11    SparkSdk,
12};
13use bitcoin::secp256k1::PublicKey;
14use spark_protos::spark::{
15    query_pending_transfers_request::Participant, QueryAllTransfersResponse,
16    QueryPendingTransfersRequest, Transfer, TransferStatus,
17};
18
19impl<S: SparkSigner + Send + Sync + Clone + 'static> SparkSdk<S> {
20    /// Queries all pending transfers where the current user is the receiver.
21    ///
22    /// This function retrieves all pending transfers that are waiting to be accepted by the current user.
23    /// A pending transfer represents funds that have been sent to the user but have not yet been claimed.
24    /// The transfers remain in a pending state until the receiver claims them, at which point the funds
25    /// become available in their wallet.
26    ///
27    /// # Returns
28    ///
29    /// * `Ok(Vec<Transfer>)` - A vector of pending [`Transfer`] objects if successful
30    /// * `Err(SparkSdkError)` - If there was an error querying the transfers
31    ///
32    /// # Example
33    ///
34    /// ```
35    /// # use spark_rust::SparkSdk;
36    /// # async fn example(sdk: SparkSdk) -> Result<(), Box<dyn std::error::Error>> {
37    /// let pending = sdk.query_pending_transfers().await?;
38    /// for transfer in pending {
39    ///     println!("Pending transfer: {:?} satoshis", transfer.total_value);
40    /// }
41    /// # Ok(())
42    /// # }
43    /// ```
44    #[cfg_attr(feature = "telemetry", tracing::instrument(skip_all))]
45    pub async fn query_pending_transfers(&self) -> Result<Vec<Transfer>, SparkSdkError> {
46        with_handler_lock!(self, async {
47            self.query_pending_transfers_internal().await
48        })
49        .await
50    }
51
52    #[cfg_attr(feature = "telemetry", tracing::instrument(skip_all))]
53    pub(crate) async fn query_pending_transfers_internal(
54        &self,
55    ) -> Result<Vec<Transfer>, SparkSdkError> {
56        let request_data = QueryPendingTransfersRequest {
57            transfer_ids: vec![],
58            participant: Some(Participant::ReceiverIdentityPublicKey(
59                self.get_spark_address()?.serialize().to_vec(),
60            )),
61            network: self.config.spark_config.network.marshal_proto(),
62        };
63
64        let response = self
65            .config
66            .spark_config
67            .call_with_retry(
68                request_data,
69                |mut client, req| {
70                    Box::pin(async move { client.query_pending_transfers(req).await })
71                },
72                None,
73            )
74            .await?;
75
76        Ok(response.transfers)
77    }
78
79    /// Initiates a transfer of funds to another user.
80    ///
81    /// This function handles the process of transferring funds from the current user's wallet to another user,
82    /// identified by their public key. The transfer process involves several steps:
83    ///
84    /// 1. Selecting appropriate leaves (UTXOs) that contain sufficient funds for the transfer
85    /// 2. Locking the selected leaves to prevent concurrent usage
86    /// 3. Generating new signing keys for the transfer
87    /// 4. Creating and signing the transfer transaction
88    /// 5. Removing the used leaves from the wallet
89    ///
90    /// The transfer remains in a pending state until the receiver claims it. The expiry time is set to
91    /// 30 days by default (see `DEFAULT_TRANSFER_EXPIRY`).
92    ///
93    /// # Arguments
94    ///
95    /// * `amount` - The amount to transfer in satoshis. Must be greater than the dust limit and the wallet
96    ///             must have a leaf with exactly this amount.
97    /// * `receiver_spark_address` - The Spark address identifying the receiver of the transfer. This should
98    ///                               be the receiver's Spark address, not a regular Bitcoin public key.
99    ///
100    /// # Returns
101    ///
102    /// * `Ok(String)` - The transfer ID if successful. This ID can be used to track the transfer status.
103    /// * `Err(SparkSdkError)` - If the transfer fails. Common error cases include:
104    ///   - No leaf with exact amount available
105    ///   - Failed to lock leaves
106    ///   - Failed to generate new signing keys
107    ///   - Network errors when communicating with Spark operators
108    ///
109    /// # Example
110    ///
111    /// ```
112    /// # use spark_rust::SparkSdk;
113    /// # use bitcoin::secp256k1::PublicKey;
114    /// # use std::str::FromStr;
115    /// # use uuid::Uuid;
116    /// # async fn example(sdk: &mut SparkSdk) -> Result<(), Box<dyn std::error::Error>> {
117    /// let amount = 100_000;
118    ///
119    /// // Currently, a user's Spark address is their public key.
120    /// let receiver_spark_address = PublicKey::from_str("02782d7ba8764306bd324e23082f785f7c880b7202cb10c85a2cb96496aedcaba7").unwrap();
121    ///
122    /// let transfer_id_string = sdk.transfer(amount, &receiver_spark_address).await?;
123    /// let transfer_id = Uuid::parse_str(&transfer_id_string).unwrap();
124    /// println!("Transfer ID is {}", transfer_id);
125    /// # Ok(())
126    /// # }
127    /// ```
128    ///
129    /// # Notes
130    ///
131    /// Currently, the leaf selection algorithm only supports selecting a single leaf with the exact
132    /// transfer amount. Future versions will support combining multiple leaves and handling change outputs.
133    #[cfg_attr(feature = "telemetry", tracing::instrument(skip_all))]
134    pub async fn transfer(
135        &self,
136        amount: u64,
137        receiver_spark_address: &PublicKey,
138    ) -> Result<String, SparkSdkError> {
139        with_handler_lock!(self, async {
140            let expiry_time = chrono::Utc::now().timestamp() as u64 + DEFAULT_TRANSFER_EXPIRY;
141
142            self.refresh_timelock_nodes(None).await?;
143
144            // do leaf selection
145            // if not sufficient leaves are found, a swap with the SSP will be requested
146            let leaf_selection_response = self.prepare_leaves_for_amount(amount).await?;
147            let unlocking_id = leaf_selection_response.unlocking_id.unwrap();
148
149            // TODO: expect that at this point, leaf_selection_response.total_value == amount, because a swap should happen between the SSP and the wallet.
150            let selected_leaves = leaf_selection_response.leaves;
151            let leaf_ids = selected_leaves
152                .iter()
153                .map(|leaf| leaf.get_id().clone())
154                .collect::<Vec<String>>();
155
156            let mut leaves_to_transfer = Vec::new();
157            for leaf in selected_leaves {
158                // get new
159                let new_signing_public_key = self.signer.new_ephemeral_keypair()?;
160
161                // get old
162                let old_signing_private_key = self.signer.expose_leaf_secret_key_for_transfer(
163                    leaf.get_id().clone(),
164                    SparkKeyType::BaseSigning,
165                    0,
166                    self.config.spark_config.network.to_bitcoin_network(),
167                )?;
168
169                leaves_to_transfer.push(LeafKeyTweak {
170                    leaf: leaf.get_tree_node()?,
171                    old_signing_private_key,
172                    new_signing_public_key,
173                });
174            }
175
176            // TODO - when add actual leaf selection, this might be an array of length > 1.
177            let transfer = self
178                .start_send_transfer(&leaves_to_transfer, receiver_spark_address, expiry_time)
179                .await?;
180
181            // unlock and remove leaves from the leaf manager
182            self.leaf_manager
183                .unlock_leaves(unlocking_id.clone(), &leaf_ids, true)?;
184
185            Ok(transfer.id)
186        })
187        .await
188    }
189
190    #[cfg_attr(feature = "telemetry", tracing::instrument(skip_all))]
191    pub async fn transfer_leaf_ids(
192        &self,
193        leaf_ids: Vec<String>,
194        receiver_identity_pubkey: &PublicKey,
195    ) -> Result<String, SparkSdkError> {
196        with_handler_lock!(self, async {
197            self.transfer_leaf_ids_internal(leaf_ids, receiver_identity_pubkey)
198                .await
199        })
200        .await
201    }
202
203    #[cfg_attr(feature = "telemetry", tracing::instrument(skip_all))]
204    pub(crate) async fn transfer_leaf_ids_internal(
205        &self,
206        leaf_ids: Vec<String>,
207        receiver_identity_pubkey: &PublicKey,
208    ) -> Result<String, SparkSdkError> {
209        let expiry_time = chrono::Utc::now().timestamp() as u64 + DEFAULT_TRANSFER_EXPIRY;
210
211        let leaf_selection_response = self
212            .leaf_manager
213            .lock_leaf_ids(&leaf_ids, SparkNodeStatus::Transfer)?;
214
215        let unlocking_id = leaf_selection_response.unlocking_id.unwrap();
216
217        let selected_leaves = leaf_selection_response.leaves;
218
219        let mut leaves_to_transfer = Vec::new();
220        for leaf in selected_leaves {
221            // get new
222            let new_signing_public_key = self.signer.new_ephemeral_keypair()?;
223
224            let network = self.config.spark_config.network.to_bitcoin_network();
225            let old_signing_pubkey = self.signer.get_deposit_signing_key(network)?;
226            let old_signing_private_key = self
227                .signer
228                .sensitive_expose_secret_key_from_pubkey(&old_signing_pubkey, false)?;
229
230            leaves_to_transfer.push(LeafKeyTweak {
231                leaf: leaf.get_tree_node()?,
232                old_signing_private_key,
233                new_signing_public_key,
234            });
235        }
236
237        // TODO - when add actual leaf selection, this might be an array of length > 1.
238        let transfer = self
239            .start_send_transfer(&leaves_to_transfer, receiver_identity_pubkey, expiry_time)
240            .await?;
241
242        // unlock and remove leaves from the leaf manager
243        self.leaf_manager
244            .unlock_leaves(unlocking_id.clone(), &leaf_ids, true)?;
245
246        Ok(transfer.id)
247    }
248
249    /// Claims a pending transfer that was sent to this wallet.
250    ///
251    /// This function processes a pending transfer and claims the funds into the wallet. It performs the following steps:
252    /// 1. Verifies the transfer is in the correct state (SenderKeyTweaked)
253    /// 2. Verifies and decrypts the leaf private keys using the wallet's identity key
254    /// 3. Generates new signing keys for the claimed leaves
255    /// 4. Finalizes the transfer by:
256    ///    - Tweaking the leaf keys
257    ///    - Signing refund transactions
258    ///    - Submitting the signatures to the Spark network
259    ///    - Storing the claimed leaves in the wallet's database
260    ///
261    /// # Arguments
262    ///
263    /// * `transfer` - The pending transfer to claim, must be in SenderKeyTweaked status
264    ///
265    /// # Returns
266    ///
267    /// * `Ok(())` - If the transfer was successfully claimed
268    /// * `Err(SparkSdkError)` - If there was an error during the claim process
269    ///
270    /// # Errors
271    ///
272    /// Returns [`SparkSdkError::InvalidInput`] if:
273    /// - The transfer is not in SenderKeyTweaked status
274    ///
275    /// May also return other `SparkSdkError` variants for network, signing or storage errors.
276    ///
277    /// # Example
278    ///
279    /// ```
280    /// # use spark_rust::{SparkSdk, SparkNetwork, signer::default_signer::DefaultSigner, signer::traits::SparkSigner};
281    ///
282    /// async fn example() {
283    ///     let mnemonic = "abandon ability able about above absent absorb abstract absurd abuse access accident";
284    ///     let network = SparkNetwork::Regtest;
285    ///     let signer = DefaultSigner::from_mnemonic(mnemonic, network.clone()).await.unwrap();
286    ///     let sdk = SparkSdk::new(network, signer).await.unwrap();
287    ///     let pending = sdk.query_pending_transfers().await.unwrap();
288    ///     for transfer in pending {
289    ///         sdk.claim_transfer(transfer).await.unwrap();
290    ///     }
291    /// }
292    /// ```
293    #[cfg_attr(feature = "telemetry", tracing::instrument(skip_all))]
294    pub async fn claim_transfer(&self, transfer: Transfer) -> Result<(), SparkSdkError> {
295        with_handler_lock!(self, async { self.claim_transfer_internal(transfer).await }).await
296    }
297
298    #[cfg_attr(feature = "telemetry", tracing::instrument(skip_all))]
299    async fn claim_transfer_internal(&self, transfer: Transfer) -> Result<(), SparkSdkError> {
300        // Validate the request
301        for transfer_leaf in &transfer.leaves {
302            if transfer_leaf.leaf.is_none() {
303                return Err(SparkSdkError::from(ValidationError::InvalidInput {
304                    field: "Transfer leaf is not found".to_string(),
305                }));
306            }
307        }
308
309        if transfer.status != TransferStatus::SenderKeyTweaked as i32 {
310            return Err(SparkSdkError::from(ValidationError::InvalidInput {
311                field: "Transfer is not in the correct status".to_string(),
312            }));
313        }
314
315        let mut leaves_to_claim = Vec::new();
316        for leaf in &transfer.leaves {
317            let leaf_private_key_map = self.verify_pending_transfer(&transfer).await?;
318
319            let leaf_id = leaf.leaf.as_ref().unwrap().id.clone();
320            let new_pubkey = self.signer.new_secp256k1_keypair(
321                leaf_id.clone(),
322                SparkKeyType::BaseSigning,
323                0,
324                self.config.spark_config.network.to_bitcoin_network(),
325            )?;
326
327            self.signer
328                .insert_secp256k1_keypair_from_secret_key(&leaf_private_key_map[&leaf_id])
329                .unwrap();
330
331            let claim_node = LeafKeyTweak {
332                leaf: leaf.leaf.as_ref().unwrap().clone(),
333                old_signing_private_key: leaf_private_key_map[&leaf_id],
334                new_signing_public_key: new_pubkey,
335            };
336
337            leaves_to_claim.push(claim_node);
338        }
339
340        self.claim_finalize_incoming_transfer(&transfer, &leaves_to_claim)
341            .await?;
342
343        // refresh timelock nodes
344        self.refresh_timelock_nodes(None).await?;
345
346        Ok(())
347    }
348
349    #[cfg_attr(feature = "telemetry", tracing::instrument(skip_all))]
350    pub async fn claim_transfers(&self) -> Result<(), SparkSdkError> {
351        with_handler_lock!(self, async { self.claim_transfers_internal().await }).await
352    }
353
354    #[cfg_attr(feature = "telemetry", tracing::instrument(skip_all))]
355    pub async fn claim_transfers_internal(&self) -> Result<(), SparkSdkError> {
356        let pending = self.query_pending_transfers_internal().await?;
357
358        let pending_len = pending.len();
359        let claim_futures = pending
360            .into_iter()
361            .map(|transfer| self.claim_transfer_internal(transfer.clone()));
362        futures::future::try_join_all(claim_futures).await?;
363
364        #[cfg(feature = "telemetry")]
365        tracing::debug!("Claimed {:?} pending transfers", pending_len);
366
367        Ok(())
368    }
369
370    #[cfg_attr(feature = "telemetry", tracing::instrument(skip_all))]
371    pub async fn get_all_transfers(
372        &self,
373        limit: Option<u32>,
374        offset: Option<u32>,
375    ) -> Result<QueryAllTransfersResponse, SparkSdkError> {
376        let limit = limit.unwrap_or(20);
377        let offset = offset.unwrap_or(0);
378
379        let response = self.query_all_transfers(limit, offset).await?;
380        Ok(response)
381    }
382}