spark_rust/wallet/internal_handlers/implementations/timelock.rs

1use std::collections::HashMap;
2
3use bitcoin::secp256k1::PublicKey;
4use bitcoin::{Sequence, Transaction};
5use frost_secp256k1_tr_unofficial::round1::SigningCommitments;
6use spark_protos::common::SignatureIntent;
7use spark_protos::frost::{AggregateFrostRequest, FrostSigningJob};
8use spark_protos::spark::{
9    FinalizeNodeSignaturesRequest, NodeSignatures, RefreshTimelockRequest, SigningJob, TreeNode,
10};
11use tonic::{async_trait, Request};
12
13use crate::constants::spark::TIME_LOCK_INTERVAL;
14use crate::error::{network::NetworkError, validation::ValidationError, SparkSdkError};
15use crate::signer::default_signer::{
16    create_user_key_package, marshal_frost_commitments, marshal_frost_nonces,
17};
18use crate::signer::traits::SparkSigner;
19use crate::wallet::internal_handlers::traits::timelock::TimelockInternalHandlers;
20use crate::wallet::utils::bitcoin::{
21    bitcoin_tx_from_bytes, serialize_bitcoin_transaction, sighash_from_tx,
22};
23use crate::wallet::utils::frost::frost_commitment_to_proto_commitment;
24use crate::wallet::utils::sequence::{initial_sequence, next_sequence};
25use crate::SparkSdk;
26
#[async_trait]
impl<S: SparkSigner + Send + Sync> TimelockInternalHandlers for SparkSdk<S> {
    /// Refreshes the relative timelock on a single leaf's refund transaction.
    ///
    /// Flow: decrement the refund tx's sequence (via [`next_sequence`]), submit a
    /// FROST signing job to the Spark operators (`refresh_timelock` RPC), sign the
    /// sighash locally with the user's key share, aggregate both shares, and
    /// finalize via `finalize_node_signatures` with [`SignatureIntent::Refresh`].
    ///
    /// # Errors
    /// Returns `SparkSdkError` on tx (de)serialization failures, signer failures,
    /// RPC failures (`NetworkError::Status`), or a signing-result count mismatch
    /// (`NetworkError::InvalidResponse`).
    #[cfg_attr(feature = "telemetry", tracing::instrument(skip_all))]
    async fn refresh_timelock_refund_tx(
        &self,
        leaf: &TreeNode,
        signing_public_key: &PublicKey,
    ) -> Result<(), SparkSdkError> {
        // Create and encode the new refund tx: same tx, input 0's sequence
        // stepped to the next (shorter) timelock value.
        let mut new_refund_tx = bitcoin_tx_from_bytes(&leaf.refund_tx)?;
        let curr_sequence = new_refund_tx.input[0].sequence;
        let next_sequence = next_sequence(curr_sequence.0);
        new_refund_tx.input[0].sequence = Sequence(next_sequence);
        let new_refund_tx_buf = serialize_bitcoin_transaction(&new_refund_tx)?;

        // Prepare the signing job with a fresh FROST nonce pair.
        let commitment = self.signer.new_frost_signing_noncepair()?;
        // NOTE(review): `serialize()` failure panics here rather than mapping to
        // SparkSdkError — TODO confirm this unwrap is acceptable.
        let commitment_bytes = commitment.serialize().unwrap();
        let commitment_proto = frost_commitment_to_proto_commitment(&commitment)?;
        let signing_jobs = vec![SigningJob {
            signing_public_key: signing_public_key.serialize().to_vec(),
            raw_tx: new_refund_tx_buf,
            signing_nonce_commitment: Some(commitment_proto),
        }];

        // Expose the secret nonces matching the commitment; kept index-aligned
        // with `signing_jobs` (one entry each here).
        let nonce = self
            .signer
            .sensitive_expose_nonces_from_commitments(&commitment_bytes)?;
        let signing_datas = vec![nonce];

        // Call Spark to refresh the timelock on the leaf.
        let signing_jobs_len = signing_jobs.len();
        let mut spark_client = self.config.spark_config.get_spark_connection(None).await?;
        // NOTE(review): `owner_identity_public_key` is populated from the SSP
        // identity key — verify this is intended rather than the wallet owner's
        // own identity key.
        let mut request = Request::new(RefreshTimelockRequest {
            leaf_id: leaf.id.clone(),
            owner_identity_public_key: self
                .config
                .spark_config
                .ssp_identity_public_key
                .serialize()
                .to_vec(),
            signing_jobs: signing_jobs.clone(),
        });
        self.add_authorization_header_to_request(&mut request, None);
        let refresh_timelock_response = spark_client
            .refresh_timelock(request)
            .await
            .map_err(|status| SparkSdkError::from(NetworkError::Status(status)))?
            .into_inner();

        // Validate response: exactly one signing result per submitted job.
        if signing_jobs_len != refresh_timelock_response.signing_results.len() {
            return Err(SparkSdkError::from(NetworkError::InvalidResponse));
        }

        // Prepare the local signing jobs plus the matching aggregation requests,
        // keyed by a locally generated job id.
        let mut user_signing_jobs = Vec::new();
        let mut job_to_aggregate_request_map = HashMap::new();
        let mut job_to_node_id_map = HashMap::new();

        let signing_results = refresh_timelock_response.signing_results;
        for (i, signing_result) in signing_results.iter().enumerate() {
            let signing_data = &signing_datas[i];
            let signing_job = &signing_jobs[i];
            let refund_tx = bitcoin_tx_from_bytes(&signing_job.raw_tx)?;
            let node_tx = bitcoin_tx_from_bytes(&leaf.node_tx)?;

            // Sighash for input 0 of the refund tx, spending the leaf node tx's
            // output 0.
            let refund_tx_sighash = sighash_from_tx(&refund_tx, 0, &node_tx.output[0])?;

            let proto_signing_nonce = marshal_frost_nonces(signing_data)?;
            let proto_signing_commitment = marshal_frost_commitments(signing_data.commitments())?;
            let signing_secret_key = self.signer.sensitive_expose_secret_key_from_pubkey(
                &PublicKey::from_slice(&signing_job.signing_public_key)?,
                false,
            )?;
            let user_key_package = create_user_key_package(&signing_secret_key.secret_bytes());

            // NOTE(review): panics if an operator result carries no inner
            // `signing_result` — TODO consider mapping to an error instead.
            let signing_result_inner = signing_result.signing_result.as_ref().unwrap();
            let operator_commitments = &signing_result_inner.signing_nonce_commitments;

            // Opaque correlation key between the signing job, its aggregation
            // request, and the node id.
            let user_signing_job_id = uuid::Uuid::now_v7().to_string();

            user_signing_jobs.push(FrostSigningJob {
                job_id: user_signing_job_id.clone(),
                message: refund_tx_sighash.to_vec(),
                key_package: Some(user_key_package),
                verifying_key: signing_result.verifying_key.clone(),
                nonce: Some(proto_signing_nonce),
                commitments: operator_commitments.clone(),
                user_commitments: Some(proto_signing_commitment.clone()),
                adaptor_public_key: Default::default(),
            });

            // Aggregation request is pre-built now; `user_signature_share` is
            // filled in after local signing below.
            job_to_aggregate_request_map.insert(
                user_signing_job_id.clone(),
                AggregateFrostRequest {
                    message: refund_tx_sighash.to_vec(),
                    signature_shares: signing_result_inner.signature_shares.clone(),
                    public_shares: signing_result_inner.public_keys.clone(),
                    verifying_key: signing_result.verifying_key.clone(),
                    commitments: operator_commitments.clone(),
                    user_commitments: Some(proto_signing_commitment),
                    user_public_key: signing_public_key.serialize().to_vec(),
                    adaptor_public_key: Default::default(),
                    user_signature_share: Default::default(),
                },
            );

            job_to_node_id_map.insert(user_signing_job_id, leaf.id.clone());
        }

        // Produce the user-side signature shares.
        let user_signatures = self.signer.sign_frost(user_signing_jobs)?;

        // Aggregate the user share with the operator shares into final
        // per-node signatures.
        let mut node_signatures = Vec::new();
        for (job_id, user_signature) in user_signatures.results.iter() {
            // NOTE(review): unwrap assumes the signer echoes back only job ids we
            // submitted — panics on an unknown id.
            let request = job_to_aggregate_request_map.get_mut(job_id).unwrap();
            request.user_signature_share = user_signature.signature_share.clone();

            let response = self.signer.aggregate_frost(request.clone())?;
            // Only the refund tx was re-signed in this flow.
            node_signatures.push(NodeSignatures {
                node_id: job_to_node_id_map[job_id].clone(),
                refund_tx_signature: response.signature,
                node_tx_signature: Default::default(),
            });
        }

        // Finalize node signatures for the refresh flow.
        let mut request = Request::new(FinalizeNodeSignaturesRequest {
            intent: SignatureIntent::Refresh.into(),
            node_signatures,
        });
        self.add_authorization_header_to_request(&mut request, None);
        spark_client
            .finalize_node_signatures(request)
            .await
            .map_err(|status| SparkSdkError::from(NetworkError::Status(status)))?;

        Ok(())
    }

    /// Refreshes the timelocks along a chain of tree nodes plus the final
    /// leaf's refund transaction.
    ///
    /// The first node's node tx has its sequence stepped via [`next_sequence`];
    /// every subsequent node tx is reset to [`TIME_LOCK_INTERVAL`]; one extra
    /// signing job re-signs the last node's refund tx with [`initial_sequence`].
    /// All jobs are co-signed with the operators and finalized as in
    /// [`Self::refresh_timelock_refund_tx`].
    ///
    /// NOTE(review): `nodes[i]` is paired with `parent_nodes[i]` by index and
    /// `parent_tx.output[node.vout]` is indexed unchecked — assumes
    /// `parent_nodes` is aligned with and at least as long as `nodes`, and that
    /// `vout` is in range; indexing panics otherwise. TODO confirm callers
    /// guarantee this.
    ///
    /// # Errors
    /// Returns `ValidationError::InvalidInput` for an empty `nodes` slice or a
    /// job/result count mismatch; otherwise the same error classes as
    /// [`Self::refresh_timelock_refund_tx`].
    #[cfg_attr(feature = "telemetry", tracing::instrument(skip_all))]
    async fn refresh_timelock_nodes(
        &self,
        nodes: &Vec<TreeNode>,
        parent_nodes: &Vec<TreeNode>,
        signing_public_key: &PublicKey,
    ) -> Result<(), SparkSdkError> {
        if nodes.is_empty() {
            return Err(SparkSdkError::from(ValidationError::InvalidInput {
                field: "no nodes to refresh timelock".to_string(),
            }));
        }

        // One job per node tx, plus one trailing job for the refund tx.
        let mut signing_jobs = Vec::with_capacity(nodes.len() + 1);
        let mut nonces = Vec::with_capacity(nodes.len() + 1);

        for (i, node) in nodes.iter().enumerate() {
            let mut new_node_tx = bitcoin_tx_from_bytes(&node.node_tx)?;
            if i == 0 {
                // First node: step down its existing timelock.
                let curr_sequence = new_node_tx.input[0].sequence;
                new_node_tx.input[0].sequence = Sequence(next_sequence(curr_sequence.0));
            } else {
                // Descendant nodes: reset to the full timelock interval.
                new_node_tx.input[0].sequence = Sequence(TIME_LOCK_INTERVAL);
            }

            let (signing_job, signing_commitment) =
                self.signing_job_from_tx(&new_node_tx, signing_public_key)?;

            signing_jobs.push(signing_job);

            // NOTE(review): unwrap on commitment serialization — panics on
            // failure rather than returning an error.
            let commitment_bytes = signing_commitment.serialize().unwrap();
            let nonce = self
                .signer
                .sensitive_expose_nonces_from_commitments(&commitment_bytes)?;
            nonces.push(nonce);
        }

        // Add one more job for the refund tx of the last node (the leaf),
        // with its sequence reset to the initial timelock value.
        let leaf = &nodes[nodes.len() - 1];
        let mut new_refund_tx = bitcoin_tx_from_bytes(&leaf.refund_tx)?;
        new_refund_tx.input[0].sequence = initial_sequence();
        let (signing_job, signing_commitment) =
            self.signing_job_from_tx(&new_refund_tx, signing_public_key)?;
        signing_jobs.push(signing_job);
        let commitment_bytes = signing_commitment.serialize().unwrap();
        let refund_nonce = self
            .signer
            .sensitive_expose_nonces_from_commitments(&commitment_bytes)?;
        nonces.push(refund_nonce);

        // Call Spark to refresh the timelock on the nodes.
        let mut spark_client = self.config.spark_config.get_spark_connection(None).await?;
        // NOTE(review): as above, `owner_identity_public_key` comes from the SSP
        // identity key — confirm intended.
        let mut request = Request::new(RefreshTimelockRequest {
            leaf_id: leaf.id.clone(),
            owner_identity_public_key: self
                .config
                .spark_config
                .ssp_identity_public_key
                .serialize()
                .to_vec(),
            signing_jobs: signing_jobs.clone(),
        });
        self.add_authorization_header_to_request(&mut request, None);

        let refresh_timelock_response = spark_client
            .refresh_timelock(request)
            .await
            .map_err(|status| SparkSdkError::from(NetworkError::Status(status)))?
            .into_inner();

        // Exactly one signing result per submitted job.
        if signing_jobs.len() != refresh_timelock_response.signing_results.len() {
            return Err(SparkSdkError::from(ValidationError::InvalidInput {
                field: format!(
                    "number of signing jobs and signing results do not match: {} != {}",
                    signing_jobs.len(),
                    refresh_timelock_response.signing_results.len()
                ),
            }));
        }

        // Sign and aggregate. `job_to_refund_map` records whether each job id
        // signed a refund tx (true) or a node tx (false).
        let mut user_signing_jobs = Vec::new();
        let mut job_to_aggregate_request_map = HashMap::new();
        let mut job_to_node_id_map = HashMap::new();
        let mut job_to_refund_map = HashMap::new();

        for (i, signing_result) in refresh_timelock_response.signing_results.iter().enumerate() {
            let nonce = nonces[i].clone();
            let signing_job = &signing_jobs[i];
            let raw_tx = bitcoin_tx_from_bytes(&signing_job.raw_tx)?;

            // Resolve which output this tx spends so the sighash can be built:
            // the trailing job (i == nodes.len()) is the refund tx, which spends
            // output 0 of the leaf's own node tx; node txs spend output
            // `node.vout` of their parent's node tx.
            let (parent_node, node, refund, vout) = if i == nodes.len() {
                // Refund tx
                let node = &nodes[i - 1];
                (node, node, true, 0)
            } else {
                let node = &nodes[i];
                let parent_node = &parent_nodes[i];
                (parent_node, node, false, node.vout)
            };
            let parent_tx = bitcoin_tx_from_bytes(&parent_node.node_tx)?;
            let txout = parent_tx.output[vout as usize].clone();

            let raw_tx_sighash = sighash_from_tx(&raw_tx, 0, &txout)?;
            let proto_signing_nonce = marshal_frost_nonces(&nonce)?;
            let proto_signing_commitment = marshal_frost_commitments(nonce.commitments())?;
            let signing_secret_key = self.signer.sensitive_expose_secret_key_from_pubkey(
                &PublicKey::from_slice(&signing_job.signing_public_key)?,
                false,
            )?;
            let user_key_package = create_user_key_package(&signing_secret_key.secret_bytes());

            // NOTE(review): panics if an operator result carries no inner
            // `signing_result`.
            let signing_result_inner = signing_result.signing_result.as_ref().unwrap();
            let operator_commitments = &signing_result_inner.signing_nonce_commitments;

            let user_signing_job_id = uuid::Uuid::now_v7().to_string();
            user_signing_jobs.push(FrostSigningJob {
                job_id: user_signing_job_id.clone(),
                message: raw_tx_sighash.to_vec(),
                key_package: Some(user_key_package),
                verifying_key: signing_result.verifying_key.clone(),
                nonce: Some(proto_signing_nonce),
                commitments: operator_commitments.clone(),
                user_commitments: Some(proto_signing_commitment.clone()),
                adaptor_public_key: Default::default(),
            });

            // `user_signature_share` is filled in after local signing below.
            job_to_aggregate_request_map.insert(
                user_signing_job_id.clone(),
                AggregateFrostRequest {
                    message: raw_tx_sighash.to_vec(),
                    signature_shares: signing_result_inner.signature_shares.clone(),
                    public_shares: signing_result_inner.public_keys.clone(),
                    verifying_key: signing_result.verifying_key.clone(),
                    commitments: operator_commitments.clone(),
                    user_commitments: Some(proto_signing_commitment),
                    user_public_key: signing_public_key.serialize().to_vec(),
                    adaptor_public_key: Default::default(),
                    user_signature_share: Default::default(),
                },
            );

            job_to_node_id_map.insert(user_signing_job_id.clone(), node.id.clone());
            job_to_refund_map.insert(user_signing_job_id, refund);
        }

        let user_signatures = self.signer.sign_frost(user_signing_jobs)?;
        let mut node_signatures = Vec::new();

        for (job_id, user_signature) in user_signatures.results.iter() {
            // NOTE(review): unwrap assumes the signer only returns job ids we
            // submitted.
            let request = job_to_aggregate_request_map.get_mut(job_id).unwrap();
            request.user_signature_share = user_signature.signature_share.clone();

            let response = self.signer.aggregate_frost(request.clone())?;

            // Route the aggregated signature to the correct field depending on
            // whether this job signed a refund tx or a node tx.
            if job_to_refund_map[job_id] {
                node_signatures.push(NodeSignatures {
                    node_id: job_to_node_id_map[job_id].clone(),
                    refund_tx_signature: response.signature,
                    node_tx_signature: Default::default(),
                });
            } else {
                node_signatures.push(NodeSignatures {
                    node_id: job_to_node_id_map[job_id].clone(),
                    node_tx_signature: response.signature,
                    refund_tx_signature: Default::default(),
                });
            }
        }

        // Finalize node signatures for the refresh flow.
        let mut request = Request::new(FinalizeNodeSignaturesRequest {
            intent: SignatureIntent::Refresh.into(),
            node_signatures,
        });
        self.add_authorization_header_to_request(&mut request, None);
        spark_client
            .finalize_node_signatures(request)
            .await
            .map_err(|status| SparkSdkError::from(NetworkError::Status(status)))?;

        Ok(())
    }

    /// Builds a [`SigningJob`] for `new_tx` signed under `signing_public_key`,
    /// generating a fresh FROST nonce pair for it.
    ///
    /// Returns the proto signing job together with the local
    /// [`SigningCommitments`], which the caller needs to later recover the
    /// matching secret nonces.
    ///
    /// # Errors
    /// Returns `SparkSdkError` if tx serialization, nonce-pair generation, or
    /// commitment marshalling fails.
    #[cfg_attr(feature = "telemetry", tracing::instrument(skip_all))]
    fn signing_job_from_tx(
        &self,
        new_tx: &Transaction,
        signing_public_key: &PublicKey,
    ) -> Result<(SigningJob, SigningCommitments), SparkSdkError> {
        let tx_bytes = serialize_bitcoin_transaction(new_tx)?;

        let signing_commitment = self.signer.new_frost_signing_noncepair()?;
        let proto_signing_commitment = marshal_frost_commitments(&signing_commitment)?;

        let signing_job = SigningJob {
            signing_public_key: signing_public_key.serialize().to_vec(),
            raw_tx: tx_bytes,
            signing_nonce_commitment: Some(proto_signing_commitment),
        };

        Ok((signing_job, signing_commitment))
    }
}