spark_rust/wallet/config/spark.rs

//! # Spark Protocol Configuration
//!
//! This module defines configuration for connecting to and interacting with
//! the Spark protocol network, including:
//!
//! - Operator connection management
//! - Threshold signing configuration
//! - Authentication session handling
//! - RPC and GraphQL client configuration
//!
//! The Spark protocol uses a threshold signing approach where multiple operators
//! collaborate with the client to create valid signatures. This module manages
//! connections to these operators and handles the related configuration.
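//!
//! A minimal usage sketch (hypothetical caller; `SparkConfig` is crate-private,
//! so this is illustrative rather than a public API):
//!
//! ```ignore
//! let config = SparkConfig::new(SparkNetwork::Regtest).await?;
//! let coordinator_client = config.get_spark_connection(None).await?;
//! ```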

use crate::constants::spark::connection::{DEFAULT_COORDINATOR_INDEX, DEFAULT_RETRY_COUNT};
use crate::constants::spark::{
    LIGHTSPARK_SSP_MAINNET_IDENTITY_PUBLIC_KEY, LIGHTSPARK_SSP_REGTEST_IDENTITY_PUBLIC_KEY,
};
use crate::error::CryptoError;
use crate::error::{NetworkError, SparkSdkError, ValidationError};
use crate::rpc::connections::connection::SparkConnection;
use crate::rpc::SparkRpcClient;
use crate::wallet::graphql::GraphqlClient;
use crate::SparkNetwork;
use bitcoin::secp256k1::PublicKey;
use frost_secp256k1_tr_unofficial::Identifier;
use hashbrown::HashMap;
use parking_lot::RwLock;
use spark_cryptography::signing::identifier_to_hex_string;
use spark_protos::spark::spark_service_client::SparkServiceClient;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use tokio::time::sleep;
use tonic::transport::Channel;
use tonic::transport::Uri;
use tonic::Request;

use crate::constants::spark::connection::SPARK_DEV_OPERATORS;
use crate::constants::spark::SPARK_REGTEST_SIGNING_THRESHOLD;

use crate::rpc::traits::SparkRpcConnection;

/// Primary configuration for the Spark protocol
///
/// This struct manages the network-specific configuration for the Spark protocol,
/// including connections to operators, threshold signing parameters, and authentication.
/// It serves as the central point for all protocol-related settings and provides methods
/// for interacting with the Spark operator network.
///
/// Key responsibilities:
/// - Managing operator connections and authentication
/// - Providing access to RPC and GraphQL clients
/// - Maintaining threshold signing configuration
/// - Handling connection retries and backoff
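///
/// A hypothetical retry-call sketch (`SomeRequest` and `some_rpc` below are
/// illustrative placeholders, not real names from this crate):
/// ```ignore
/// let config = SparkConfig::new(SparkNetwork::Regtest).await?;
/// let response = config
///     .call_with_retry(
///         SomeRequest::default(),
///         |mut client, req| Box::pin(async move { client.some_rpc(req).await }),
///         None, // default to the coordinator
///     )
///     .await?;
/// ```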
#[derive(Clone)]
pub(crate) struct SparkConfig {
    /// Network to use for the wallet (mainnet, testnet, etc.)
    pub(crate) network: SparkNetwork,

    /// Spark operator pool
    pub(crate) operator_pool: Arc<SparkOperatorPool>,

    /// SSP identity public key
    pub(crate) ssp_identity_public_key: PublicKey,

    /// The threshold constant used for signing
    pub(crate) threshold: u32,

    /// GraphQL client for SSP API interactions
    pub(crate) ssp_graphql_client: Arc<GraphqlClient>,

    /// gRPC clients for Spark operator RPC calls, keyed by operator id
    spark_clients: Arc<RwLock<HashMap<u32, SparkRpcClient>>>,
    /// Authentication sessions with the Spark operators, indexed by operator id
    pub(crate) sessions: Arc<parking_lot::RwLock<Vec<Session>>>,
}

/// Authentication session for Spark operator communication
///
/// Represents an authenticated session with a Spark operator, containing
/// the session token and expiration information needed for secure communication.
#[derive(Debug, Clone)]
pub struct Session {
    /// The session token
    pub(crate) session_token: String,

    /// The expiration timestamp
    pub(crate) _expiration_timestamp: i64, // TODO: add auto-refresh until Spark Authn server adds refresh token support
}

/// Pool of Spark operators for threshold signing
///
/// Manages a collection of Spark operators that participate in threshold signing
/// operations. The pool includes configuration for retry behavior and identifies
/// the coordinator operator that orchestrates multi-party signing.
#[derive(Clone)]
pub(crate) struct SparkOperatorPool {
    /// List of available Spark operators
    pub(crate) operators: Vec<Arc<SparkOperator>>,

    /// Index of the operator that acts as coordinator
    pub(crate) coordinator_index: u32,

    /// Number of times to retry failed operations
    retry_count: u32,

    /// Milliseconds to wait between retry attempts
    backoff_ms: u64,
}

impl SparkOperatorPool {}

/// Configuration for an individual Spark operator node
///
/// Represents a single Spark operator that participates in threshold signing
/// operations. Each operator has a unique identifier, network address, and
/// identity public key used for authentication and FROST threshold signatures.
#[derive(Debug, Clone)]
pub(crate) struct SparkOperator {
    /// The index of the signing operator.
    pub(crate) id: u32,

    /// The FROST identifier of the signing operator: the operator index + 1,
    /// encoded as a 32-byte big-endian hex string. Used as the Shamir secret
    /// share identifier in DKG key shares.
    pub(crate) frost_identifier: Identifier,

    /// Address of the signing operator
    pub(crate) address: Uri,

    /// Public key of the signing operator
    pub(crate) identity_public_key: PublicKey,
}

impl SparkOperator {
    /// Parse an operator specified as a string (in the format `<pubkey>@<address>`) into a
    /// [`SparkOperator`].
    pub(crate) fn parse(id: u32, op: &str) -> Result<Self, SparkSdkError> {
        let parts: Vec<&str> = op.split('@').collect();
        if parts.len() != 2 {
            return Err(SparkSdkError::from(ValidationError::InvalidArgument {
                argument: format!(
                    "Invalid operator string format {} (should be <pubkey>@<address>)",
                    op
                ),
            }));
        }

        let identity_public_key = PublicKey::from_str(parts[0]).map_err(|err| {
            SparkSdkError::from(ValidationError::InvalidArgument {
                argument: format!("Invalid public key for operator {}: {}", parts[0], err),
            })
        })?;

        let address = Uri::from_str(parts[1])
            .map_err(|err| SparkSdkError::from(NetworkError::InvalidUri(err)))?;

        let frost_identifier = Identifier::try_from(id as u16 + 1).map_err(|err| {
            SparkSdkError::from(ValidationError::InvalidArgument {
                argument: format!(
                    "Invalid frost identifier for operator {}: {}",
                    parts[0], err
                ),
            })
        })?;

        Ok(SparkOperator {
            id,
            frost_identifier,
            address,
            identity_public_key,
        })
    }

    /// Returns the FROST identifier as a 32-byte big-endian hex string.
    pub(crate) fn frost_identifier_str(&self) -> String {
        identifier_to_hex_string(&self.frost_identifier)
    }
}

impl SparkConfig {
    /// Try to read operators from environment variables. Each operator should be in a separate
    /// environment variable named `SPARK_OPERATOR_<index>`.
    ///
    /// Returns `None` if no operators are specified as environment variables. Otherwise,
    /// returns `Some(Ok(..))` with the parsed operators, or `Some(Err(..))` if parsing fails.
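    ///
    /// For example (keys and addresses are illustrative placeholders):
    /// ```text
    /// SPARK_OPERATOR_0=<pubkey-hex>@http://localhost:8535
    /// SPARK_OPERATOR_1=<pubkey-hex>@http://localhost:8536
    /// ```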
    fn operators_from_env() -> Option<Result<Vec<SparkOperator>, SparkSdkError>> {
        // If the first operator is not set, assume the rest aren't set either.
        if std::env::var("SPARK_OPERATOR_0").is_err() {
            return None;
        }

        // Check up to 10 operator variables, stopping at the first one that is unset.
        let mut operators = vec![];
        for i in 0..10 {
            let variable = format!("SPARK_OPERATOR_{}", i);
            let operator = std::env::var(&variable);
            let operator = match operator {
                Ok(operator) => operator,
                Err(_) => break,
            };

            match SparkOperator::parse(i as u32, &operator) {
                Ok(operator) => operators.push(operator),
                Err(err) => {
                    return Some(Err(SparkSdkError::from(ValidationError::InvalidArgument {
                        argument: format!("Unable to parse operator {}: {}", variable, err),
                    })))
                }
            }
        }

        Some(Ok(operators))
    }

    /// Try to get the operators from the environment first. Otherwise, parse the defaults from
    /// [`SPARK_DEV_OPERATORS`].
    fn operators() -> Result<Vec<SparkOperator>, SparkSdkError> {
        if let Some(operators) = Self::operators_from_env() {
            return operators;
        }

        let mut spark_operators = vec![];
        for (i, operator) in SPARK_DEV_OPERATORS.iter().enumerate() {
            // TODO: Add environment
            let address = Uri::from_str(operator.0)
                .map_err(|err| SparkSdkError::from(NetworkError::InvalidUri(err)))?;

            let identity_public_key = PublicKey::from_str(operator.1).map_err(|err| {
                SparkSdkError::from(ValidationError::InvalidArgument {
                    argument: format!("Invalid public key for operator {}: {}", operator.1, err),
                })
            })?;

            let frost_identifier = Identifier::try_from(i as u16 + 1).map_err(|err| {
                SparkSdkError::from(ValidationError::InvalidArgument {
                    argument: format!(
                        "Invalid frost identifier for operator {}: {}",
                        operator.1, err
                    ),
                })
            })?;

            spark_operators.push(SparkOperator {
                id: i as u32,
                frost_identifier,
                address,
                identity_public_key,
            });
        }

        Ok(spark_operators)
    }

    /// Creates a new wallet configuration with the specified network.
    ///
    /// This function:
    /// 1. Builds the list of Spark operators (from `SPARK_OPERATOR_<index>` environment
    ///    variables if set, otherwise from the built-in defaults)
    /// 2. Sets up gRPC connections to each operator
    /// 3. Initializes the wallet configuration with default values
    ///
    /// # Arguments
    ///
    /// * `network` - The Spark network to use (mainnet, testnet, regtest)
    ///
    /// # Returns
    ///
    /// Returns a Result containing either:
    /// * The initialized SparkConfig struct on success
    /// * A [`SparkSdkError`] on failure (e.g. connection errors, invalid URLs)
    ///
    /// # Errors
    ///
    /// This function will return an error if it:
    /// * Fails to establish gRPC connections to operators
    /// * Fails to parse operator URLs
    /// * Fails to decode operator public keys
    pub async fn new(network: SparkNetwork) -> Result<Self, SparkSdkError> {
        // set the signing threshold (currently the regtest constant is used for all networks)
        let threshold = SPARK_REGTEST_SIGNING_THRESHOLD;

        // set the default coordinator index
        let coordinator_index = DEFAULT_COORDINATOR_INDEX;

        // set the operators
        let spark_operators = Self::operators()?;

        // set the connection pool for operators
        let mut spark_clients = HashMap::new();
        for operator in &spark_operators {
            // establish the secure connection;
            // since this uses rustls, self-signed certificates will fail
            let spark_rpc_client =
                SparkConnection::establish_connection(operator.address.clone()).await?;
            spark_clients.insert(operator.id, spark_rpc_client);
        }

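        // The SSP identity key can be overridden via an environment variable of the
        // same name; otherwise the compiled-in default for the network is used.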
        let ssp_identity_public_key = match network {
            SparkNetwork::Regtest => PublicKey::from_str(
                std::env::var("LIGHTSPARK_SSP_REGTEST_IDENTITY_PUBLIC_KEY")
                    .as_deref()
                    .unwrap_or(LIGHTSPARK_SSP_REGTEST_IDENTITY_PUBLIC_KEY),
            )
            .map_err(|err| SparkSdkError::from(CryptoError::Secp256k1(err)))?,
            SparkNetwork::Mainnet => PublicKey::from_str(
                std::env::var("LIGHTSPARK_SSP_MAINNET_IDENTITY_PUBLIC_KEY")
                    .as_deref()
                    .unwrap_or(LIGHTSPARK_SSP_MAINNET_IDENTITY_PUBLIC_KEY),
            )
            .map_err(|err| SparkSdkError::from(CryptoError::Secp256k1(err)))?,
        };

        let ssp_graphql_client = GraphqlClient::new().map(Arc::new)?;

        #[cfg(feature = "telemetry")]
        tracing::info!(
            ssp_identity_public_key = ssp_identity_public_key.to_string(),
            "ssp public key"
        );

        let operator_pool = SparkOperatorPool {
            operators: spark_operators.into_iter().map(Arc::new).collect(),
            coordinator_index,
            retry_count: DEFAULT_RETRY_COUNT,
            backoff_ms: 200,
        };

        let wallet_config = Self {
            network,
            operator_pool: Arc::new(operator_pool),
            threshold,
            spark_clients: Arc::new(RwLock::new(spark_clients)),
            ssp_graphql_client,
            ssp_identity_public_key,
            sessions: Arc::new(parking_lot::RwLock::new(vec![])),
        };

        Ok(wallet_config)
    }

    /// Gets a connection to a Spark operator service.
    ///
    /// This function manages connections to Spark operators, creating new connections if needed
    /// and reusing existing ones. It handles both connecting to a specific operator and defaulting
    /// to the coordinator.
    ///
    /// # Arguments
    ///
    /// * `operator_id` - Optional ID of the specific operator to connect to. If None, connects to
    ///                   the default coordinator.
    ///
    /// # Returns
    ///
    /// * [`Result<SparkServiceClient<Channel>, SparkSdkError>`] - A client for the Spark service on success,
    ///   or an error if the connection fails
    ///
    /// # Errors
    ///
    /// Returns a [`ValidationError::InvalidArgument`] (wrapped in [`SparkSdkError`]) if:
    /// * The operator_id is out of bounds for the available operators
    ///
    /// May also return errors from:
    /// * Channel creation/connection
    /// * URI parsing
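    ///
    /// A minimal sketch of connecting to a specific operator (hypothetical caller):
    /// ```ignore
    /// let client = config.get_spark_connection(Some(1)).await?;
    /// ```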
    #[cfg_attr(feature = "telemetry", tracing::instrument(skip_all))]
    pub(crate) async fn get_spark_connection(
        &self,
        operator_id: Option<u32>,
    ) -> Result<SparkServiceClient<Channel>, SparkSdkError> {
        let operator_id = operator_id.unwrap_or(self.operator_pool.coordinator_index);

        // if a connection doesn't exist yet, create one for the operator
        if !self.spark_clients.read().contains_key(&operator_id) {
            let spark_operators = &self.operator_pool.operators;
            if operator_id >= spark_operators.len() as u32 {
                return Err(SparkSdkError::from(ValidationError::InvalidArgument {
                    argument: format!("Operator index {} is out of bounds", operator_id),
                }));
            }

            let uri = spark_operators[operator_id as usize].address.clone();
            let spark_rpc_instance = SparkConnection::establish_connection(uri).await?;
            self.spark_clients
                .write()
                .insert(operator_id, spark_rpc_instance);
        }

        // get the connection
        let client = self.spark_clients.read().get(&operator_id).unwrap().clone();
        let spark_client = client.get_new_spark_service_connection()?;

        Ok(spark_client)
    }

    /// Helper method to execute a gRPC call with automatic retries.
    ///
    /// This provides a simple wrapper around the more complex retry logic.
    ///
    /// # Example
    /// ```ignore
    /// let response = config.execute_with_retry(
    ///     || Request::new(GenerateDepositAddressRequest { ... }),
    ///     |mut client, req| Box::pin(async move { client.generate_deposit_address(req).await }),
    ///     None,
    /// ).await?;
    /// ```
    pub(crate) async fn execute_with_retry<T, R>(
        &self,
        create_request: impl Fn() -> Request<T>,
        rpc_call: impl Fn(
            SparkServiceClient<Channel>,
            Request<T>,
        ) -> std::pin::Pin<
            Box<dyn std::future::Future<Output = Result<tonic::Response<R>, tonic::Status>> + Send>,
        >,
        operator_id: Option<u32>,
    ) -> Result<R, SparkSdkError> {
        let operator_id = operator_id.unwrap_or(self.operator_pool.coordinator_index);
        let max_retries = self.operator_pool.retry_count;
        let backoff = Duration::from_millis(self.operator_pool.backoff_ms);
        let mut attempt = 0;

        loop {
            // Get a fresh client
            let client = self.get_spark_connection(Some(operator_id)).await?;

            // Create a fresh request for this attempt
            let mut request = create_request();

            // Add the auth header for this operator's session
            let session_token = self.sessions.read()[operator_id as usize]
                .session_token
                .clone();
            request
                .metadata_mut()
                .insert("authorization", session_token.parse().unwrap());

            match rpc_call(client, request).await {
                Ok(response) => return Ok(response.into_inner()),
                Err(err) => {
                    // `attempt` counts completed attempts; stop once the retry budget is
                    // used up (written to avoid underflow when `max_retries` is zero).
                    if attempt + 1 >= max_retries {
                        return Err(SparkSdkError::from(NetworkError::Status(err)));
                    }

                    sleep(backoff).await;
                    attempt += 1;

                    #[cfg(feature = "telemetry")]
                    tracing::debug!(
                        "Retrying RPC call to operator {} (attempt {}/{})",
                        operator_id,
                        attempt + 1,
                        max_retries
                    );
                }
            }
        }
    }

    /// A simpler way to use retries with cloneable request types.
    ///
    /// This further simplifies retry usage when the inner request type (`T`) implements `Clone`.
    ///
    /// # Example
    /// ```ignore
    /// let request = GenerateDepositAddressRequest { ... };
    /// let response = config.call_with_retry(
    ///     request,
    ///     |mut client, req| Box::pin(async move { client.generate_deposit_address(req).await }),
    ///     None,
    /// ).await?;
    /// ```
    pub(crate) async fn call_with_retry<T: Clone, R>(
        &self,
        request_data: T,
        rpc_call: impl Fn(
            SparkServiceClient<Channel>,
            Request<T>,
        ) -> std::pin::Pin<
            Box<dyn std::future::Future<Output = Result<tonic::Response<R>, tonic::Status>> + Send>,
        >,
        operator_id: Option<u32>,
    ) -> Result<R, SparkSdkError> {
        let request_factory = move || Request::new(request_data.clone());
        self.execute_with_retry(request_factory, rpc_call, operator_id)
            .await
    }
}

#[cfg(test)]
mod test {
    use super::SparkOperator;

    #[test]
    fn test_parse_spark_operator() {
        let operator = SparkOperator::parse(
            0,
            "0322ca18fc489ae25418a0e768273c2c61cabb823edfb14feb891e9bec62016510@http://localhost:8535",
        )
        .unwrap();

        assert_eq!(operator.id, 0);
        assert_eq!(
            operator.frost_identifier_str(),
            "0000000000000000000000000000000000000000000000000000000000000001"
        );
        assert_eq!(operator.address.to_string(), "http://localhost:8535/");
        assert_eq!(
            operator.identity_public_key.to_string(),
            "0322ca18fc489ae25418a0e768273c2c61cabb823edfb14feb891e9bec62016510"
        );
    }
}