ldk_node/
builder.rs

1// This file is Copyright its original authors, visible in version control history.
2//
3// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
4// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5// http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
6// accordance with one or both of these licenses.
7
8use std::collections::HashMap;
9use std::convert::TryInto;
10use std::default::Default;
11use std::path::PathBuf;
12use std::sync::{Arc, Mutex, Once, RwLock};
13use std::time::SystemTime;
14use std::{fmt, fs};
15
16use bdk_wallet::template::Bip84;
17use bdk_wallet::{KeychainKind, Wallet as BdkWallet};
18use bip39::Mnemonic;
19use bitcoin::bip32::{ChildNumber, Xpriv};
20use bitcoin::secp256k1::PublicKey;
21use bitcoin::{BlockHash, Network};
22use lightning::chain::{chainmonitor, BestBlock, Watch};
23use lightning::io::Cursor;
24use lightning::ln::channelmanager::{self, ChainParameters, ChannelManagerReadArgs};
25use lightning::ln::msgs::{RoutingMessageHandler, SocketAddress};
26use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler};
27use lightning::log_trace;
28use lightning::routing::gossip::NodeAlias;
29use lightning::routing::router::DefaultRouter;
30use lightning::routing::scoring::{
31	CombinedScorer, ProbabilisticScorer, ProbabilisticScoringDecayParameters,
32	ProbabilisticScoringFeeParameters,
33};
34use lightning::sign::{EntropySource, NodeSigner};
35use lightning::util::persist::{
36	KVStoreSync, CHANNEL_MANAGER_PERSISTENCE_KEY, CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
37	CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
38};
39use lightning::util::ser::ReadableArgs;
40use lightning::util::sweep::OutputSweeper;
41use lightning_persister::fs_store::FilesystemStore;
42use vss_client::headers::{FixedHeaders, LnurlAuthToJwtProvider, VssHeaderProvider};
43
44use crate::chain::ChainSource;
45use crate::config::{
46	default_user_config, may_announce_channel, AnnounceError, AsyncPaymentsRole,
47	BitcoindRestClientConfig, Config, ElectrumSyncConfig, EsploraSyncConfig,
48	DEFAULT_ESPLORA_SERVER_URL, DEFAULT_LOG_FILENAME, DEFAULT_LOG_LEVEL, WALLET_KEYS_SEED_LEN,
49};
50use crate::connection::ConnectionManager;
51use crate::event::EventQueue;
52use crate::fee_estimator::OnchainFeeEstimator;
53use crate::gossip::GossipSource;
54use crate::io::sqlite_store::SqliteStore;
55use crate::io::utils::{
56	read_external_pathfinding_scores_from_cache, read_node_metrics, write_node_metrics,
57};
58use crate::io::vss_store::VssStore;
59use crate::io::{
60	self, PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE,
61};
62use crate::liquidity::{
63	LSPS1ClientConfig, LSPS2ClientConfig, LSPS2ServiceConfig, LiquiditySourceBuilder,
64};
65use crate::logger::{log_error, LdkLogger, LogLevel, LogWriter, Logger};
66use crate::message_handler::NodeCustomMessageHandler;
67use crate::payment::asynchronous::om_mailbox::OnionMessageMailbox;
68use crate::peer_store::PeerStore;
69use crate::runtime::Runtime;
70use crate::tx_broadcaster::TransactionBroadcaster;
71use crate::types::{
72	ChainMonitor, ChannelManager, DynStore, GossipSync, Graph, KeysManager, MessageRouter,
73	OnionMessenger, PaymentStore, PeerManager, Persister,
74};
75use crate::wallet::persist::KVStoreWalletPersister;
76use crate::wallet::Wallet;
77use crate::{Node, NodeMetrics};
78
79const VSS_HARDENED_CHILD_INDEX: u32 = 877;
80const VSS_LNURL_AUTH_HARDENED_CHILD_INDEX: u32 = 138;
81const LSPS_HARDENED_CHILD_INDEX: u32 = 577;
82const PERSISTER_MAX_PENDING_UPDATES: u64 = 100;
83
84#[derive(Debug, Clone)]
85enum ChainDataSourceConfig {
86	Esplora {
87		server_url: String,
88		headers: HashMap<String, String>,
89		sync_config: Option<EsploraSyncConfig>,
90	},
91	Electrum {
92		server_url: String,
93		sync_config: Option<ElectrumSyncConfig>,
94	},
95	Bitcoind {
96		rpc_host: String,
97		rpc_port: u16,
98		rpc_user: String,
99		rpc_password: String,
100		rest_client_config: Option<BitcoindRestClientConfig>,
101	},
102}
103
104#[derive(Debug, Clone)]
105enum EntropySourceConfig {
106	SeedFile(String),
107	SeedBytes([u8; WALLET_KEYS_SEED_LEN]),
108	Bip39Mnemonic { mnemonic: Mnemonic, passphrase: Option<String> },
109}
110
111#[derive(Debug, Clone)]
112enum GossipSourceConfig {
113	P2PNetwork,
114	RapidGossipSync(String),
115}
116
117#[derive(Debug, Clone)]
118struct PathfindingScoresSyncConfig {
119	url: String,
120}
121
122#[derive(Debug, Clone, Default)]
123struct LiquiditySourceConfig {
124	// Act as an LSPS1 client connecting to the given service.
125	lsps1_client: Option<LSPS1ClientConfig>,
126	// Act as an LSPS2 client connecting to the given service.
127	lsps2_client: Option<LSPS2ClientConfig>,
128	// Act as an LSPS2 service.
129	lsps2_service: Option<LSPS2ServiceConfig>,
130}
131
132#[derive(Clone)]
133enum LogWriterConfig {
134	File { log_file_path: Option<String>, max_log_level: Option<LogLevel> },
135	Log,
136	Custom(Arc<dyn LogWriter>),
137}
138
139impl std::fmt::Debug for LogWriterConfig {
140	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
141		match self {
142			LogWriterConfig::File { max_log_level, log_file_path } => f
143				.debug_struct("LogWriterConfig")
144				.field("max_log_level", max_log_level)
145				.field("log_file_path", log_file_path)
146				.finish(),
147			LogWriterConfig::Log => write!(f, "LogWriterConfig::Log"),
148			LogWriterConfig::Custom(_) => {
149				f.debug_tuple("Custom").field(&"<config internal to custom log writer>").finish()
150			},
151		}
152	}
153}
154
155/// An error encountered during building a [`Node`].
156///
157/// [`Node`]: crate::Node
158#[derive(Debug, Clone, PartialEq)]
159pub enum BuildError {
160	/// The given seed bytes are invalid, e.g., have invalid length.
161	InvalidSeedBytes,
162	/// The given seed file is invalid, e.g., has invalid length, or could not be read.
163	InvalidSeedFile,
164	/// The current system time is invalid, clocks might have gone backwards.
165	InvalidSystemTime,
166	/// The a read channel monitor is invalid.
167	InvalidChannelMonitor,
168	/// The given listening addresses are invalid, e.g. too many were passed.
169	InvalidListeningAddresses,
170	/// The given announcement addresses are invalid, e.g. too many were passed.
171	InvalidAnnouncementAddresses,
172	/// The provided alias is invalid.
173	InvalidNodeAlias,
174	/// An attempt to setup a runtime has failed.
175	RuntimeSetupFailed,
176	/// We failed to read data from the [`KVStore`].
177	///
178	/// [`KVStore`]: lightning::util::persist::KVStoreSync
179	ReadFailed,
180	/// We failed to write data to the [`KVStore`].
181	///
182	/// [`KVStore`]: lightning::util::persist::KVStoreSync
183	WriteFailed,
184	/// We failed to access the given `storage_dir_path`.
185	StoragePathAccessFailed,
186	/// We failed to setup our [`KVStore`].
187	///
188	/// [`KVStore`]: lightning::util::persist::KVStoreSync
189	KVStoreSetupFailed,
190	/// We failed to setup the onchain wallet.
191	WalletSetupFailed,
192	/// We failed to setup the logger.
193	LoggerSetupFailed,
194	/// The given network does not match the node's previously configured network.
195	NetworkMismatch,
196	/// The role of the node in an asynchronous payments context is not compatible with the current configuration.
197	AsyncPaymentsConfigMismatch,
198}
199
200impl fmt::Display for BuildError {
201	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
202		match *self {
203			Self::InvalidSeedBytes => write!(f, "Given seed bytes are invalid."),
204			Self::InvalidSeedFile => write!(f, "Given seed file is invalid or could not be read."),
205			Self::InvalidSystemTime => {
206				write!(f, "System time is invalid. Clocks might have gone back in time.")
207			},
208			Self::InvalidChannelMonitor => {
209				write!(f, "Failed to watch a deserialized ChannelMonitor")
210			},
211			Self::InvalidListeningAddresses => write!(f, "Given listening addresses are invalid."),
212			Self::InvalidAnnouncementAddresses => {
213				write!(f, "Given announcement addresses are invalid.")
214			},
215			Self::RuntimeSetupFailed => write!(f, "Failed to setup a runtime."),
216			Self::ReadFailed => write!(f, "Failed to read from store."),
217			Self::WriteFailed => write!(f, "Failed to write to store."),
218			Self::StoragePathAccessFailed => write!(f, "Failed to access the given storage path."),
219			Self::KVStoreSetupFailed => write!(f, "Failed to setup KVStore."),
220			Self::WalletSetupFailed => write!(f, "Failed to setup onchain wallet."),
221			Self::LoggerSetupFailed => write!(f, "Failed to setup the logger."),
222			Self::InvalidNodeAlias => write!(f, "Given node alias is invalid."),
223			Self::NetworkMismatch => {
224				write!(f, "Given network does not match the node's previously configured network.")
225			},
226			Self::AsyncPaymentsConfigMismatch => {
227				write!(
228					f,
229					"The async payments role is not compatible with the current configuration."
230				)
231			},
232		}
233	}
234}
235
236impl std::error::Error for BuildError {}
237
238/// A builder for an [`Node`] instance, allowing to set some configuration and module choices from
239/// the getgo.
240///
241/// ### Defaults
242/// - Wallet entropy is sourced from a `keys_seed` file located under [`Config::storage_dir_path`]
243/// - Chain data is sourced from the Esplora endpoint `https://blockstream.info/api`
244/// - Gossip data is sourced via the peer-to-peer network
245#[derive(Debug)]
246pub struct NodeBuilder {
247	config: Config,
248	entropy_source_config: Option<EntropySourceConfig>,
249	chain_data_source_config: Option<ChainDataSourceConfig>,
250	gossip_source_config: Option<GossipSourceConfig>,
251	liquidity_source_config: Option<LiquiditySourceConfig>,
252	log_writer_config: Option<LogWriterConfig>,
253	async_payments_role: Option<AsyncPaymentsRole>,
254	runtime_handle: Option<tokio::runtime::Handle>,
255	pathfinding_scores_sync_config: Option<PathfindingScoresSyncConfig>,
256}
257
258impl NodeBuilder {
259	/// Creates a new builder instance with the default configuration.
260	pub fn new() -> Self {
261		let config = Config::default();
262		Self::from_config(config)
263	}
264
265	/// Creates a new builder instance from an [`Config`].
266	pub fn from_config(config: Config) -> Self {
267		let entropy_source_config = None;
268		let chain_data_source_config = None;
269		let gossip_source_config = None;
270		let liquidity_source_config = None;
271		let log_writer_config = None;
272		let runtime_handle = None;
273		let pathfinding_scores_sync_config = None;
274		Self {
275			config,
276			entropy_source_config,
277			chain_data_source_config,
278			gossip_source_config,
279			liquidity_source_config,
280			log_writer_config,
281			runtime_handle,
282			async_payments_role: None,
283			pathfinding_scores_sync_config,
284		}
285	}
286
287	/// Configures the [`Node`] instance to (re-)use a specific `tokio` runtime.
288	///
289	/// If not provided, the node will spawn its own runtime or reuse any outer runtime context it
290	/// can detect.
291	#[cfg_attr(feature = "uniffi", allow(dead_code))]
292	pub fn set_runtime(&mut self, runtime_handle: tokio::runtime::Handle) -> &mut Self {
293		self.runtime_handle = Some(runtime_handle);
294		self
295	}
296
297	/// Configures the [`Node`] instance to source its wallet entropy from a seed file on disk.
298	///
299	/// If the given file does not exist a new random seed file will be generated and
300	/// stored at the given location.
301	pub fn set_entropy_seed_path(&mut self, seed_path: String) -> &mut Self {
302		self.entropy_source_config = Some(EntropySourceConfig::SeedFile(seed_path));
303		self
304	}
305
306	/// Configures the [`Node`] instance to source its wallet entropy from the given
307	/// [`WALLET_KEYS_SEED_LEN`] seed bytes.
308	pub fn set_entropy_seed_bytes(&mut self, seed_bytes: [u8; WALLET_KEYS_SEED_LEN]) -> &mut Self {
309		self.entropy_source_config = Some(EntropySourceConfig::SeedBytes(seed_bytes));
310		self
311	}
312
313	/// Configures the [`Node`] instance to source its wallet entropy from a [BIP 39] mnemonic.
314	///
315	/// [BIP 39]: https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki
316	pub fn set_entropy_bip39_mnemonic(
317		&mut self, mnemonic: Mnemonic, passphrase: Option<String>,
318	) -> &mut Self {
319		self.entropy_source_config =
320			Some(EntropySourceConfig::Bip39Mnemonic { mnemonic, passphrase });
321		self
322	}
323
324	/// Configures the [`Node`] instance to source its chain data from the given Esplora server.
325	///
326	/// If no `sync_config` is given, default values are used. See [`EsploraSyncConfig`] for more
327	/// information.
328	pub fn set_chain_source_esplora(
329		&mut self, server_url: String, sync_config: Option<EsploraSyncConfig>,
330	) -> &mut Self {
331		self.chain_data_source_config = Some(ChainDataSourceConfig::Esplora {
332			server_url,
333			headers: Default::default(),
334			sync_config,
335		});
336		self
337	}
338
339	/// Configures the [`Node`] instance to source its chain data from the given Esplora server.
340	///
341	/// The given `headers` will be included in all requests to the Esplora server, typically used for
342	/// authentication purposes.
343	///
344	/// If no `sync_config` is given, default values are used. See [`EsploraSyncConfig`] for more
345	/// information.
346	pub fn set_chain_source_esplora_with_headers(
347		&mut self, server_url: String, headers: HashMap<String, String>,
348		sync_config: Option<EsploraSyncConfig>,
349	) -> &mut Self {
350		self.chain_data_source_config =
351			Some(ChainDataSourceConfig::Esplora { server_url, headers, sync_config });
352		self
353	}
354
355	/// Configures the [`Node`] instance to source its chain data from the given Electrum server.
356	///
357	/// If no `sync_config` is given, default values are used. See [`ElectrumSyncConfig`] for more
358	/// information.
359	pub fn set_chain_source_electrum(
360		&mut self, server_url: String, sync_config: Option<ElectrumSyncConfig>,
361	) -> &mut Self {
362		self.chain_data_source_config =
363			Some(ChainDataSourceConfig::Electrum { server_url, sync_config });
364		self
365	}
366
367	/// Configures the [`Node`] instance to connect to a Bitcoin Core node via RPC.
368	///
369	/// This method establishes an RPC connection that enables all essential chain operations including
370	/// transaction broadcasting and chain data synchronization.
371	///
372	/// ## Parameters:
373	/// * `rpc_host`, `rpc_port`, `rpc_user`, `rpc_password` - Required parameters for the Bitcoin Core RPC
374	///   connection.
375	pub fn set_chain_source_bitcoind_rpc(
376		&mut self, rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String,
377	) -> &mut Self {
378		self.chain_data_source_config = Some(ChainDataSourceConfig::Bitcoind {
379			rpc_host,
380			rpc_port,
381			rpc_user,
382			rpc_password,
383			rest_client_config: None,
384		});
385		self
386	}
387
388	/// Configures the [`Node`] instance to synchronize chain data from a Bitcoin Core REST endpoint.
389	///
390	/// This method enables chain data synchronization via Bitcoin Core's REST interface. We pass
391	/// additional RPC configuration to non-REST-supported API calls like transaction broadcasting.
392	///
393	/// ## Parameters:
394	/// * `rest_host`, `rest_port` - Required parameters for the Bitcoin Core REST connection.
395	/// * `rpc_host`, `rpc_port`, `rpc_user`, `rpc_password` - Required parameters for the Bitcoin Core RPC
396	///   connection
397	pub fn set_chain_source_bitcoind_rest(
398		&mut self, rest_host: String, rest_port: u16, rpc_host: String, rpc_port: u16,
399		rpc_user: String, rpc_password: String,
400	) -> &mut Self {
401		self.chain_data_source_config = Some(ChainDataSourceConfig::Bitcoind {
402			rpc_host,
403			rpc_port,
404			rpc_user,
405			rpc_password,
406			rest_client_config: Some(BitcoindRestClientConfig { rest_host, rest_port }),
407		});
408
409		self
410	}
411
412	/// Configures the [`Node`] instance to source its gossip data from the Lightning peer-to-peer
413	/// network.
414	pub fn set_gossip_source_p2p(&mut self) -> &mut Self {
415		self.gossip_source_config = Some(GossipSourceConfig::P2PNetwork);
416		self
417	}
418
419	/// Configures the [`Node`] instance to source its gossip data from the given RapidGossipSync
420	/// server.
421	pub fn set_gossip_source_rgs(&mut self, rgs_server_url: String) -> &mut Self {
422		self.gossip_source_config = Some(GossipSourceConfig::RapidGossipSync(rgs_server_url));
423		self
424	}
425
426	/// Configures the [`Node`] instance to source its external scores from the given URL.
427	///
428	/// The external scores are merged into the local scoring system to improve routing.
429	pub fn set_pathfinding_scores_source(&mut self, url: String) -> &mut Self {
430		self.pathfinding_scores_sync_config = Some(PathfindingScoresSyncConfig { url });
431		self
432	}
433
434	/// Configures the [`Node`] instance to source inbound liquidity from the given
435	/// [bLIP-51 / LSPS1] service.
436	///
437	/// Will mark the LSP as trusted for 0-confirmation channels, see [`Config::trusted_peers_0conf`].
438	///
439	/// The given `token` will be used by the LSP to authenticate the user.
440	///
441	/// [bLIP-51 / LSPS1]: https://github.com/lightning/blips/blob/master/blip-0051.md
442	pub fn set_liquidity_source_lsps1(
443		&mut self, node_id: PublicKey, address: SocketAddress, token: Option<String>,
444	) -> &mut Self {
445		// Mark the LSP as trusted for 0conf
446		self.config.trusted_peers_0conf.push(node_id.clone());
447
448		let liquidity_source_config =
449			self.liquidity_source_config.get_or_insert(LiquiditySourceConfig::default());
450		let lsps1_client_config = LSPS1ClientConfig { node_id, address, token };
451		liquidity_source_config.lsps1_client = Some(lsps1_client_config);
452		self
453	}
454
455	/// Configures the [`Node`] instance to source just-in-time inbound liquidity from the given
456	/// [bLIP-52 / LSPS2] service.
457	///
458	/// Will mark the LSP as trusted for 0-confirmation channels, see [`Config::trusted_peers_0conf`].
459	///
460	/// The given `token` will be used by the LSP to authenticate the user.
461	///
462	/// [bLIP-52 / LSPS2]: https://github.com/lightning/blips/blob/master/blip-0052.md
463	pub fn set_liquidity_source_lsps2(
464		&mut self, node_id: PublicKey, address: SocketAddress, token: Option<String>,
465	) -> &mut Self {
466		// Mark the LSP as trusted for 0conf
467		self.config.trusted_peers_0conf.push(node_id.clone());
468
469		let liquidity_source_config =
470			self.liquidity_source_config.get_or_insert(LiquiditySourceConfig::default());
471		let lsps2_client_config = LSPS2ClientConfig { node_id, address, token };
472		liquidity_source_config.lsps2_client = Some(lsps2_client_config);
473		self
474	}
475
476	/// Configures the [`Node`] instance to provide an [LSPS2] service, issuing just-in-time
477	/// channels to clients.
478	///
479	/// **Caution**: LSP service support is in **alpha** and is considered an experimental feature.
480	///
481	/// [LSPS2]: https://github.com/BitcoinAndLightningLayerSpecs/lsp/blob/main/LSPS2/README.md
482	pub fn set_liquidity_provider_lsps2(
483		&mut self, service_config: LSPS2ServiceConfig,
484	) -> &mut Self {
485		let liquidity_source_config =
486			self.liquidity_source_config.get_or_insert(LiquiditySourceConfig::default());
487		liquidity_source_config.lsps2_service = Some(service_config);
488		self
489	}
490
491	/// Sets the used storage directory path.
492	pub fn set_storage_dir_path(&mut self, storage_dir_path: String) -> &mut Self {
493		self.config.storage_dir_path = storage_dir_path;
494		self
495	}
496
497	/// Configures the [`Node`] instance to write logs to the filesystem.
498	///
499	/// The `log_file_path` defaults to [`DEFAULT_LOG_FILENAME`] in the configured
500	/// [`Config::storage_dir_path`] if set to `None`.
501	///
502	/// If set, the `max_log_level` sets the maximum log level. Otherwise, the latter defaults to
503	/// [`DEFAULT_LOG_LEVEL`].
504	///
505	/// [`DEFAULT_LOG_FILENAME`]: crate::config::DEFAULT_LOG_FILENAME
506	pub fn set_filesystem_logger(
507		&mut self, log_file_path: Option<String>, max_log_level: Option<LogLevel>,
508	) -> &mut Self {
509		self.log_writer_config = Some(LogWriterConfig::File { log_file_path, max_log_level });
510		self
511	}
512
513	/// Configures the [`Node`] instance to write logs to the [`log`](https://crates.io/crates/log) facade.
514	pub fn set_log_facade_logger(&mut self) -> &mut Self {
515		self.log_writer_config = Some(LogWriterConfig::Log);
516		self
517	}
518
519	/// Configures the [`Node`] instance to write logs to the provided custom [`LogWriter`].
520	pub fn set_custom_logger(&mut self, log_writer: Arc<dyn LogWriter>) -> &mut Self {
521		self.log_writer_config = Some(LogWriterConfig::Custom(log_writer));
522		self
523	}
524
525	/// Sets the Bitcoin network used.
526	pub fn set_network(&mut self, network: Network) -> &mut Self {
527		self.config.network = network;
528		self
529	}
530
531	/// Sets the IP address and TCP port on which [`Node`] will listen for incoming network connections.
532	pub fn set_listening_addresses(
533		&mut self, listening_addresses: Vec<SocketAddress>,
534	) -> Result<&mut Self, BuildError> {
535		if listening_addresses.len() > 100 {
536			return Err(BuildError::InvalidListeningAddresses);
537		}
538
539		self.config.listening_addresses = Some(listening_addresses);
540		Ok(self)
541	}
542
543	/// Sets the IP address and TCP port which [`Node`] will announce to the gossip network that it accepts connections on.
544	///
545	/// **Note**: If unset, the [`listening_addresses`] will be used as the list of addresses to announce.
546	///
547	/// [`listening_addresses`]: Self::set_listening_addresses
548	pub fn set_announcement_addresses(
549		&mut self, announcement_addresses: Vec<SocketAddress>,
550	) -> Result<&mut Self, BuildError> {
551		if announcement_addresses.len() > 100 {
552			return Err(BuildError::InvalidAnnouncementAddresses);
553		}
554
555		self.config.announcement_addresses = Some(announcement_addresses);
556		Ok(self)
557	}
558
559	/// Sets the node alias that will be used when broadcasting announcements to the gossip
560	/// network.
561	///
562	/// The provided alias must be a valid UTF-8 string and no longer than 32 bytes in total.
563	pub fn set_node_alias(&mut self, node_alias: String) -> Result<&mut Self, BuildError> {
564		let node_alias = sanitize_alias(&node_alias)?;
565
566		self.config.node_alias = Some(node_alias);
567		Ok(self)
568	}
569
570	/// Sets the role of the node in an asynchronous payments context.
571	///
572	/// See <https://github.com/lightning/bolts/pull/1149> for more information about the async payments protocol.
573	pub fn set_async_payments_role(
574		&mut self, role: Option<AsyncPaymentsRole>,
575	) -> Result<&mut Self, BuildError> {
576		if let Some(AsyncPaymentsRole::Server) = role {
577			may_announce_channel(&self.config)
578				.map_err(|_| BuildError::AsyncPaymentsConfigMismatch)?;
579		}
580
581		self.async_payments_role = role;
582		Ok(self)
583	}
584
585	/// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options
586	/// previously configured.
587	pub fn build(&self) -> Result<Node, BuildError> {
588		let storage_dir_path = self.config.storage_dir_path.clone();
589		fs::create_dir_all(storage_dir_path.clone())
590			.map_err(|_| BuildError::StoragePathAccessFailed)?;
591		let kv_store = Arc::new(
592			SqliteStore::new(
593				storage_dir_path.into(),
594				Some(io::sqlite_store::SQLITE_DB_FILE_NAME.to_string()),
595				Some(io::sqlite_store::KV_TABLE_NAME.to_string()),
596			)
597			.map_err(|_| BuildError::KVStoreSetupFailed)?,
598		);
599		self.build_with_store(kv_store)
600	}
601
602	/// Builds a [`Node`] instance with a [`FilesystemStore`] backend and according to the options
603	/// previously configured.
604	pub fn build_with_fs_store(&self) -> Result<Node, BuildError> {
605		let mut storage_dir_path: PathBuf = self.config.storage_dir_path.clone().into();
606		storage_dir_path.push("fs_store");
607
608		fs::create_dir_all(storage_dir_path.clone())
609			.map_err(|_| BuildError::StoragePathAccessFailed)?;
610		let kv_store = Arc::new(FilesystemStore::new(storage_dir_path));
611		self.build_with_store(kv_store)
612	}
613
614	/// Builds a [`Node`] instance with a [VSS] backend and according to the options
615	/// previously configured.
616	///
617	/// Uses [LNURL-auth] based authentication scheme as default method for authentication/authorization.
618	///
619	/// The LNURL challenge will be retrieved by making a request to the given `lnurl_auth_server_url`.
620	/// The returned JWT token in response to the signed LNURL request, will be used for
621	/// authentication/authorization of all the requests made to VSS.
622	///
623	/// `fixed_headers` are included as it is in all the requests made to VSS and LNURL auth server.
624	///
625	/// **Caution**: VSS support is in **alpha** and is considered experimental.
626	/// Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are
627	/// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted.
628	///
629	/// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md
630	/// [LNURL-auth]: https://github.com/lnurl/luds/blob/luds/04.md
631	pub fn build_with_vss_store(
632		&self, vss_url: String, store_id: String, lnurl_auth_server_url: String,
633		fixed_headers: HashMap<String, String>,
634	) -> Result<Node, BuildError> {
635		use bitcoin::key::Secp256k1;
636
637		let logger = setup_logger(&self.log_writer_config, &self.config)?;
638
639		let seed_bytes = seed_bytes_from_config(
640			&self.config,
641			self.entropy_source_config.as_ref(),
642			Arc::clone(&logger),
643		)?;
644
645		let config = Arc::new(self.config.clone());
646
647		let vss_xprv =
648			derive_xprv(config, &seed_bytes, VSS_HARDENED_CHILD_INDEX, Arc::clone(&logger))?;
649
650		let lnurl_auth_xprv = vss_xprv
651			.derive_priv(
652				&Secp256k1::new(),
653				&[ChildNumber::Hardened { index: VSS_LNURL_AUTH_HARDENED_CHILD_INDEX }],
654			)
655			.map_err(|e| {
656				log_error!(logger, "Failed to derive VSS secret: {}", e);
657				BuildError::KVStoreSetupFailed
658			})?;
659
660		let lnurl_auth_jwt_provider =
661			LnurlAuthToJwtProvider::new(lnurl_auth_xprv, lnurl_auth_server_url, fixed_headers)
662				.map_err(|e| {
663					log_error!(logger, "Failed to create LnurlAuthToJwtProvider: {}", e);
664					BuildError::KVStoreSetupFailed
665				})?;
666
667		let header_provider = Arc::new(lnurl_auth_jwt_provider);
668
669		self.build_with_vss_store_and_header_provider(vss_url, store_id, header_provider)
670	}
671
672	/// Builds a [`Node`] instance with a [VSS] backend and according to the options
673	/// previously configured.
674	///
675	/// Uses [`FixedHeaders`] as default method for authentication/authorization.
676	///
677	/// Given `fixed_headers` are included as it is in all the requests made to VSS.
678	///
679	/// **Caution**: VSS support is in **alpha** and is considered experimental.
680	/// Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are
681	/// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted.
682	///
683	/// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md
684	pub fn build_with_vss_store_and_fixed_headers(
685		&self, vss_url: String, store_id: String, fixed_headers: HashMap<String, String>,
686	) -> Result<Node, BuildError> {
687		let header_provider = Arc::new(FixedHeaders::new(fixed_headers));
688
689		self.build_with_vss_store_and_header_provider(vss_url, store_id, header_provider)
690	}
691
692	/// Builds a [`Node`] instance with a [VSS] backend and according to the options
693	/// previously configured.
694	///
695	/// Given `header_provider` is used to attach headers to every request made
696	/// to VSS.
697	///
698	/// **Caution**: VSS support is in **alpha** and is considered experimental.
699	/// Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are
700	/// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted.
701	///
702	/// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md
703	pub fn build_with_vss_store_and_header_provider(
704		&self, vss_url: String, store_id: String, header_provider: Arc<dyn VssHeaderProvider>,
705	) -> Result<Node, BuildError> {
706		let logger = setup_logger(&self.log_writer_config, &self.config)?;
707
708		let runtime = if let Some(handle) = self.runtime_handle.as_ref() {
709			Arc::new(Runtime::with_handle(handle.clone(), Arc::clone(&logger)))
710		} else {
711			Arc::new(Runtime::new(Arc::clone(&logger)).map_err(|e| {
712				log_error!(logger, "Failed to setup tokio runtime: {}", e);
713				BuildError::RuntimeSetupFailed
714			})?)
715		};
716
717		let seed_bytes = seed_bytes_from_config(
718			&self.config,
719			self.entropy_source_config.as_ref(),
720			Arc::clone(&logger),
721		)?;
722
723		let config = Arc::new(self.config.clone());
724
725		let vss_xprv = derive_xprv(
726			config.clone(),
727			&seed_bytes,
728			VSS_HARDENED_CHILD_INDEX,
729			Arc::clone(&logger),
730		)?;
731
732		let vss_seed_bytes: [u8; 32] = vss_xprv.private_key.secret_bytes();
733
734		let vss_store =
735			VssStore::new(vss_url, store_id, vss_seed_bytes, header_provider).map_err(|e| {
736				log_error!(logger, "Failed to setup VSS store: {}", e);
737				BuildError::KVStoreSetupFailed
738			})?;
739
740		build_with_store_internal(
741			config,
742			self.chain_data_source_config.as_ref(),
743			self.gossip_source_config.as_ref(),
744			self.liquidity_source_config.as_ref(),
745			self.pathfinding_scores_sync_config.as_ref(),
746			self.async_payments_role,
747			seed_bytes,
748			runtime,
749			logger,
750			Arc::new(vss_store),
751		)
752	}
753
754	/// Builds a [`Node`] instance according to the options previously configured.
755	pub fn build_with_store(&self, kv_store: Arc<DynStore>) -> Result<Node, BuildError> {
756		let logger = setup_logger(&self.log_writer_config, &self.config)?;
757
758		let runtime = if let Some(handle) = self.runtime_handle.as_ref() {
759			Arc::new(Runtime::with_handle(handle.clone(), Arc::clone(&logger)))
760		} else {
761			Arc::new(Runtime::new(Arc::clone(&logger)).map_err(|e| {
762				log_error!(logger, "Failed to setup tokio runtime: {}", e);
763				BuildError::RuntimeSetupFailed
764			})?)
765		};
766
767		let seed_bytes = seed_bytes_from_config(
768			&self.config,
769			self.entropy_source_config.as_ref(),
770			Arc::clone(&logger),
771		)?;
772		let config = Arc::new(self.config.clone());
773
774		build_with_store_internal(
775			config,
776			self.chain_data_source_config.as_ref(),
777			self.gossip_source_config.as_ref(),
778			self.liquidity_source_config.as_ref(),
779			self.pathfinding_scores_sync_config.as_ref(),
780			self.async_payments_role,
781			seed_bytes,
782			runtime,
783			logger,
784			kv_store,
785		)
786	}
787}
788
789/// A builder for an [`Node`] instance, allowing to set some configuration and module choices from
790/// the getgo.
791///
792/// ### Defaults
793/// - Wallet entropy is sourced from a `keys_seed` file located under [`Config::storage_dir_path`]
794/// - Chain data is sourced from the Esplora endpoint `https://blockstream.info/api`
795/// - Gossip data is sourced via the peer-to-peer network
796#[derive(Debug)]
797#[cfg(feature = "uniffi")]
798pub struct ArcedNodeBuilder {
799	inner: RwLock<NodeBuilder>,
800}
801
802#[cfg(feature = "uniffi")]
803impl ArcedNodeBuilder {
804	/// Creates a new builder instance with the default configuration.
805	pub fn new() -> Self {
806		let inner = RwLock::new(NodeBuilder::new());
807		Self { inner }
808	}
809
810	/// Creates a new builder instance from an [`Config`].
811	pub fn from_config(config: Config) -> Self {
812		let inner = RwLock::new(NodeBuilder::from_config(config));
813		Self { inner }
814	}
815
816	/// Configures the [`Node`] instance to source its wallet entropy from a seed file on disk.
817	///
818	/// If the given file does not exist a new random seed file will be generated and
819	/// stored at the given location.
820	pub fn set_entropy_seed_path(&self, seed_path: String) {
821		self.inner.write().unwrap().set_entropy_seed_path(seed_path);
822	}
823
824	/// Configures the [`Node`] instance to source its wallet entropy from the given
825	/// [`WALLET_KEYS_SEED_LEN`] seed bytes.
826	///
827	/// **Note:** Will return an error if the length of the given `seed_bytes` differs from
828	/// [`WALLET_KEYS_SEED_LEN`].
829	pub fn set_entropy_seed_bytes(&self, seed_bytes: Vec<u8>) -> Result<(), BuildError> {
830		if seed_bytes.len() != WALLET_KEYS_SEED_LEN {
831			return Err(BuildError::InvalidSeedBytes);
832		}
833		let mut bytes = [0u8; WALLET_KEYS_SEED_LEN];
834		bytes.copy_from_slice(&seed_bytes);
835
836		self.inner.write().unwrap().set_entropy_seed_bytes(bytes);
837		Ok(())
838	}
839
840	/// Configures the [`Node`] instance to source its wallet entropy from a [BIP 39] mnemonic.
841	///
842	/// [BIP 39]: https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki
843	pub fn set_entropy_bip39_mnemonic(&self, mnemonic: Mnemonic, passphrase: Option<String>) {
844		self.inner.write().unwrap().set_entropy_bip39_mnemonic(mnemonic, passphrase);
845	}
846
847	/// Configures the [`Node`] instance to source its chain data from the given Esplora server.
848	///
849	/// If no `sync_config` is given, default values are used. See [`EsploraSyncConfig`] for more
850	/// information.
851	pub fn set_chain_source_esplora(
852		&self, server_url: String, sync_config: Option<EsploraSyncConfig>,
853	) {
854		self.inner.write().unwrap().set_chain_source_esplora(server_url, sync_config);
855	}
856
857	/// Configures the [`Node`] instance to source its chain data from the given Esplora server.
858	///
859	/// The given `headers` will be included in all requests to the Esplora server, typically used for
860	/// authentication purposes.
861	///
862	/// If no `sync_config` is given, default values are used. See [`EsploraSyncConfig`] for more
863	/// information.
864	pub fn set_chain_source_esplora_with_headers(
865		&self, server_url: String, headers: HashMap<String, String>,
866		sync_config: Option<EsploraSyncConfig>,
867	) {
868		self.inner.write().unwrap().set_chain_source_esplora_with_headers(
869			server_url,
870			headers,
871			sync_config,
872		);
873	}
874
875	/// Configures the [`Node`] instance to source its chain data from the given Electrum server.
876	///
877	/// If no `sync_config` is given, default values are used. See [`ElectrumSyncConfig`] for more
878	/// information.
879	pub fn set_chain_source_electrum(
880		&self, server_url: String, sync_config: Option<ElectrumSyncConfig>,
881	) {
882		self.inner.write().unwrap().set_chain_source_electrum(server_url, sync_config);
883	}
884
885	/// Configures the [`Node`] instance to connect to a Bitcoin Core node via RPC.
886	///
887	/// This method establishes an RPC connection that enables all essential chain operations including
888	/// transaction broadcasting and chain data synchronization.
889	///
890	/// ## Parameters:
891	/// * `rpc_host`, `rpc_port`, `rpc_user`, `rpc_password` - Required parameters for the Bitcoin Core RPC
892	///   connection.
893	pub fn set_chain_source_bitcoind_rpc(
894		&self, rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String,
895	) {
896		self.inner.write().unwrap().set_chain_source_bitcoind_rpc(
897			rpc_host,
898			rpc_port,
899			rpc_user,
900			rpc_password,
901		);
902	}
903
904	/// Configures the [`Node`] instance to synchronize chain data from a Bitcoin Core REST endpoint.
905	///
906	/// This method enables chain data synchronization via Bitcoin Core's REST interface. We pass
907	/// additional RPC configuration to non-REST-supported API calls like transaction broadcasting.
908	///
909	/// ## Parameters:
910	/// * `rest_host`, `rest_port` - Required parameters for the Bitcoin Core REST connection.
911	/// * `rpc_host`, `rpc_port`, `rpc_user`, `rpc_password` - Required parameters for the Bitcoin Core RPC
912	///   connection
913	pub fn set_chain_source_bitcoind_rest(
914		&self, rest_host: String, rest_port: u16, rpc_host: String, rpc_port: u16,
915		rpc_user: String, rpc_password: String,
916	) {
917		self.inner.write().unwrap().set_chain_source_bitcoind_rest(
918			rest_host,
919			rest_port,
920			rpc_host,
921			rpc_port,
922			rpc_user,
923			rpc_password,
924		);
925	}
926
927	/// Configures the [`Node`] instance to source its gossip data from the Lightning peer-to-peer
928	/// network.
929	pub fn set_gossip_source_p2p(&self) {
930		self.inner.write().unwrap().set_gossip_source_p2p();
931	}
932
933	/// Configures the [`Node`] instance to source its gossip data from the given RapidGossipSync
934	/// server.
935	pub fn set_gossip_source_rgs(&self, rgs_server_url: String) {
936		self.inner.write().unwrap().set_gossip_source_rgs(rgs_server_url);
937	}
938
939	/// Configures the [`Node`] instance to source its external scores from the given URL.
940	///
941	/// The external scores are merged into the local scoring system to improve routing.
942	pub fn set_pathfinding_scores_source(&self, url: String) {
943		self.inner.write().unwrap().set_pathfinding_scores_source(url);
944	}
945
946	/// Configures the [`Node`] instance to source inbound liquidity from the given
947	/// [bLIP-51 / LSPS1] service.
948	///
949	/// Will mark the LSP as trusted for 0-confirmation channels, see [`Config::trusted_peers_0conf`].
950	///
951	/// The given `token` will be used by the LSP to authenticate the user.
952	///
953	/// [bLIP-51 / LSPS1]: https://github.com/lightning/blips/blob/master/blip-0051.md
954	pub fn set_liquidity_source_lsps1(
955		&self, node_id: PublicKey, address: SocketAddress, token: Option<String>,
956	) {
957		self.inner.write().unwrap().set_liquidity_source_lsps1(node_id, address, token);
958	}
959
960	/// Configures the [`Node`] instance to source just-in-time inbound liquidity from the given
961	/// [bLIP-52 / LSPS2] service.
962	///
963	/// Will mark the LSP as trusted for 0-confirmation channels, see [`Config::trusted_peers_0conf`].
964	///
965	/// The given `token` will be used by the LSP to authenticate the user.
966	///
967	/// [bLIP-52 / LSPS2]: https://github.com/lightning/blips/blob/master/blip-0052.md
968	pub fn set_liquidity_source_lsps2(
969		&self, node_id: PublicKey, address: SocketAddress, token: Option<String>,
970	) {
971		self.inner.write().unwrap().set_liquidity_source_lsps2(node_id, address, token);
972	}
973
974	/// Configures the [`Node`] instance to provide an [LSPS2] service, issuing just-in-time
975	/// channels to clients.
976	///
977	/// **Caution**: LSP service support is in **alpha** and is considered an experimental feature.
978	///
979	/// [LSPS2]: https://github.com/BitcoinAndLightningLayerSpecs/lsp/blob/main/LSPS2/README.md
980	pub fn set_liquidity_provider_lsps2(&self, service_config: LSPS2ServiceConfig) {
981		self.inner.write().unwrap().set_liquidity_provider_lsps2(service_config);
982	}
983
984	/// Sets the used storage directory path.
985	pub fn set_storage_dir_path(&self, storage_dir_path: String) {
986		self.inner.write().unwrap().set_storage_dir_path(storage_dir_path);
987	}
988
989	/// Configures the [`Node`] instance to write logs to the filesystem.
990	///
991	/// The `log_file_path` defaults to [`DEFAULT_LOG_FILENAME`] in the configured
992	/// [`Config::storage_dir_path`] if set to `None`.
993	///
994	/// If set, the `max_log_level` sets the maximum log level. Otherwise, the latter defaults to
995	/// [`DEFAULT_LOG_LEVEL`].
996	///
997	/// [`DEFAULT_LOG_FILENAME`]: crate::config::DEFAULT_LOG_FILENAME
998	pub fn set_filesystem_logger(
999		&self, log_file_path: Option<String>, log_level: Option<LogLevel>,
1000	) {
1001		self.inner.write().unwrap().set_filesystem_logger(log_file_path, log_level);
1002	}
1003
1004	/// Configures the [`Node`] instance to write logs to the [`log`](https://crates.io/crates/log) facade.
1005	pub fn set_log_facade_logger(&self) {
1006		self.inner.write().unwrap().set_log_facade_logger();
1007	}
1008
1009	/// Configures the [`Node`] instance to write logs to the provided custom [`LogWriter`].
1010	pub fn set_custom_logger(&self, log_writer: Arc<dyn LogWriter>) {
1011		self.inner.write().unwrap().set_custom_logger(log_writer);
1012	}
1013
1014	/// Sets the Bitcoin network used.
1015	pub fn set_network(&self, network: Network) {
1016		self.inner.write().unwrap().set_network(network);
1017	}
1018
1019	/// Sets the IP address and TCP port on which [`Node`] will listen for incoming network connections.
1020	pub fn set_listening_addresses(
1021		&self, listening_addresses: Vec<SocketAddress>,
1022	) -> Result<(), BuildError> {
1023		self.inner.write().unwrap().set_listening_addresses(listening_addresses).map(|_| ())
1024	}
1025
1026	/// Sets the IP address and TCP port which [`Node`] will announce to the gossip network that it accepts connections on.
1027	///
1028	/// **Note**: If unset, the [`listening_addresses`] will be used as the list of addresses to announce.
1029	///
1030	/// [`listening_addresses`]: Self::set_listening_addresses
1031	pub fn set_announcement_addresses(
1032		&self, announcement_addresses: Vec<SocketAddress>,
1033	) -> Result<(), BuildError> {
1034		self.inner.write().unwrap().set_announcement_addresses(announcement_addresses).map(|_| ())
1035	}
1036
1037	/// Sets the node alias that will be used when broadcasting announcements to the gossip
1038	/// network.
1039	///
1040	/// The provided alias must be a valid UTF-8 string and no longer than 32 bytes in total.
1041	pub fn set_node_alias(&self, node_alias: String) -> Result<(), BuildError> {
1042		self.inner.write().unwrap().set_node_alias(node_alias).map(|_| ())
1043	}
1044
1045	/// Sets the role of the node in an asynchronous payments context.
1046	pub fn set_async_payments_role(
1047		&self, role: Option<AsyncPaymentsRole>,
1048	) -> Result<(), BuildError> {
1049		self.inner.write().unwrap().set_async_payments_role(role).map(|_| ())
1050	}
1051
1052	/// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options
1053	/// previously configured.
1054	pub fn build(&self) -> Result<Arc<Node>, BuildError> {
1055		self.inner.read().unwrap().build().map(Arc::new)
1056	}
1057
1058	/// Builds a [`Node`] instance with a [`FilesystemStore`] backend and according to the options
1059	/// previously configured.
1060	pub fn build_with_fs_store(&self) -> Result<Arc<Node>, BuildError> {
1061		self.inner.read().unwrap().build_with_fs_store().map(Arc::new)
1062	}
1063
1064	/// Builds a [`Node`] instance with a [VSS] backend and according to the options
1065	/// previously configured.
1066	///
1067	/// Uses [LNURL-auth] based authentication scheme as default method for authentication/authorization.
1068	///
1069	/// The LNURL challenge will be retrieved by making a request to the given `lnurl_auth_server_url`.
1070	/// The returned JWT token in response to the signed LNURL request, will be used for
1071	/// authentication/authorization of all the requests made to VSS.
1072	///
1073	/// `fixed_headers` are included as it is in all the requests made to VSS and LNURL auth server.
1074	///
1075	/// **Caution**: VSS support is in **alpha** and is considered experimental.
1076	/// Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are
1077	/// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted.
1078	///
1079	/// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md
1080	/// [LNURL-auth]: https://github.com/lnurl/luds/blob/luds/04.md
1081	pub fn build_with_vss_store(
1082		&self, vss_url: String, store_id: String, lnurl_auth_server_url: String,
1083		fixed_headers: HashMap<String, String>,
1084	) -> Result<Arc<Node>, BuildError> {
1085		self.inner
1086			.read()
1087			.unwrap()
1088			.build_with_vss_store(vss_url, store_id, lnurl_auth_server_url, fixed_headers)
1089			.map(Arc::new)
1090	}
1091
1092	/// Builds a [`Node`] instance with a [VSS] backend and according to the options
1093	/// previously configured.
1094	///
1095	/// Uses [`FixedHeaders`] as default method for authentication/authorization.
1096	///
1097	/// Given `fixed_headers` are included as it is in all the requests made to VSS.
1098	///
1099	/// **Caution**: VSS support is in **alpha** and is considered experimental.
1100	/// Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are
1101	/// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted.
1102	///
1103	/// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md
1104	pub fn build_with_vss_store_and_fixed_headers(
1105		&self, vss_url: String, store_id: String, fixed_headers: HashMap<String, String>,
1106	) -> Result<Arc<Node>, BuildError> {
1107		self.inner
1108			.read()
1109			.unwrap()
1110			.build_with_vss_store_and_fixed_headers(vss_url, store_id, fixed_headers)
1111			.map(Arc::new)
1112	}
1113
1114	/// Builds a [`Node`] instance with a [VSS] backend and according to the options
1115	/// previously configured.
1116	///
1117	/// Given `header_provider` is used to attach headers to every request made
1118	/// to VSS.
1119	///
1120	/// **Caution**: VSS support is in **alpha** and is considered experimental.
1121	/// Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are
1122	/// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted.
1123	///
1124	/// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md
1125	pub fn build_with_vss_store_and_header_provider(
1126		&self, vss_url: String, store_id: String, header_provider: Arc<dyn VssHeaderProvider>,
1127	) -> Result<Arc<Node>, BuildError> {
1128		self.inner
1129			.read()
1130			.unwrap()
1131			.build_with_vss_store_and_header_provider(vss_url, store_id, header_provider)
1132			.map(Arc::new)
1133	}
1134
1135	/// Builds a [`Node`] instance according to the options previously configured.
1136	pub fn build_with_store(&self, kv_store: Arc<DynStore>) -> Result<Arc<Node>, BuildError> {
1137		self.inner.read().unwrap().build_with_store(kv_store).map(Arc::new)
1138	}
1139}
1140
1141/// Builds a [`Node`] instance according to the options previously configured.
1142fn build_with_store_internal(
1143	config: Arc<Config>, chain_data_source_config: Option<&ChainDataSourceConfig>,
1144	gossip_source_config: Option<&GossipSourceConfig>,
1145	liquidity_source_config: Option<&LiquiditySourceConfig>,
1146	pathfinding_scores_sync_config: Option<&PathfindingScoresSyncConfig>,
1147	async_payments_role: Option<AsyncPaymentsRole>, seed_bytes: [u8; 64], runtime: Arc<Runtime>,
1148	logger: Arc<Logger>, kv_store: Arc<DynStore>,
1149) -> Result<Node, BuildError> {
1150	optionally_install_rustls_cryptoprovider();
1151
1152	if let Err(err) = may_announce_channel(&config) {
1153		if config.announcement_addresses.is_some() {
1154			log_error!(logger, "Announcement addresses were set but some required configuration options for node announcement are missing: {}", err);
1155			let build_error = if matches!(err, AnnounceError::MissingNodeAlias) {
1156				BuildError::InvalidNodeAlias
1157			} else {
1158				BuildError::InvalidListeningAddresses
1159			};
1160			return Err(build_error);
1161		}
1162
1163		if config.node_alias.is_some() {
1164			log_error!(logger, "Node alias was set but some required configuration options for node announcement are missing: {}", err);
1165			return Err(BuildError::InvalidListeningAddresses);
1166		}
1167	}
1168
1169	// Initialize the status fields.
1170	let node_metrics = match read_node_metrics(Arc::clone(&kv_store), Arc::clone(&logger)) {
1171		Ok(metrics) => Arc::new(RwLock::new(metrics)),
1172		Err(e) => {
1173			if e.kind() == std::io::ErrorKind::NotFound {
1174				Arc::new(RwLock::new(NodeMetrics::default()))
1175			} else {
1176				log_error!(logger, "Failed to read node metrics from store: {}", e);
1177				return Err(BuildError::ReadFailed);
1178			}
1179		},
1180	};
1181	let tx_broadcaster = Arc::new(TransactionBroadcaster::new(Arc::clone(&logger)));
1182	let fee_estimator = Arc::new(OnchainFeeEstimator::new());
1183
1184	let payment_store = match io::utils::read_payments(Arc::clone(&kv_store), Arc::clone(&logger)) {
1185		Ok(payments) => Arc::new(PaymentStore::new(
1186			payments,
1187			PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE.to_string(),
1188			PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE.to_string(),
1189			Arc::clone(&kv_store),
1190			Arc::clone(&logger),
1191		)),
1192		Err(e) => {
1193			log_error!(logger, "Failed to read payment data from store: {}", e);
1194			return Err(BuildError::ReadFailed);
1195		},
1196	};
1197
1198	let (chain_source, chain_tip_opt) = match chain_data_source_config {
1199		Some(ChainDataSourceConfig::Esplora { server_url, headers, sync_config }) => {
1200			let sync_config = sync_config.unwrap_or(EsploraSyncConfig::default());
1201			ChainSource::new_esplora(
1202				server_url.clone(),
1203				headers.clone(),
1204				sync_config,
1205				Arc::clone(&fee_estimator),
1206				Arc::clone(&tx_broadcaster),
1207				Arc::clone(&kv_store),
1208				Arc::clone(&config),
1209				Arc::clone(&logger),
1210				Arc::clone(&node_metrics),
1211			)
1212		},
1213		Some(ChainDataSourceConfig::Electrum { server_url, sync_config }) => {
1214			let sync_config = sync_config.unwrap_or(ElectrumSyncConfig::default());
1215			ChainSource::new_electrum(
1216				server_url.clone(),
1217				sync_config,
1218				Arc::clone(&fee_estimator),
1219				Arc::clone(&tx_broadcaster),
1220				Arc::clone(&kv_store),
1221				Arc::clone(&config),
1222				Arc::clone(&logger),
1223				Arc::clone(&node_metrics),
1224			)
1225		},
1226		Some(ChainDataSourceConfig::Bitcoind {
1227			rpc_host,
1228			rpc_port,
1229			rpc_user,
1230			rpc_password,
1231			rest_client_config,
1232		}) => match rest_client_config {
1233			Some(rest_client_config) => runtime.block_on(async {
1234				ChainSource::new_bitcoind_rest(
1235					rpc_host.clone(),
1236					*rpc_port,
1237					rpc_user.clone(),
1238					rpc_password.clone(),
1239					Arc::clone(&fee_estimator),
1240					Arc::clone(&tx_broadcaster),
1241					Arc::clone(&kv_store),
1242					Arc::clone(&config),
1243					rest_client_config.clone(),
1244					Arc::clone(&logger),
1245					Arc::clone(&node_metrics),
1246				)
1247				.await
1248			}),
1249			None => runtime.block_on(async {
1250				ChainSource::new_bitcoind_rpc(
1251					rpc_host.clone(),
1252					*rpc_port,
1253					rpc_user.clone(),
1254					rpc_password.clone(),
1255					Arc::clone(&fee_estimator),
1256					Arc::clone(&tx_broadcaster),
1257					Arc::clone(&kv_store),
1258					Arc::clone(&config),
1259					Arc::clone(&logger),
1260					Arc::clone(&node_metrics),
1261				)
1262				.await
1263			}),
1264		},
1265
1266		None => {
1267			// Default to Esplora client.
1268			let server_url = DEFAULT_ESPLORA_SERVER_URL.to_string();
1269			let sync_config = EsploraSyncConfig::default();
1270			ChainSource::new_esplora(
1271				server_url.clone(),
1272				HashMap::new(),
1273				sync_config,
1274				Arc::clone(&fee_estimator),
1275				Arc::clone(&tx_broadcaster),
1276				Arc::clone(&kv_store),
1277				Arc::clone(&config),
1278				Arc::clone(&logger),
1279				Arc::clone(&node_metrics),
1280			)
1281		},
1282	};
1283	let chain_source = Arc::new(chain_source);
1284
1285	// Initialize the on-chain wallet and chain access
1286	let xprv = bitcoin::bip32::Xpriv::new_master(config.network, &seed_bytes).map_err(|e| {
1287		log_error!(logger, "Failed to derive master secret: {}", e);
1288		BuildError::InvalidSeedBytes
1289	})?;
1290
1291	let descriptor = Bip84(xprv, KeychainKind::External);
1292	let change_descriptor = Bip84(xprv, KeychainKind::Internal);
1293	let mut wallet_persister =
1294		KVStoreWalletPersister::new(Arc::clone(&kv_store), Arc::clone(&logger));
1295	let wallet_opt = BdkWallet::load()
1296		.descriptor(KeychainKind::External, Some(descriptor.clone()))
1297		.descriptor(KeychainKind::Internal, Some(change_descriptor.clone()))
1298		.extract_keys()
1299		.check_network(config.network)
1300		.load_wallet(&mut wallet_persister)
1301		.map_err(|e| match e {
1302			bdk_wallet::LoadWithPersistError::InvalidChangeSet(
1303				bdk_wallet::LoadError::Mismatch(bdk_wallet::LoadMismatch::Network {
1304					loaded,
1305					expected,
1306				}),
1307			) => {
1308				log_error!(
1309					logger,
1310					"Failed to setup wallet: Networks do not match. Expected {} but got {}",
1311					expected,
1312					loaded
1313				);
1314				BuildError::NetworkMismatch
1315			},
1316			_ => {
1317				log_error!(logger, "Failed to set up wallet: {}", e);
1318				BuildError::WalletSetupFailed
1319			},
1320		})?;
1321	let bdk_wallet = match wallet_opt {
1322		Some(wallet) => wallet,
1323		None => {
1324			let mut wallet = BdkWallet::create(descriptor, change_descriptor)
1325				.network(config.network)
1326				.create_wallet(&mut wallet_persister)
1327				.map_err(|e| {
1328					log_error!(logger, "Failed to set up wallet: {}", e);
1329					BuildError::WalletSetupFailed
1330				})?;
1331
1332			if let Some(best_block) = chain_tip_opt {
1333				// Insert the first checkpoint if we have it, to avoid resyncing from genesis.
1334				// TODO: Use a proper wallet birthday once BDK supports it.
1335				let mut latest_checkpoint = wallet.latest_checkpoint();
1336				let block_id =
1337					bdk_chain::BlockId { height: best_block.height, hash: best_block.block_hash };
1338				latest_checkpoint = latest_checkpoint.insert(block_id);
1339				let update =
1340					bdk_wallet::Update { chain: Some(latest_checkpoint), ..Default::default() };
1341				wallet.apply_update(update).map_err(|e| {
1342					log_error!(logger, "Failed to apply checkpoint during wallet setup: {}", e);
1343					BuildError::WalletSetupFailed
1344				})?;
1345			}
1346			wallet
1347		},
1348	};
1349
1350	let wallet = Arc::new(Wallet::new(
1351		bdk_wallet,
1352		wallet_persister,
1353		Arc::clone(&tx_broadcaster),
1354		Arc::clone(&fee_estimator),
1355		Arc::clone(&payment_store),
1356		Arc::clone(&config),
1357		Arc::clone(&logger),
1358	));
1359
1360	// Initialize the KeysManager
1361	let cur_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).map_err(|e| {
1362		log_error!(logger, "Failed to get current time: {}", e);
1363		BuildError::InvalidSystemTime
1364	})?;
1365
1366	let ldk_seed_bytes: [u8; 32] = xprv.private_key.secret_bytes();
1367	let keys_manager = Arc::new(KeysManager::new(
1368		&ldk_seed_bytes,
1369		cur_time.as_secs(),
1370		cur_time.subsec_nanos(),
1371		Arc::clone(&wallet),
1372		Arc::clone(&logger),
1373	));
1374
1375	let peer_storage_key = keys_manager.get_peer_storage_key();
1376	let persister = Arc::new(Persister::new(
1377		Arc::clone(&kv_store),
1378		Arc::clone(&logger),
1379		PERSISTER_MAX_PENDING_UPDATES,
1380		Arc::clone(&keys_manager),
1381		Arc::clone(&keys_manager),
1382		Arc::clone(&tx_broadcaster),
1383		Arc::clone(&fee_estimator),
1384	));
1385
1386	// Read ChannelMonitor state from store
1387	let channel_monitors = match persister.read_all_channel_monitors_with_updates() {
1388		Ok(monitors) => monitors,
1389		Err(e) => {
1390			if e.kind() == lightning::io::ErrorKind::NotFound {
1391				Vec::new()
1392			} else {
1393				log_error!(logger, "Failed to read channel monitors from store: {}", e.to_string());
1394				return Err(BuildError::ReadFailed);
1395			}
1396		},
1397	};
1398
1399	// Initialize the ChainMonitor
1400	let chain_monitor: Arc<ChainMonitor> = Arc::new(chainmonitor::ChainMonitor::new(
1401		Some(Arc::clone(&chain_source)),
1402		Arc::clone(&tx_broadcaster),
1403		Arc::clone(&logger),
1404		Arc::clone(&fee_estimator),
1405		Arc::clone(&persister),
1406		Arc::clone(&keys_manager),
1407		peer_storage_key,
1408	));
1409
1410	// Initialize the network graph, scorer, and router
1411	let network_graph =
1412		match io::utils::read_network_graph(Arc::clone(&kv_store), Arc::clone(&logger)) {
1413			Ok(graph) => Arc::new(graph),
1414			Err(e) => {
1415				if e.kind() == std::io::ErrorKind::NotFound {
1416					Arc::new(Graph::new(config.network.into(), Arc::clone(&logger)))
1417				} else {
1418					log_error!(logger, "Failed to read network graph from store: {}", e);
1419					return Err(BuildError::ReadFailed);
1420				}
1421			},
1422		};
1423
1424	let local_scorer = match io::utils::read_scorer(
1425		Arc::clone(&kv_store),
1426		Arc::clone(&network_graph),
1427		Arc::clone(&logger),
1428	) {
1429		Ok(scorer) => scorer,
1430		Err(e) => {
1431			if e.kind() == std::io::ErrorKind::NotFound {
1432				let params = ProbabilisticScoringDecayParameters::default();
1433				ProbabilisticScorer::new(params, Arc::clone(&network_graph), Arc::clone(&logger))
1434			} else {
1435				log_error!(logger, "Failed to read scoring data from store: {}", e);
1436				return Err(BuildError::ReadFailed);
1437			}
1438		},
1439	};
1440
1441	let scorer = Arc::new(Mutex::new(CombinedScorer::new(local_scorer)));
1442
1443	// Restore external pathfinding scores from cache if possible.
1444	match read_external_pathfinding_scores_from_cache(Arc::clone(&kv_store), Arc::clone(&logger)) {
1445		Ok(external_scores) => {
1446			scorer.lock().unwrap().merge(external_scores, cur_time);
1447			log_trace!(logger, "External scores from cache merged successfully");
1448		},
1449		Err(e) => {
1450			if e.kind() != std::io::ErrorKind::NotFound {
1451				log_error!(logger, "Error while reading external scores from cache: {}", e);
1452				return Err(BuildError::ReadFailed);
1453			}
1454		},
1455	}
1456
1457	let scoring_fee_params = ProbabilisticScoringFeeParameters::default();
1458	let router = Arc::new(DefaultRouter::new(
1459		Arc::clone(&network_graph),
1460		Arc::clone(&logger),
1461		Arc::clone(&keys_manager),
1462		Arc::clone(&scorer),
1463		scoring_fee_params,
1464	));
1465
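	// Derive the LDK user configuration from our node config. It may be adjusted below depending
	// on the roles this node takes on (e.g., LSPS2 service, async payments).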
1466	let mut user_config = default_user_config(&config);
1467
1468	if liquidity_source_config.and_then(|lsc| lsc.lsps2_service.as_ref()).is_some() {
1469		// If we act as an LSPS2 service, we need to be able to intercept HTLCs and forward the
1470		// information to the service handler.
1471		user_config.accept_intercept_htlcs = true;
1472
1473		// If we act as an LSPS2 service, we allow forwarding to unannounced channels.
1474		user_config.accept_forwards_to_priv_channels = true;
1475
1476		// If we act as an LSPS2 service, set the HTLC-value-in-flight to 100% of the channel value
1477		// to ensure we can forward the initial payment.
1478		user_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel =
1479			100;
1480	}
1481
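	// Apply the configuration overrides required for the configured async payments role, if any.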
1482	if let Some(role) = async_payments_role {
1483		match role {
1484			AsyncPaymentsRole::Server => {
1485				user_config.accept_forwards_to_priv_channels = true;
1486				user_config.enable_htlc_hold = true;
1487			},
1488			AsyncPaymentsRole::Client => user_config.hold_outbound_htlcs_at_next_hop = true,
1489		}
1490	}
1491
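	// Initialize the MessageRouter used for routing onion messages.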
1492	let message_router =
1493		Arc::new(MessageRouter::new(Arc::clone(&network_graph), Arc::clone(&keys_manager)));
1494
1495	// Initialize the ChannelManager
1496	let channel_manager = {
1497		if let Ok(res) = KVStoreSync::read(
1498			&*kv_store,
1499			CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
1500			CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
1501			CHANNEL_MANAGER_PERSISTENCE_KEY,
1502		) {
1503			let mut reader = Cursor::new(res);
1504			let channel_monitor_references =
1505				channel_monitors.iter().map(|(_, chanmon)| chanmon).collect();
1506			let read_args = ChannelManagerReadArgs::new(
1507				Arc::clone(&keys_manager),
1508				Arc::clone(&keys_manager),
1509				Arc::clone(&keys_manager),
1510				Arc::clone(&fee_estimator),
1511				Arc::clone(&chain_monitor),
1512				Arc::clone(&tx_broadcaster),
1513				Arc::clone(&router),
1514				Arc::clone(&message_router),
1515				Arc::clone(&logger),
1516				user_config,
1517				channel_monitor_references,
1518			);
1519			let (_hash, channel_manager) =
1520				<(BlockHash, ChannelManager)>::read(&mut reader, read_args).map_err(|e| {
1521					log_error!(logger, "Failed to read channel manager from store: {}", e);
1522					BuildError::ReadFailed
1523				})?;
1524			channel_manager
1525		} else {
1526			// We're starting a fresh node.
1527			let best_block =
1528				chain_tip_opt.unwrap_or_else(|| BestBlock::from_network(config.network));
1529
1530			let chain_params = ChainParameters { network: config.network.into(), best_block };
1531			channelmanager::ChannelManager::new(
1532				Arc::clone(&fee_estimator),
1533				Arc::clone(&chain_monitor),
1534				Arc::clone(&tx_broadcaster),
1535				Arc::clone(&router),
1536				Arc::clone(&message_router),
1537				Arc::clone(&logger),
1538				Arc::clone(&keys_manager),
1539				Arc::clone(&keys_manager),
1540				Arc::clone(&keys_manager),
1541				user_config,
1542				chain_params,
1543				cur_time.as_secs() as u32,
1544			)
1545		}
1546	};
1547
1548	let channel_manager = Arc::new(channel_manager);
1549
1550	// Give ChannelMonitors to ChainMonitor
1551	for (_blockhash, channel_monitor) in channel_monitors.into_iter() {
1552		let channel_id = channel_monitor.channel_id();
1553		chain_monitor.watch_channel(channel_id, channel_monitor).map_err(|e| {
1554			log_error!(logger, "Failed to watch channel monitor: {:?}", e);
1555			BuildError::InvalidChannelMonitor
1556		})?;
1557	}
1558
1559	// Initialize the PeerManager
1560	let onion_messenger: Arc<OnionMessenger> =
1561		if let Some(AsyncPaymentsRole::Server) = async_payments_role {
1562			Arc::new(OnionMessenger::new_with_offline_peer_interception(
1563				Arc::clone(&keys_manager),
1564				Arc::clone(&keys_manager),
1565				Arc::clone(&logger),
1566				Arc::clone(&channel_manager),
1567				message_router,
1568				Arc::clone(&channel_manager),
1569				Arc::clone(&channel_manager),
1570				IgnoringMessageHandler {},
1571				IgnoringMessageHandler {},
1572			))
1573		} else {
1574			Arc::new(OnionMessenger::new(
1575				Arc::clone(&keys_manager),
1576				Arc::clone(&keys_manager),
1577				Arc::clone(&logger),
1578				Arc::clone(&channel_manager),
1579				message_router,
1580				Arc::clone(&channel_manager),
1581				Arc::clone(&channel_manager),
1582				IgnoringMessageHandler {},
1583				IgnoringMessageHandler {},
1584			))
1585		};
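	// Generate the ephemeral randomness the PeerManager uses to derive per-connection keys.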
1586	let ephemeral_bytes: [u8; 32] = keys_manager.get_secure_random_bytes();
1587
1588	// Initialize the GossipSource
1589	// Use the configured gossip source if the user set one; otherwise, default to `P2PNetwork`.
1590	let gossip_source_config = gossip_source_config.unwrap_or(&GossipSourceConfig::P2PNetwork);
1591
1592	let gossip_source = match gossip_source_config {
1593		GossipSourceConfig::P2PNetwork => {
1594			let p2p_source =
1595				Arc::new(GossipSource::new_p2p(Arc::clone(&network_graph), Arc::clone(&logger)));
1596
1597			// Reset the RGS sync timestamp in case we switched over from an RGS gossip source
1598			{
1599				let mut locked_node_metrics = node_metrics.write().unwrap();
1600				locked_node_metrics.latest_rgs_snapshot_timestamp = None;
1601				write_node_metrics(
1602					&*locked_node_metrics,
1603					Arc::clone(&kv_store),
1604					Arc::clone(&logger),
1605				)
1606				.map_err(|e| {
1607					log_error!(logger, "Failed writing to store: {}", e);
1608					BuildError::WriteFailed
1609				})?;
1610			}
1611			p2p_source
1612		},
1613		GossipSourceConfig::RapidGossipSync(rgs_server) => {
1614			let latest_sync_timestamp =
1615				node_metrics.read().unwrap().latest_rgs_snapshot_timestamp.unwrap_or(0);
1616			Arc::new(GossipSource::new_rgs(
1617				rgs_server.clone(),
1618				latest_sync_timestamp,
1619				Arc::clone(&network_graph),
1620				Arc::clone(&logger),
1621			))
1622		},
1623	};
1624
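	// Set up the liquidity source and its custom message handler, if the user configured one.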
1625	let (liquidity_source, custom_message_handler) =
1626		if let Some(lsc) = liquidity_source_config.as_ref() {
1627			let mut liquidity_source_builder = LiquiditySourceBuilder::new(
1628				Arc::clone(&wallet),
1629				Arc::clone(&channel_manager),
1630				Arc::clone(&keys_manager),
1631				Arc::clone(&chain_source),
1632				Arc::clone(&tx_broadcaster),
1633				Arc::clone(&kv_store),
1634				Arc::clone(&config),
1635				Arc::clone(&logger),
1636			);
1637
1638			lsc.lsps1_client.as_ref().map(|config| {
1639				liquidity_source_builder.lsps1_client(
1640					config.node_id,
1641					config.address.clone(),
1642					config.token.clone(),
1643				)
1644			});
1645
1646			lsc.lsps2_client.as_ref().map(|config| {
1647				liquidity_source_builder.lsps2_client(
1648					config.node_id,
1649					config.address.clone(),
1650					config.token.clone(),
1651				)
1652			});
1653
1654			let promise_secret = {
1655				let lsps_xpriv = derive_xprv(
1656					Arc::clone(&config),
1657					&seed_bytes,
1658					LSPS_HARDENED_CHILD_INDEX,
1659					Arc::clone(&logger),
1660				)?;
1661				lsps_xpriv.private_key.secret_bytes()
1662			};
1663			lsc.lsps2_service.as_ref().map(|config| {
1664				liquidity_source_builder.lsps2_service(promise_secret, config.clone())
1665			});
1666
1667			let liquidity_source = runtime
1668				.block_on(async move { liquidity_source_builder.build().await.map(Arc::new) })?;
1669			let custom_message_handler =
1670				Arc::new(NodeCustomMessageHandler::new_liquidity(Arc::clone(&liquidity_source)));
1671			(Some(liquidity_source), custom_message_handler)
1672		} else {
1673			(None, Arc::new(NodeCustomMessageHandler::new_ignoring()))
1674		};
1675
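	// Assemble the MessageHandler, wiring up gossip handling according to the configured gossip
	// source.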
1676	let msg_handler = match gossip_source.as_gossip_sync() {
1677		GossipSync::P2P(p2p_gossip_sync) => MessageHandler {
1678			chan_handler: Arc::clone(&channel_manager),
1679			route_handler: Arc::clone(&p2p_gossip_sync)
1680				as Arc<dyn RoutingMessageHandler + Sync + Send>,
1681			onion_message_handler: Arc::clone(&onion_messenger),
1682			custom_message_handler,
1683			send_only_message_handler: Arc::clone(&chain_monitor),
1684		},
1685		GossipSync::Rapid(_) => MessageHandler {
1686			chan_handler: Arc::clone(&channel_manager),
1687			route_handler: Arc::new(IgnoringMessageHandler {})
1688				as Arc<dyn RoutingMessageHandler + Sync + Send>,
1689			onion_message_handler: Arc::clone(&onion_messenger),
1690			custom_message_handler,
1691			send_only_message_handler: Arc::clone(&chain_monitor),
1692		},
1693		GossipSync::None => {
1694			unreachable!("We must always have a gossip sync!");
1695		},
1696	};
1697
1698	let cur_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).map_err(|e| {
1699		log_error!(logger, "Failed to get current time: {}", e);
1700		BuildError::InvalidSystemTime
1701	})?;
1702
1703	let peer_manager = Arc::new(PeerManager::new(
1704		msg_handler,
1705		cur_time.as_secs().try_into().map_err(|e| {
1706			log_error!(logger, "Failed to convert current time: {}", e);
1707			BuildError::InvalidSystemTime
1708		})?,
1709		&ephemeral_bytes,
1710		Arc::clone(&logger),
1711		Arc::clone(&keys_manager),
1712	));
1713
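	// Now that the PeerManager exists, hand it to the liquidity source and the gossip verifier.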
1714	liquidity_source.as_ref().map(|l| l.set_peer_manager(Arc::clone(&peer_manager)));
1715
1716	gossip_source.set_gossip_verifier(
1717		Arc::clone(&chain_source),
1718		Arc::clone(&peer_manager),
1719		Arc::clone(&runtime),
1720	);
1721
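	// The ConnectionManager handles establishing and tracking connections to peers.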
1722	let connection_manager =
1723		Arc::new(ConnectionManager::new(Arc::clone(&peer_manager), Arc::clone(&logger)));
1724
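	// Read the OutputSweeper from the store, or initialize a fresh one if none was persisted yet.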
1725	let output_sweeper = match io::utils::read_output_sweeper(
1726		Arc::clone(&tx_broadcaster),
1727		Arc::clone(&fee_estimator),
1728		Arc::clone(&chain_source),
1729		Arc::clone(&keys_manager),
1730		Arc::clone(&kv_store),
1731		Arc::clone(&logger),
1732	) {
1733		Ok(output_sweeper) => Arc::new(output_sweeper),
1734		Err(e) => {
1735			if e.kind() == std::io::ErrorKind::NotFound {
1736				Arc::new(OutputSweeper::new(
1737					channel_manager.current_best_block(),
1738					Arc::clone(&tx_broadcaster),
1739					Arc::clone(&fee_estimator),
1740					Some(Arc::clone(&chain_source)),
1741					Arc::clone(&keys_manager),
1742					Arc::clone(&keys_manager),
1743					Arc::clone(&kv_store),
1744					Arc::clone(&logger),
1745				))
1746			} else {
1747				log_error!(logger, "Failed to read output sweeper data from store: {}", e);
1748				return Err(BuildError::ReadFailed);
1749			}
1750		},
1751	};
1752
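	// Similarly, read the event queue and the peer store from the store, initializing fresh
	// instances if they were not persisted yet.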
1753	let event_queue = match io::utils::read_event_queue(Arc::clone(&kv_store), Arc::clone(&logger))
1754	{
1755		Ok(event_queue) => Arc::new(event_queue),
1756		Err(e) => {
1757			if e.kind() == std::io::ErrorKind::NotFound {
1758				Arc::new(EventQueue::new(Arc::clone(&kv_store), Arc::clone(&logger)))
1759			} else {
1760				log_error!(logger, "Failed to read event queue from store: {}", e);
1761				return Err(BuildError::ReadFailed);
1762			}
1763		},
1764	};
1765
1766	let peer_store = match io::utils::read_peer_info(Arc::clone(&kv_store), Arc::clone(&logger)) {
1767		Ok(peer_store) => Arc::new(peer_store),
1768		Err(e) => {
1769			if e.kind() == std::io::ErrorKind::NotFound {
1770				Arc::new(PeerStore::new(Arc::clone(&kv_store), Arc::clone(&logger)))
1771			} else {
1772				log_error!(logger, "Failed to read peer data from store: {}", e);
1773				return Err(BuildError::ReadFailed);
1774			}
1775		},
1776	};
1777
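	// When acting as an async payments server, set up a mailbox buffering onion messages for
	// offline peers.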
1778	let om_mailbox = if let Some(AsyncPaymentsRole::Server) = async_payments_role {
1779		Some(Arc::new(OnionMessageMailbox::new()))
1780	} else {
1781		None
1782	};
1783
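	// Channels used to signal shutdown to the node's background tasks.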
1784	let (stop_sender, _) = tokio::sync::watch::channel(());
1785	let (background_processor_stop_sender, _) = tokio::sync::watch::channel(());
1786	let is_running = Arc::new(RwLock::new(false));
1787
1788	let pathfinding_scores_sync_url = pathfinding_scores_sync_config.map(|c| c.url.clone());
1789
1790	Ok(Node {
1791		runtime,
1792		stop_sender,
1793		background_processor_stop_sender,
1794		config,
1795		wallet,
1796		chain_source,
1797		tx_broadcaster,
1798		fee_estimator,
1799		event_queue,
1800		channel_manager,
1801		chain_monitor,
1802		output_sweeper,
1803		peer_manager,
1804		onion_messenger,
1805		connection_manager,
1806		keys_manager,
1807		network_graph,
1808		gossip_source,
1809		pathfinding_scores_sync_url,
1810		liquidity_source,
1811		kv_store,
1812		logger,
1813		_router: router,
1814		scorer,
1815		peer_store,
1816		payment_store,
1817		is_running,
1818		node_metrics,
1819		om_mailbox,
1820		async_payments_role,
1821	})
1822}
1823
1824fn optionally_install_rustls_cryptoprovider() {
1825	// Use a global `Once` to ensure the provider is only installed once, even if multiple callers
1826	// race. This is mostly required for running tests concurrently.
1827	static INIT_CRYPTO: Once = Once::new();
1828
1829	INIT_CRYPTO.call_once(|| {
1830		// Ensure we install a `CryptoProvider` for `rustls` if one hasn't already been installed.
1831		if rustls::crypto::CryptoProvider::get_default().is_none() {
1832			let _ = rustls::crypto::ring::default_provider().install_default();
1833		}
1834
1835		// Refuse to start up without TLS support. Better to catch this now than later at runtime.
1836		assert!(
1837			rustls::crypto::CryptoProvider::get_default().is_some(),
1838			"We need to have a CryptoProvider"
1839		);
1840	});
1841}
1842
1843/// Sets up the node logger.
1844fn setup_logger(
1845	log_writer_config: &Option<LogWriterConfig>, config: &Config,
1846) -> Result<Arc<Logger>, BuildError> {
1847	let logger = match log_writer_config {
1848		Some(LogWriterConfig::File { log_file_path, max_log_level }) => {
1849			let log_file_path = log_file_path
1850				.clone()
1851				.unwrap_or_else(|| format!("{}/{}", config.storage_dir_path, DEFAULT_LOG_FILENAME));
1852			let max_log_level = max_log_level.unwrap_or(DEFAULT_LOG_LEVEL);
1853
1854			Logger::new_fs_writer(log_file_path, max_log_level)
1855				.map_err(|_| BuildError::LoggerSetupFailed)?
1856		},
1857		Some(LogWriterConfig::Log) => Logger::new_log_facade(),
1858
1859		Some(LogWriterConfig::Custom(custom_log_writer)) => {
1860			Logger::new_custom_writer(Arc::clone(&custom_log_writer))
1861		},
1862		None => {
1863			// Default to using the filesystem log writer.
1864			let log_file_path = format!("{}/{}", config.storage_dir_path, DEFAULT_LOG_FILENAME);
1865			let log_level = DEFAULT_LOG_LEVEL;
1866			Logger::new_fs_writer(log_file_path, log_level)
1867				.map_err(|_| BuildError::LoggerSetupFailed)?
1868		},
1869	};
1870
1871	Ok(Arc::new(logger))
1872}
1873
1874fn seed_bytes_from_config(
1875	config: &Config, entropy_source_config: Option<&EntropySourceConfig>, logger: Arc<Logger>,
1876) -> Result<[u8; 64], BuildError> {
1877	match entropy_source_config {
1878		Some(EntropySourceConfig::SeedBytes(bytes)) => Ok(bytes.clone()),
1879		Some(EntropySourceConfig::SeedFile(seed_path)) => {
1880			Ok(io::utils::read_or_generate_seed_file(seed_path, Arc::clone(&logger))
1881				.map_err(|_| BuildError::InvalidSeedFile)?)
1882		},
1883		Some(EntropySourceConfig::Bip39Mnemonic { mnemonic, passphrase }) => match passphrase {
1884			Some(passphrase) => Ok(mnemonic.to_seed(passphrase)),
1885			None => Ok(mnemonic.to_seed("")),
1886		},
1887		None => {
1888			// Default to reading, or generating, a seed file at the default location.
1889			let seed_path = format!("{}/keys_seed", config.storage_dir_path);
1890			Ok(io::utils::read_or_generate_seed_file(&seed_path, Arc::clone(&logger))
1891				.map_err(|_| BuildError::InvalidSeedFile)?)
1892		},
1893	}
1894}
1895
1896fn derive_xprv(
1897	config: Arc<Config>, seed_bytes: &[u8; 64], hardened_child_index: u32, logger: Arc<Logger>,
1898) -> Result<Xpriv, BuildError> {
1899	use bitcoin::key::Secp256k1;
1900
1901	let xprv = Xpriv::new_master(config.network, seed_bytes).map_err(|e| {
1902		log_error!(logger, "Failed to derive master secret: {}", e);
1903		BuildError::InvalidSeedBytes
1904	})?;
1905
1906	xprv.derive_priv(&Secp256k1::new(), &[ChildNumber::Hardened { index: hardened_child_index }])
1907		.map_err(|e| {
1908			log_error!(logger, "Failed to derive hardened child secret: {}", e);
1909			BuildError::InvalidSeedBytes
1910		})
1911}
1912
1913/// Sanitize the user-provided node alias to ensure that it is a valid protocol-specified UTF-8 string.
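///
/// Leading and trailing whitespace is trimmed, and the remaining bytes are zero-padded to the
/// 32-byte alias length.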
1914pub(crate) fn sanitize_alias(alias_str: &str) -> Result<NodeAlias, BuildError> {
1915	let alias = alias_str.trim();
1916
1917	// The alias must be 32 bytes long or less.
1918	if alias.as_bytes().len() > 32 {
1919		return Err(BuildError::InvalidNodeAlias);
1920	}
1921
1922	let mut bytes = [0u8; 32];
1923	bytes[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes());
1924	Ok(NodeAlias(bytes))
1925}
1926
1927#[cfg(test)]
1928mod tests {
1929	use super::{sanitize_alias, BuildError, NodeAlias};
1930
1931	#[test]
1932	fn sanitize_empty_node_alias() {
1933		// Empty node alias
1934		let alias = "";
1935		let mut buf = [0u8; 32];
1936		buf[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes());
1937
1938		let expected_node_alias = NodeAlias([0; 32]);
1939		let node_alias = sanitize_alias(alias).unwrap();
1940		assert_eq!(node_alias, expected_node_alias);
1941	}
1942
1943	#[test]
1944	fn sanitize_alias_with_sandwiched_null() {
1945		// Alias with emojis
1946		let alias = "I\u{1F496}LDK-Node!";
1947		let mut buf = [0u8; 32];
1948		buf[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes());
1949		let expected_alias = NodeAlias(buf);
1950
1951		let user_provided_alias = "I\u{1F496}LDK-Node!\0\u{26A1}";
1952		let node_alias = sanitize_alias(user_provided_alias).unwrap();
1953
1954		let node_alias_display = format!("{}", node_alias);
1955
1956		assert_eq!(alias, &node_alias_display);
1957		assert_ne!(expected_alias, node_alias);
1958	}
1959
1960	#[test]
1961	fn sanitize_alias_gt_32_bytes() {
1962		let alias = "This is a string longer than thirty-two bytes!"; // 46 bytes
1963		let node = sanitize_alias(alias);
1964		assert_eq!(node.err().unwrap(), BuildError::InvalidNodeAlias);
1965	}
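
	#[test]
	fn sanitize_alias_trims_whitespace() {
		// Surrounding whitespace should be trimmed before the alias is encoded and zero-padded.
		let alias = "ldk-node";
		let mut buf = [0u8; 32];
		buf[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes());
		let expected_alias = NodeAlias(buf);

		let node_alias = sanitize_alias("  ldk-node  ").unwrap();
		assert_eq!(node_alias, expected_alias);
	}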
1966}