// This file is Copyright its original authors, visible in version control history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
// accordance with one or both of these licenses.

use crate::chain::{ChainSource, DEFAULT_ESPLORA_SERVER_URL};
use crate::config::{
	default_user_config, may_announce_channel, AnnounceError, Config, ElectrumSyncConfig,
	EsploraSyncConfig, DEFAULT_LOG_FILENAME, DEFAULT_LOG_LEVEL, WALLET_KEYS_SEED_LEN,
};

use crate::connection::ConnectionManager;
use crate::event::EventQueue;
use crate::fee_estimator::OnchainFeeEstimator;
use crate::gossip::GossipSource;
use crate::io::sqlite_store::SqliteStore;
use crate::io::utils::{read_node_metrics, write_node_metrics};
use crate::io::vss_store::VssStore;
use crate::io::{
	self, PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE,
};
use crate::liquidity::{
	LSPS1ClientConfig, LSPS2ClientConfig, LSPS2ServiceConfig, LiquiditySourceBuilder,
};
use crate::logger::{log_error, log_info, LdkLogger, LogLevel, LogWriter, Logger};
use crate::message_handler::NodeCustomMessageHandler;
use crate::peer_store::PeerStore;
use crate::tx_broadcaster::TransactionBroadcaster;
use crate::types::{
	ChainMonitor, ChannelManager, DynStore, GossipSync, Graph, KeysManager, MessageRouter,
	OnionMessenger, PaymentStore, PeerManager,
};
use crate::wallet::persist::KVStoreWalletPersister;
use crate::wallet::Wallet;
use crate::{Node, NodeMetrics};

use lightning::chain::{chainmonitor, BestBlock, Watch};
use lightning::io::Cursor;
use lightning::ln::channelmanager::{self, ChainParameters, ChannelManagerReadArgs};
use lightning::ln::msgs::{RoutingMessageHandler, SocketAddress};
use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler};
use lightning::routing::gossip::NodeAlias;
use lightning::routing::router::DefaultRouter;
use lightning::routing::scoring::{
	ProbabilisticScorer, ProbabilisticScoringDecayParameters, ProbabilisticScoringFeeParameters,
};
use lightning::sign::EntropySource;

use lightning::util::persist::{
	read_channel_monitors, CHANNEL_MANAGER_PERSISTENCE_KEY,
	CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
};
use lightning::util::ser::ReadableArgs;
use lightning::util::sweep::OutputSweeper;

use lightning_persister::fs_store::FilesystemStore;

use bdk_wallet::template::Bip84;
use bdk_wallet::KeychainKind;
use bdk_wallet::Wallet as BdkWallet;

use bip39::Mnemonic;

use bitcoin::bip32::{ChildNumber, Xpriv};
use bitcoin::secp256k1::PublicKey;
use bitcoin::{BlockHash, Network};

use std::collections::HashMap;
use std::convert::TryInto;
use std::default::Default;
use std::fmt;
use std::fs;
use std::path::PathBuf;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex, Once, RwLock};
use std::time::SystemTime;

use vss_client::headers::{FixedHeaders, LnurlAuthToJwtProvider, VssHeaderProvider};

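// Hardened BIP32 child indices used to derive purpose-specific secrets from the node's master
// key (see the `derive_xprv` calls below): one for the VSS storage key, one for LNURL-auth, and
// one for the LSPS2 service's promise secret.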
const VSS_HARDENED_CHILD_INDEX: u32 = 877;
const VSS_LNURL_AUTH_HARDENED_CHILD_INDEX: u32 = 138;
const LSPS_HARDENED_CHILD_INDEX: u32 = 577;

#[derive(Debug, Clone)]
enum ChainDataSourceConfig {
	Esplora { server_url: String, sync_config: Option<EsploraSyncConfig> },
	Electrum { server_url: String, sync_config: Option<ElectrumSyncConfig> },
	BitcoindRpc { rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String },
}

#[derive(Debug, Clone)]
enum EntropySourceConfig {
	SeedFile(String),
	SeedBytes([u8; WALLET_KEYS_SEED_LEN]),
	Bip39Mnemonic { mnemonic: Mnemonic, passphrase: Option<String> },
}

#[derive(Debug, Clone)]
enum GossipSourceConfig {
	P2PNetwork,
	RapidGossipSync(String),
}

#[derive(Debug, Clone, Default)]
struct LiquiditySourceConfig {
	// Act as an LSPS1 client connecting to the given service.
	lsps1_client: Option<LSPS1ClientConfig>,
	// Act as an LSPS2 client connecting to the given service.
	lsps2_client: Option<LSPS2ClientConfig>,
	// Act as an LSPS2 service.
	lsps2_service: Option<LSPS2ServiceConfig>,
}

#[derive(Clone)]
enum LogWriterConfig {
	File { log_file_path: Option<String>, max_log_level: Option<LogLevel> },
	Log,
	Custom(Arc<dyn LogWriter>),
}

impl std::fmt::Debug for LogWriterConfig {
	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
		match self {
			LogWriterConfig::File { max_log_level, log_file_path } => f
				.debug_struct("LogWriterConfig")
				.field("max_log_level", max_log_level)
				.field("log_file_path", log_file_path)
				.finish(),
			LogWriterConfig::Log => write!(f, "LogWriterConfig::Log"),
			LogWriterConfig::Custom(_) => {
				f.debug_tuple("Custom").field(&"<config internal to custom log writer>").finish()
			},
		}
	}
}

/// An error encountered during building a [`Node`].
///
/// [`Node`]: crate::Node
#[derive(Debug, Clone, PartialEq)]
pub enum BuildError {
	/// The given seed bytes are invalid, e.g., have invalid length.
	InvalidSeedBytes,
	/// The given seed file is invalid, e.g., has invalid length, or could not be read.
	InvalidSeedFile,
	/// The current system time is invalid; clocks might have gone backwards.
	InvalidSystemTime,
	/// A read channel monitor is invalid.
	InvalidChannelMonitor,
	/// The given listening addresses are invalid, e.g., too many were passed.
	InvalidListeningAddresses,
	/// The given announcement addresses are invalid, e.g., too many were passed.
	InvalidAnnouncementAddresses,
	/// The provided alias is invalid.
	InvalidNodeAlias,
	/// We failed to read data from the [`KVStore`].
	///
	/// [`KVStore`]: lightning::util::persist::KVStore
	ReadFailed,
	/// We failed to write data to the [`KVStore`].
	///
	/// [`KVStore`]: lightning::util::persist::KVStore
	WriteFailed,
	/// We failed to access the given `storage_dir_path`.
	StoragePathAccessFailed,
	/// We failed to set up our [`KVStore`].
	///
	/// [`KVStore`]: lightning::util::persist::KVStore
	KVStoreSetupFailed,
	/// We failed to set up the onchain wallet.
	WalletSetupFailed,
	/// We failed to set up the logger.
	LoggerSetupFailed,
	/// The given network does not match the node's previously configured network.
	NetworkMismatch,
}

impl fmt::Display for BuildError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match *self {
			Self::InvalidSeedBytes => write!(f, "Given seed bytes are invalid."),
			Self::InvalidSeedFile => write!(f, "Given seed file is invalid or could not be read."),
			Self::InvalidSystemTime => {
				write!(f, "System time is invalid. Clocks might have gone back in time.")
			},
			Self::InvalidChannelMonitor => {
				write!(f, "Failed to watch a deserialized ChannelMonitor")
			},
			Self::InvalidListeningAddresses => write!(f, "Given listening addresses are invalid."),
			Self::InvalidAnnouncementAddresses => {
				write!(f, "Given announcement addresses are invalid.")
			},
			Self::ReadFailed => write!(f, "Failed to read from store."),
			Self::WriteFailed => write!(f, "Failed to write to store."),
			Self::StoragePathAccessFailed => write!(f, "Failed to access the given storage path."),
			Self::KVStoreSetupFailed => write!(f, "Failed to setup KVStore."),
			Self::WalletSetupFailed => write!(f, "Failed to setup onchain wallet."),
			Self::LoggerSetupFailed => write!(f, "Failed to setup the logger."),
			Self::InvalidNodeAlias => write!(f, "Given node alias is invalid."),
			Self::NetworkMismatch => {
				write!(f, "Given network does not match the node's previously configured network.")
			},
		}
	}
}

impl std::error::Error for BuildError {}

/// A builder for a [`Node`] instance, allowing some configuration and module choices to be set
/// from the get-go.
///
/// ### Defaults
/// - Wallet entropy is sourced from a `keys_seed` file located under [`Config::storage_dir_path`]
/// - Chain data is sourced from the Esplora endpoint `https://blockstream.info/api`
/// - Gossip data is sourced via the peer-to-peer network
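///
/// ### Example
///
/// A minimal usage sketch. The crate-level paths (`ldk_node::NodeBuilder`,
/// `ldk_node::bitcoin::Network`) and the `Node::start` call are assumptions not defined in this
/// file and are shown for illustration only:
///
/// ```ignore
/// use ldk_node::NodeBuilder;
/// use ldk_node::bitcoin::Network;
///
/// let mut builder = NodeBuilder::new();
/// builder.set_network(Network::Testnet);
/// builder.set_storage_dir_path("/tmp/ldk_node".to_string());
/// builder.set_chain_source_esplora("https://blockstream.info/testnet/api".to_string(), None);
///
/// // Build with the default SQLite-backed store, then start the node.
/// let node = builder.build().unwrap();
/// node.start().unwrap();
/// ```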
#[derive(Debug)]
pub struct NodeBuilder {
	config: Config,
	entropy_source_config: Option<EntropySourceConfig>,
	chain_data_source_config: Option<ChainDataSourceConfig>,
	gossip_source_config: Option<GossipSourceConfig>,
	liquidity_source_config: Option<LiquiditySourceConfig>,
	log_writer_config: Option<LogWriterConfig>,
}

impl NodeBuilder {
	/// Creates a new builder instance with the default configuration.
	pub fn new() -> Self {
		let config = Config::default();
		Self::from_config(config)
	}

	/// Creates a new builder instance from a [`Config`].
	pub fn from_config(config: Config) -> Self {
		let entropy_source_config = None;
		let chain_data_source_config = None;
		let gossip_source_config = None;
		let liquidity_source_config = None;
		let log_writer_config = None;
		Self {
			config,
			entropy_source_config,
			chain_data_source_config,
			gossip_source_config,
			liquidity_source_config,
			log_writer_config,
		}
	}

	/// Configures the [`Node`] instance to source its wallet entropy from a seed file on disk.
	///
	/// If the given file does not exist, a new random seed file will be generated and
	/// stored at the given location.
	pub fn set_entropy_seed_path(&mut self, seed_path: String) -> &mut Self {
		self.entropy_source_config = Some(EntropySourceConfig::SeedFile(seed_path));
		self
	}

	/// Configures the [`Node`] instance to source its wallet entropy from the given
	/// [`WALLET_KEYS_SEED_LEN`] seed bytes.
	pub fn set_entropy_seed_bytes(&mut self, seed_bytes: [u8; WALLET_KEYS_SEED_LEN]) -> &mut Self {
		self.entropy_source_config = Some(EntropySourceConfig::SeedBytes(seed_bytes));
		self
	}

	/// Configures the [`Node`] instance to source its wallet entropy from a [BIP 39] mnemonic.
	///
	/// [BIP 39]: https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki
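	///
	/// A hedged sketch of parsing a mnemonic with the `bip39` crate; the parsing helper
	/// (`Mnemonic::parse`) is an assumption about that crate's API and the mnemonic is the
	/// well-known test vector, not a key to use in production:
	///
	/// ```ignore
	/// use bip39::Mnemonic;
	///
	/// let mnemonic = Mnemonic::parse(
	/// 	"abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about",
	/// )
	/// .unwrap();
	/// builder.set_entropy_bip39_mnemonic(mnemonic, None);
	/// ```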
	pub fn set_entropy_bip39_mnemonic(
		&mut self, mnemonic: Mnemonic, passphrase: Option<String>,
	) -> &mut Self {
		self.entropy_source_config =
			Some(EntropySourceConfig::Bip39Mnemonic { mnemonic, passphrase });
		self
	}

	/// Configures the [`Node`] instance to source its chain data from the given Esplora server.
	///
	/// If no `sync_config` is given, default values are used. See [`EsploraSyncConfig`] for more
	/// information.
	pub fn set_chain_source_esplora(
		&mut self, server_url: String, sync_config: Option<EsploraSyncConfig>,
	) -> &mut Self {
		self.chain_data_source_config =
			Some(ChainDataSourceConfig::Esplora { server_url, sync_config });
		self
	}

	/// Configures the [`Node`] instance to source its chain data from the given Electrum server.
	///
	/// If no `sync_config` is given, default values are used. See [`ElectrumSyncConfig`] for more
	/// information.
	pub fn set_chain_source_electrum(
		&mut self, server_url: String, sync_config: Option<ElectrumSyncConfig>,
	) -> &mut Self {
		self.chain_data_source_config =
			Some(ChainDataSourceConfig::Electrum { server_url, sync_config });
		self
	}

	/// Configures the [`Node`] instance to source its chain data from the given Bitcoin Core RPC
	/// endpoint.
	pub fn set_chain_source_bitcoind_rpc(
		&mut self, rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String,
	) -> &mut Self {
		self.chain_data_source_config =
			Some(ChainDataSourceConfig::BitcoindRpc { rpc_host, rpc_port, rpc_user, rpc_password });
		self
	}

	/// Configures the [`Node`] instance to source its gossip data from the Lightning peer-to-peer
	/// network.
	pub fn set_gossip_source_p2p(&mut self) -> &mut Self {
		self.gossip_source_config = Some(GossipSourceConfig::P2PNetwork);
		self
	}

	/// Configures the [`Node`] instance to source its gossip data from the given RapidGossipSync
	/// server.
	pub fn set_gossip_source_rgs(&mut self, rgs_server_url: String) -> &mut Self {
		self.gossip_source_config = Some(GossipSourceConfig::RapidGossipSync(rgs_server_url));
		self
	}

	/// Configures the [`Node`] instance to source inbound liquidity from the given
	/// [bLIP-51 / LSPS1] service.
	///
	/// Will mark the LSP as trusted for 0-confirmation channels, see [`Config::trusted_peers_0conf`].
	///
	/// The given `token` will be used by the LSP to authenticate the user.
	///
	/// [bLIP-51 / LSPS1]: https://github.com/lightning/blips/blob/master/blip-0051.md
	pub fn set_liquidity_source_lsps1(
		&mut self, node_id: PublicKey, address: SocketAddress, token: Option<String>,
	) -> &mut Self {
		// Mark the LSP as trusted for 0conf
		self.config.trusted_peers_0conf.push(node_id.clone());

		let liquidity_source_config =
			self.liquidity_source_config.get_or_insert(LiquiditySourceConfig::default());
		let lsps1_client_config = LSPS1ClientConfig { node_id, address, token };
		liquidity_source_config.lsps1_client = Some(lsps1_client_config);
		self
	}

	/// Configures the [`Node`] instance to source just-in-time inbound liquidity from the given
	/// [bLIP-52 / LSPS2] service.
	///
	/// Will mark the LSP as trusted for 0-confirmation channels, see [`Config::trusted_peers_0conf`].
	///
	/// The given `token` will be used by the LSP to authenticate the user.
	///
	/// [bLIP-52 / LSPS2]: https://github.com/lightning/blips/blob/master/blip-0052.md
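	///
	/// A hedged sketch of configuring an LSPS2 client; the LSP's node id, address, and token are
	/// placeholder values, and the parsing helpers (`PublicKey::from_str`,
	/// `SocketAddress::from_str`) are assumptions about the respective crates' APIs:
	///
	/// ```ignore
	/// use std::str::FromStr;
	///
	/// let lsp_node_id = bitcoin::secp256k1::PublicKey::from_str(
	/// 	"03864ef025fde8fb587d989186ce6a4a186895ee44a926bfc370e2c366597a3f8f",
	/// )
	/// .unwrap();
	/// let lsp_address = lightning::ln::msgs::SocketAddress::from_str("203.0.113.7:9735").unwrap();
	/// builder.set_liquidity_source_lsps2(lsp_node_id, lsp_address, Some("token".to_string()));
	/// ```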
	pub fn set_liquidity_source_lsps2(
		&mut self, node_id: PublicKey, address: SocketAddress, token: Option<String>,
	) -> &mut Self {
		// Mark the LSP as trusted for 0conf
		self.config.trusted_peers_0conf.push(node_id.clone());

		let liquidity_source_config =
			self.liquidity_source_config.get_or_insert(LiquiditySourceConfig::default());
		let lsps2_client_config = LSPS2ClientConfig { node_id, address, token };
		liquidity_source_config.lsps2_client = Some(lsps2_client_config);
		self
	}

	/// Configures the [`Node`] instance to provide an [LSPS2] service, issuing just-in-time
	/// channels to clients.
	///
	/// **Caution**: LSP service support is in **alpha** and is considered an experimental feature.
	///
	/// [LSPS2]: https://github.com/BitcoinAndLightningLayerSpecs/lsp/blob/main/LSPS2/README.md
	pub fn set_liquidity_provider_lsps2(
		&mut self, service_config: LSPS2ServiceConfig,
	) -> &mut Self {
		let liquidity_source_config =
			self.liquidity_source_config.get_or_insert(LiquiditySourceConfig::default());
		liquidity_source_config.lsps2_service = Some(service_config);
		self
	}

	/// Sets the used storage directory path.
	pub fn set_storage_dir_path(&mut self, storage_dir_path: String) -> &mut Self {
		self.config.storage_dir_path = storage_dir_path;
		self
	}

	/// Configures the [`Node`] instance to write logs to the filesystem.
	///
	/// The `log_file_path` defaults to [`DEFAULT_LOG_FILENAME`] in the configured
	/// [`Config::storage_dir_path`] if set to `None`.
	///
	/// If set, `max_log_level` sets the maximum log level. Otherwise, it defaults to
	/// [`DEFAULT_LOG_LEVEL`].
	///
	/// [`DEFAULT_LOG_FILENAME`]: crate::config::DEFAULT_LOG_FILENAME
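	///
	/// A minimal sketch with a placeholder path; passing `None` for the level falls back to
	/// [`DEFAULT_LOG_LEVEL`]:
	///
	/// ```ignore
	/// builder.set_filesystem_logger(Some("/tmp/ldk_node/ldk_node.log".to_string()), None);
	/// ```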
	pub fn set_filesystem_logger(
		&mut self, log_file_path: Option<String>, max_log_level: Option<LogLevel>,
	) -> &mut Self {
		self.log_writer_config = Some(LogWriterConfig::File { log_file_path, max_log_level });
		self
	}

	/// Configures the [`Node`] instance to write logs to the [`log`](https://crates.io/crates/log) facade.
	pub fn set_log_facade_logger(&mut self) -> &mut Self {
		self.log_writer_config = Some(LogWriterConfig::Log);
		self
	}

	/// Configures the [`Node`] instance to write logs to the provided custom [`LogWriter`].
	pub fn set_custom_logger(&mut self, log_writer: Arc<dyn LogWriter>) -> &mut Self {
		self.log_writer_config = Some(LogWriterConfig::Custom(log_writer));
		self
	}

	/// Sets the Bitcoin network used.
	pub fn set_network(&mut self, network: Network) -> &mut Self {
		self.config.network = network;
		self
	}

	/// Sets the IP addresses and TCP ports on which [`Node`] will listen for incoming network connections.
	pub fn set_listening_addresses(
		&mut self, listening_addresses: Vec<SocketAddress>,
	) -> Result<&mut Self, BuildError> {
		if listening_addresses.len() > 100 {
			return Err(BuildError::InvalidListeningAddresses);
		}

		self.config.listening_addresses = Some(listening_addresses);
		Ok(self)
	}

	/// Sets the IP addresses and TCP ports which [`Node`] will announce to the gossip network as
	/// the addresses it accepts connections on.
	///
	/// **Note**: If unset, the [`listening_addresses`] will be used as the list of addresses to announce.
	///
	/// [`listening_addresses`]: Self::set_listening_addresses
	pub fn set_announcement_addresses(
		&mut self, announcement_addresses: Vec<SocketAddress>,
	) -> Result<&mut Self, BuildError> {
		if announcement_addresses.len() > 100 {
			return Err(BuildError::InvalidAnnouncementAddresses);
		}

		self.config.announcement_addresses = Some(announcement_addresses);
		Ok(self)
	}

	/// Sets the node alias that will be used when broadcasting announcements to the gossip
	/// network.
	///
	/// The provided alias must be a valid UTF-8 string and no longer than 32 bytes in total.
	pub fn set_node_alias(&mut self, node_alias: String) -> Result<&mut Self, BuildError> {
		let node_alias = sanitize_alias(&node_alias)?;

		self.config.node_alias = Some(node_alias);
		Ok(self)
	}

	/// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options
	/// previously configured.
	pub fn build(&self) -> Result<Node, BuildError> {
		let storage_dir_path = self.config.storage_dir_path.clone();
		fs::create_dir_all(storage_dir_path.clone())
			.map_err(|_| BuildError::StoragePathAccessFailed)?;
		let kv_store = Arc::new(
			SqliteStore::new(
				storage_dir_path.into(),
				Some(io::sqlite_store::SQLITE_DB_FILE_NAME.to_string()),
				Some(io::sqlite_store::KV_TABLE_NAME.to_string()),
			)
			.map_err(|_| BuildError::KVStoreSetupFailed)?,
		);
		self.build_with_store(kv_store)
	}

	/// Builds a [`Node`] instance with a [`FilesystemStore`] backend and according to the options
	/// previously configured.
	pub fn build_with_fs_store(&self) -> Result<Node, BuildError> {
		let mut storage_dir_path: PathBuf = self.config.storage_dir_path.clone().into();
		storage_dir_path.push("fs_store");

		fs::create_dir_all(storage_dir_path.clone())
			.map_err(|_| BuildError::StoragePathAccessFailed)?;
		let kv_store = Arc::new(FilesystemStore::new(storage_dir_path));
		self.build_with_store(kv_store)
	}

	/// Builds a [`Node`] instance with a [VSS] backend and according to the options
	/// previously configured.
	///
	/// Uses an [LNURL-auth]-based authentication scheme as the default method for
	/// authentication/authorization.
	///
	/// The LNURL challenge will be retrieved by making a request to the given `lnurl_auth_server_url`.
	/// The JWT token returned in response to the signed LNURL request will be used for
	/// authentication/authorization of all requests made to VSS.
	///
	/// The `fixed_headers` are included as-is in all requests made to the VSS and LNURL-auth servers.
	///
	/// **Caution**: VSS support is in **alpha** and is considered experimental.
	/// Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are
	/// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted.
	///
	/// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md
	/// [LNURL-auth]: https://github.com/lnurl/luds/blob/luds/04.md
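	///
	/// A hedged usage sketch; the URLs, store id, and headers are placeholder values:
	///
	/// ```ignore
	/// use std::collections::HashMap;
	///
	/// let node = builder.build_with_vss_store(
	/// 	"https://vss.example.com/vss".to_string(),
	/// 	"my_store_id".to_string(),
	/// 	"https://lnurl-auth.example.com/auth".to_string(),
	/// 	HashMap::new(),
	/// )?;
	/// ```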
	pub fn build_with_vss_store(
		&self, vss_url: String, store_id: String, lnurl_auth_server_url: String,
		fixed_headers: HashMap<String, String>,
	) -> Result<Node, BuildError> {
		use bitcoin::key::Secp256k1;

		let logger = setup_logger(&self.log_writer_config, &self.config)?;

		let seed_bytes = seed_bytes_from_config(
			&self.config,
			self.entropy_source_config.as_ref(),
			Arc::clone(&logger),
		)?;

		let config = Arc::new(self.config.clone());

		let vss_xprv =
			derive_xprv(config, &seed_bytes, VSS_HARDENED_CHILD_INDEX, Arc::clone(&logger))?;

		let lnurl_auth_xprv = vss_xprv
			.derive_priv(
				&Secp256k1::new(),
				&[ChildNumber::Hardened { index: VSS_LNURL_AUTH_HARDENED_CHILD_INDEX }],
			)
			.map_err(|e| {
				log_error!(logger, "Failed to derive VSS secret: {}", e);
				BuildError::KVStoreSetupFailed
			})?;

		let lnurl_auth_jwt_provider =
			LnurlAuthToJwtProvider::new(lnurl_auth_xprv, lnurl_auth_server_url, fixed_headers)
				.map_err(|e| {
					log_error!(logger, "Failed to create LnurlAuthToJwtProvider: {}", e);
					BuildError::KVStoreSetupFailed
				})?;

		let header_provider = Arc::new(lnurl_auth_jwt_provider);

		self.build_with_vss_store_and_header_provider(vss_url, store_id, header_provider)
	}

	/// Builds a [`Node`] instance with a [VSS] backend and according to the options
	/// previously configured.
	///
	/// Uses [`FixedHeaders`] as the default method for authentication/authorization.
	///
	/// The given `fixed_headers` are included as-is in all requests made to VSS.
	///
	/// **Caution**: VSS support is in **alpha** and is considered experimental.
	/// Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are
	/// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted.
	///
	/// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md
	pub fn build_with_vss_store_and_fixed_headers(
		&self, vss_url: String, store_id: String, fixed_headers: HashMap<String, String>,
	) -> Result<Node, BuildError> {
		let header_provider = Arc::new(FixedHeaders::new(fixed_headers));

		self.build_with_vss_store_and_header_provider(vss_url, store_id, header_provider)
	}

	/// Builds a [`Node`] instance with a [VSS] backend and according to the options
	/// previously configured.
	///
	/// The given `header_provider` is used to attach headers to every request made
	/// to VSS.
	///
	/// **Caution**: VSS support is in **alpha** and is considered experimental.
	/// Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are
	/// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted.
	///
	/// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md
	pub fn build_with_vss_store_and_header_provider(
		&self, vss_url: String, store_id: String, header_provider: Arc<dyn VssHeaderProvider>,
	) -> Result<Node, BuildError> {
		let logger = setup_logger(&self.log_writer_config, &self.config)?;

		let seed_bytes = seed_bytes_from_config(
			&self.config,
			self.entropy_source_config.as_ref(),
			Arc::clone(&logger),
		)?;

		let config = Arc::new(self.config.clone());

		let vss_xprv = derive_xprv(
			config.clone(),
			&seed_bytes,
			VSS_HARDENED_CHILD_INDEX,
			Arc::clone(&logger),
		)?;

		let vss_seed_bytes: [u8; 32] = vss_xprv.private_key.secret_bytes();

		let vss_store =
			VssStore::new(vss_url, store_id, vss_seed_bytes, header_provider).map_err(|e| {
				log_error!(logger, "Failed to setup VssStore: {}", e);
				BuildError::KVStoreSetupFailed
			})?;
		build_with_store_internal(
			config,
			self.chain_data_source_config.as_ref(),
			self.gossip_source_config.as_ref(),
			self.liquidity_source_config.as_ref(),
			seed_bytes,
			logger,
			Arc::new(vss_store),
		)
	}

	/// Builds a [`Node`] instance according to the options previously configured.
	pub fn build_with_store(&self, kv_store: Arc<DynStore>) -> Result<Node, BuildError> {
		let logger = setup_logger(&self.log_writer_config, &self.config)?;

		let seed_bytes = seed_bytes_from_config(
			&self.config,
			self.entropy_source_config.as_ref(),
			Arc::clone(&logger),
		)?;
		let config = Arc::new(self.config.clone());

		build_with_store_internal(
			config,
			self.chain_data_source_config.as_ref(),
			self.gossip_source_config.as_ref(),
			self.liquidity_source_config.as_ref(),
			seed_bytes,
			logger,
			kv_store,
		)
	}
}

/// A builder for a [`Node`] instance, allowing some configuration and module choices to be set
/// from the get-go.
///
/// ### Defaults
/// - Wallet entropy is sourced from a `keys_seed` file located under [`Config::storage_dir_path`]
/// - Chain data is sourced from the Esplora endpoint `https://blockstream.info/api`
/// - Gossip data is sourced via the peer-to-peer network
#[derive(Debug)]
#[cfg(feature = "uniffi")]
pub struct ArcedNodeBuilder {
	inner: RwLock<NodeBuilder>,
}

#[cfg(feature = "uniffi")]
impl ArcedNodeBuilder {
	/// Creates a new builder instance with the default configuration.
	pub fn new() -> Self {
		let inner = RwLock::new(NodeBuilder::new());
		Self { inner }
	}

	/// Creates a new builder instance from a [`Config`].
	pub fn from_config(config: Config) -> Self {
		let inner = RwLock::new(NodeBuilder::from_config(config));
		Self { inner }
	}

	/// Configures the [`Node`] instance to source its wallet entropy from a seed file on disk.
	///
	/// If the given file does not exist, a new random seed file will be generated and
	/// stored at the given location.
	pub fn set_entropy_seed_path(&self, seed_path: String) {
		self.inner.write().unwrap().set_entropy_seed_path(seed_path);
	}

	/// Configures the [`Node`] instance to source its wallet entropy from the given
	/// [`WALLET_KEYS_SEED_LEN`] seed bytes.
	///
	/// **Note:** Will return an error if the length of the given `seed_bytes` differs from
	/// [`WALLET_KEYS_SEED_LEN`].
	pub fn set_entropy_seed_bytes(&self, seed_bytes: Vec<u8>) -> Result<(), BuildError> {
		if seed_bytes.len() != WALLET_KEYS_SEED_LEN {
			return Err(BuildError::InvalidSeedBytes);
		}
		let mut bytes = [0u8; WALLET_KEYS_SEED_LEN];
		bytes.copy_from_slice(&seed_bytes);

		self.inner.write().unwrap().set_entropy_seed_bytes(bytes);
		Ok(())
	}

	/// Configures the [`Node`] instance to source its wallet entropy from a [BIP 39] mnemonic.
	///
	/// [BIP 39]: https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki
	pub fn set_entropy_bip39_mnemonic(&self, mnemonic: Mnemonic, passphrase: Option<String>) {
		self.inner.write().unwrap().set_entropy_bip39_mnemonic(mnemonic, passphrase);
	}

	/// Configures the [`Node`] instance to source its chain data from the given Esplora server.
	///
	/// If no `sync_config` is given, default values are used. See [`EsploraSyncConfig`] for more
	/// information.
	pub fn set_chain_source_esplora(
		&self, server_url: String, sync_config: Option<EsploraSyncConfig>,
	) {
		self.inner.write().unwrap().set_chain_source_esplora(server_url, sync_config);
	}

	/// Configures the [`Node`] instance to source its chain data from the given Electrum server.
	///
	/// If no `sync_config` is given, default values are used. See [`ElectrumSyncConfig`] for more
	/// information.
	pub fn set_chain_source_electrum(
		&self, server_url: String, sync_config: Option<ElectrumSyncConfig>,
	) {
		self.inner.write().unwrap().set_chain_source_electrum(server_url, sync_config);
	}

	/// Configures the [`Node`] instance to source its chain data from the given Bitcoin Core RPC
	/// endpoint.
	pub fn set_chain_source_bitcoind_rpc(
		&self, rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String,
	) {
		self.inner.write().unwrap().set_chain_source_bitcoind_rpc(
			rpc_host,
			rpc_port,
			rpc_user,
			rpc_password,
		);
	}

	/// Configures the [`Node`] instance to source its gossip data from the Lightning peer-to-peer
	/// network.
	pub fn set_gossip_source_p2p(&self) {
		self.inner.write().unwrap().set_gossip_source_p2p();
	}

	/// Configures the [`Node`] instance to source its gossip data from the given RapidGossipSync
	/// server.
	pub fn set_gossip_source_rgs(&self, rgs_server_url: String) {
		self.inner.write().unwrap().set_gossip_source_rgs(rgs_server_url);
	}

	/// Configures the [`Node`] instance to source inbound liquidity from the given
	/// [bLIP-51 / LSPS1] service.
	///
	/// Will mark the LSP as trusted for 0-confirmation channels, see [`Config::trusted_peers_0conf`].
	///
	/// The given `token` will be used by the LSP to authenticate the user.
	///
	/// [bLIP-51 / LSPS1]: https://github.com/lightning/blips/blob/master/blip-0051.md
	pub fn set_liquidity_source_lsps1(
		&self, node_id: PublicKey, address: SocketAddress, token: Option<String>,
	) {
		self.inner.write().unwrap().set_liquidity_source_lsps1(node_id, address, token);
	}

	/// Configures the [`Node`] instance to source just-in-time inbound liquidity from the given
	/// [bLIP-52 / LSPS2] service.
	///
	/// Will mark the LSP as trusted for 0-confirmation channels, see [`Config::trusted_peers_0conf`].
	///
	/// The given `token` will be used by the LSP to authenticate the user.
	///
	/// [bLIP-52 / LSPS2]: https://github.com/lightning/blips/blob/master/blip-0052.md
	pub fn set_liquidity_source_lsps2(
		&self, node_id: PublicKey, address: SocketAddress, token: Option<String>,
	) {
		self.inner.write().unwrap().set_liquidity_source_lsps2(node_id, address, token);
	}

	/// Configures the [`Node`] instance to provide an [LSPS2] service, issuing just-in-time
	/// channels to clients.
	///
	/// **Caution**: LSP service support is in **alpha** and is considered an experimental feature.
	///
	/// [LSPS2]: https://github.com/BitcoinAndLightningLayerSpecs/lsp/blob/main/LSPS2/README.md
	pub fn set_liquidity_provider_lsps2(&self, service_config: LSPS2ServiceConfig) {
		self.inner.write().unwrap().set_liquidity_provider_lsps2(service_config);
	}

	/// Sets the used storage directory path.
	pub fn set_storage_dir_path(&self, storage_dir_path: String) {
		self.inner.write().unwrap().set_storage_dir_path(storage_dir_path);
	}

	/// Configures the [`Node`] instance to write logs to the filesystem.
	///
	/// The `log_file_path` defaults to [`DEFAULT_LOG_FILENAME`] in the configured
	/// [`Config::storage_dir_path`] if set to `None`.
	///
	/// If set, `max_log_level` sets the maximum log level. Otherwise, it defaults to
	/// [`DEFAULT_LOG_LEVEL`].
	///
	/// [`DEFAULT_LOG_FILENAME`]: crate::config::DEFAULT_LOG_FILENAME
	pub fn set_filesystem_logger(
		&self, log_file_path: Option<String>, log_level: Option<LogLevel>,
	) {
		self.inner.write().unwrap().set_filesystem_logger(log_file_path, log_level);
	}

	/// Configures the [`Node`] instance to write logs to the [`log`](https://crates.io/crates/log) facade.
	pub fn set_log_facade_logger(&self) {
		self.inner.write().unwrap().set_log_facade_logger();
	}

	/// Configures the [`Node`] instance to write logs to the provided custom [`LogWriter`].
	pub fn set_custom_logger(&self, log_writer: Arc<dyn LogWriter>) {
		self.inner.write().unwrap().set_custom_logger(log_writer);
	}

	/// Sets the Bitcoin network used.
	pub fn set_network(&self, network: Network) {
		self.inner.write().unwrap().set_network(network);
	}

	/// Sets the IP addresses and TCP ports on which [`Node`] will listen for incoming network connections.
	pub fn set_listening_addresses(
		&self, listening_addresses: Vec<SocketAddress>,
	) -> Result<(), BuildError> {
		self.inner.write().unwrap().set_listening_addresses(listening_addresses).map(|_| ())
	}

	/// Sets the IP addresses and TCP ports which [`Node`] will announce to the gossip network as
	/// the addresses it accepts connections on.
	///
	/// **Note**: If unset, the [`listening_addresses`] will be used as the list of addresses to announce.
	///
	/// [`listening_addresses`]: Self::set_listening_addresses
	pub fn set_announcement_addresses(
		&self, announcement_addresses: Vec<SocketAddress>,
	) -> Result<(), BuildError> {
		self.inner.write().unwrap().set_announcement_addresses(announcement_addresses).map(|_| ())
	}

	/// Sets the node alias that will be used when broadcasting announcements to the gossip
	/// network.
	///
	/// The provided alias must be a valid UTF-8 string and no longer than 32 bytes in total.
	pub fn set_node_alias(&self, node_alias: String) -> Result<(), BuildError> {
		self.inner.write().unwrap().set_node_alias(node_alias).map(|_| ())
	}

	/// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options
	/// previously configured.
	pub fn build(&self) -> Result<Arc<Node>, BuildError> {
		self.inner.read().unwrap().build().map(Arc::new)
	}

	/// Builds a [`Node`] instance with a [`FilesystemStore`] backend and according to the options
	/// previously configured.
	pub fn build_with_fs_store(&self) -> Result<Arc<Node>, BuildError> {
		self.inner.read().unwrap().build_with_fs_store().map(Arc::new)
	}

	/// Builds a [`Node`] instance with a [VSS] backend and according to the options
	/// previously configured.
	///
	/// Uses an [LNURL-auth]-based authentication scheme as the default method for
	/// authentication/authorization.
	///
	/// The LNURL challenge will be retrieved by making a request to the given `lnurl_auth_server_url`.
	/// The JWT token returned in response to the signed LNURL request will be used for
	/// authentication/authorization of all requests made to VSS.
	///
	/// The `fixed_headers` are included as-is in all requests made to the VSS and LNURL-auth servers.
	///
	/// **Caution**: VSS support is in **alpha** and is considered experimental.
	/// Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are
	/// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted.
	///
	/// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md
	/// [LNURL-auth]: https://github.com/lnurl/luds/blob/luds/04.md
	pub fn build_with_vss_store(
		&self, vss_url: String, store_id: String, lnurl_auth_server_url: String,
		fixed_headers: HashMap<String, String>,
	) -> Result<Arc<Node>, BuildError> {
		self.inner
			.read()
			.unwrap()
			.build_with_vss_store(vss_url, store_id, lnurl_auth_server_url, fixed_headers)
			.map(Arc::new)
	}

	/// Builds a [`Node`] instance with a [VSS] backend and according to the options
	/// previously configured.
	///
	/// Uses [`FixedHeaders`] as the default method for authentication/authorization.
	///
	/// The given `fixed_headers` are included as-is in all requests made to VSS.
	///
	/// **Caution**: VSS support is in **alpha** and is considered experimental.
	/// Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are
	/// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted.
	///
	/// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md
	pub fn build_with_vss_store_and_fixed_headers(
		&self, vss_url: String, store_id: String, fixed_headers: HashMap<String, String>,
	) -> Result<Arc<Node>, BuildError> {
		self.inner
			.read()
			.unwrap()
			.build_with_vss_store_and_fixed_headers(vss_url, store_id, fixed_headers)
			.map(Arc::new)
	}

	/// Builds a [`Node`] instance with a [VSS] backend and according to the options
	/// previously configured.
	///
	/// The given `header_provider` is used to attach headers to every request made
	/// to VSS.
	///
	/// **Caution**: VSS support is in **alpha** and is considered experimental.
	/// Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are
	/// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted.
	///
	/// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md
	pub fn build_with_vss_store_and_header_provider(
		&self, vss_url: String, store_id: String, header_provider: Arc<dyn VssHeaderProvider>,
	) -> Result<Arc<Node>, BuildError> {
		self.inner
			.read()
			.unwrap()
			.build_with_vss_store_and_header_provider(vss_url, store_id, header_provider)
			.map(Arc::new)
	}

	/// Builds a [`Node`] instance according to the options previously configured.
	pub fn build_with_store(&self, kv_store: Arc<DynStore>) -> Result<Arc<Node>, BuildError> {
		self.inner.read().unwrap().build_with_store(kv_store).map(Arc::new)
	}
}

/// Builds a [`Node`] instance according to the options previously configured.
fn build_with_store_internal(
	config: Arc<Config>, chain_data_source_config: Option<&ChainDataSourceConfig>,
	gossip_source_config: Option<&GossipSourceConfig>,
	liquidity_source_config: Option<&LiquiditySourceConfig>, seed_bytes: [u8; 64],
	logger: Arc<Logger>, kv_store: Arc<DynStore>,
) -> Result<Node, BuildError> {
	optionally_install_rustls_cryptoprovider();

	if let Err(err) = may_announce_channel(&config) {
		if config.announcement_addresses.is_some() {
			log_error!(logger, "Announcement addresses were set but some required configuration options for node announcement are missing: {}", err);
			let build_error = if matches!(err, AnnounceError::MissingNodeAlias) {
				BuildError::InvalidNodeAlias
			} else {
				BuildError::InvalidListeningAddresses
			};
			return Err(build_error);
		}

		if config.node_alias.is_some() {
			log_error!(logger, "Node alias was set but some required configuration options for node announcement are missing: {}", err);
			return Err(BuildError::InvalidListeningAddresses);
		}
	}

	// Initialize the status fields.
	let is_listening = Arc::new(AtomicBool::new(false));
	let node_metrics = match read_node_metrics(Arc::clone(&kv_store), Arc::clone(&logger)) {
		Ok(metrics) => Arc::new(RwLock::new(metrics)),
		Err(e) => {
			if e.kind() == std::io::ErrorKind::NotFound {
				Arc::new(RwLock::new(NodeMetrics::default()))
			} else {
				return Err(BuildError::ReadFailed);
			}
		},
	};

	// Initialize the on-chain wallet and chain access
	let xprv = bitcoin::bip32::Xpriv::new_master(config.network, &seed_bytes).map_err(|e| {
		log_error!(logger, "Failed to derive master secret: {}", e);
		BuildError::InvalidSeedBytes
	})?;

	let descriptor = Bip84(xprv, KeychainKind::External);
	let change_descriptor = Bip84(xprv, KeychainKind::Internal);
	let mut wallet_persister =
		KVStoreWalletPersister::new(Arc::clone(&kv_store), Arc::clone(&logger));
	let wallet_opt = BdkWallet::load()
		.descriptor(KeychainKind::External, Some(descriptor.clone()))
		.descriptor(KeychainKind::Internal, Some(change_descriptor.clone()))
		.extract_keys()
		.check_network(config.network)
		.load_wallet(&mut wallet_persister)
		.map_err(|e| match e {
			bdk_wallet::LoadWithPersistError::InvalidChangeSet(
				bdk_wallet::LoadError::Mismatch(bdk_wallet::LoadMismatch::Network {
					loaded,
					expected,
				}),
			) => {
				log_error!(
					logger,
					"Failed to setup wallet: Networks do not match. Expected {} but got {}",
					expected,
					loaded
				);
				BuildError::NetworkMismatch
			},
			_ => {
				log_error!(logger, "Failed to set up wallet: {}", e);
				BuildError::WalletSetupFailed
			},
		})?;
	let bdk_wallet = match wallet_opt {
		Some(wallet) => wallet,
		None => BdkWallet::create(descriptor, change_descriptor)
			.network(config.network)
			.create_wallet(&mut wallet_persister)
			.map_err(|e| {
				log_error!(logger, "Failed to set up wallet: {}", e);
				BuildError::WalletSetupFailed
			})?,
	};

	let tx_broadcaster = Arc::new(TransactionBroadcaster::new(Arc::clone(&logger)));
	let fee_estimator = Arc::new(OnchainFeeEstimator::new());

	let payment_store = match io::utils::read_payments(Arc::clone(&kv_store), Arc::clone(&logger)) {
		Ok(payments) => Arc::new(PaymentStore::new(
			payments,
			PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE.to_string(),
			PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE.to_string(),
			Arc::clone(&kv_store),
			Arc::clone(&logger),
		)),
		Err(_) => {
			return Err(BuildError::ReadFailed);
		},
	};

	let wallet = Arc::new(Wallet::new(
		bdk_wallet,
		wallet_persister,
		Arc::clone(&tx_broadcaster),
		Arc::clone(&fee_estimator),
		Arc::clone(&payment_store),
		Arc::clone(&config),
		Arc::clone(&logger),
	));

	let chain_source = match chain_data_source_config {
		Some(ChainDataSourceConfig::Esplora { server_url, sync_config }) => {
			let sync_config = sync_config.unwrap_or(EsploraSyncConfig::default());
			Arc::new(ChainSource::new_esplora(
				server_url.clone(),
				sync_config,
				Arc::clone(&wallet),
				Arc::clone(&fee_estimator),
				Arc::clone(&tx_broadcaster),
				Arc::clone(&kv_store),
				Arc::clone(&config),
				Arc::clone(&logger),
				Arc::clone(&node_metrics),
			))
		},
		Some(ChainDataSourceConfig::Electrum { server_url, sync_config }) => {
			let sync_config = sync_config.unwrap_or(ElectrumSyncConfig::default());
			Arc::new(ChainSource::new_electrum(
				server_url.clone(),
				sync_config,
				Arc::clone(&wallet),
				Arc::clone(&fee_estimator),
				Arc::clone(&tx_broadcaster),
				Arc::clone(&kv_store),
				Arc::clone(&config),
				Arc::clone(&logger),
				Arc::clone(&node_metrics),
			))
		},
		Some(ChainDataSourceConfig::BitcoindRpc { rpc_host, rpc_port, rpc_user, rpc_password }) => {
			Arc::new(ChainSource::new_bitcoind_rpc(
				rpc_host.clone(),
				*rpc_port,
				rpc_user.clone(),
				rpc_password.clone(),
				Arc::clone(&wallet),
				Arc::clone(&fee_estimator),
				Arc::clone(&tx_broadcaster),
				Arc::clone(&kv_store),
				Arc::clone(&config),
				Arc::clone(&logger),
				Arc::clone(&node_metrics),
			))
		},
		None => {
			// Default to an Esplora client.
			let server_url = DEFAULT_ESPLORA_SERVER_URL.to_string();
			let sync_config = EsploraSyncConfig::default();
			Arc::new(ChainSource::new_esplora(
				server_url.clone(),
				sync_config,
				Arc::clone(&wallet),
				Arc::clone(&fee_estimator),
				Arc::clone(&tx_broadcaster),
				Arc::clone(&kv_store),
				Arc::clone(&config),
				Arc::clone(&logger),
				Arc::clone(&node_metrics),
			))
		},
	};

	let runtime = Arc::new(RwLock::new(None));

	// Initialize the ChainMonitor
	let chain_monitor: Arc<ChainMonitor> = Arc::new(chainmonitor::ChainMonitor::new(
		Some(Arc::clone(&chain_source)),
		Arc::clone(&tx_broadcaster),
		Arc::clone(&logger),
		Arc::clone(&fee_estimator),
		Arc::clone(&kv_store),
	));

	// Initialize the KeysManager
	let cur_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).map_err(|e| {
		log_error!(logger, "Failed to get current time: {}", e);
		BuildError::InvalidSystemTime
	})?;

	let ldk_seed_bytes: [u8; 32] = xprv.private_key.secret_bytes();
	let keys_manager = Arc::new(KeysManager::new(
		&ldk_seed_bytes,
		cur_time.as_secs(),
		cur_time.subsec_nanos(),
		Arc::clone(&wallet),
		Arc::clone(&logger),
	));

	// Initialize the network graph, scorer, and router
	let network_graph =
		match io::utils::read_network_graph(Arc::clone(&kv_store), Arc::clone(&logger)) {
			Ok(graph) => Arc::new(graph),
			Err(e) => {
				if e.kind() == std::io::ErrorKind::NotFound {
					Arc::new(Graph::new(config.network.into(), Arc::clone(&logger)))
				} else {
					return Err(BuildError::ReadFailed);
				}
			},
		};

	let scorer = match io::utils::read_scorer(
		Arc::clone(&kv_store),
		Arc::clone(&network_graph),
		Arc::clone(&logger),
	) {
		Ok(scorer) => Arc::new(Mutex::new(scorer)),
		Err(e) => {
			if e.kind() == std::io::ErrorKind::NotFound {
				let params = ProbabilisticScoringDecayParameters::default();
				Arc::new(Mutex::new(ProbabilisticScorer::new(
					params,
					Arc::clone(&network_graph),
					Arc::clone(&logger),
				)))
			} else {
				return Err(BuildError::ReadFailed);
			}
		},
	};

	let scoring_fee_params = ProbabilisticScoringFeeParameters::default();
	let router = Arc::new(DefaultRouter::new(
		Arc::clone(&network_graph),
		Arc::clone(&logger),
		Arc::clone(&keys_manager),
		Arc::clone(&scorer),
		scoring_fee_params,
	));

	// Read ChannelMonitor state from store
	let channel_monitors = match read_channel_monitors(
		Arc::clone(&kv_store),
		Arc::clone(&keys_manager),
		Arc::clone(&keys_manager),
	) {
		Ok(monitors) => monitors,
		Err(e) => {
			if e.kind() == lightning::io::ErrorKind::NotFound {
				Vec::new()
			} else {
				log_error!(logger, "Failed to read channel monitors: {}", e.to_string());
				return Err(BuildError::ReadFailed);
			}
		},
	};

	let mut user_config = default_user_config(&config);
	if liquidity_source_config.and_then(|lsc| lsc.lsps2_client.as_ref()).is_some() {
		// Generally allow claiming underpaying HTLCs as the LSP will skim off some fee. We'll
		// check that they don't take too much before claiming.
		user_config.channel_config.accept_underpaying_htlcs = true;

		// FIXME: When we're an LSPS2 client, set maximum allowed inbound HTLC value in flight
		// to 100%. We should eventually be able to set this on a per-channel basis, but for
		// now we just bump the default for all channels.
		user_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel =
			100;
	}

	if liquidity_source_config.and_then(|lsc| lsc.lsps2_service.as_ref()).is_some() {
		// If we act as an LSPS2 service, we need to be able to intercept HTLCs and forward the
		// information to the service handler.
		user_config.accept_intercept_htlcs = true;

		// If we act as an LSPS2 service, we allow forwarding to unannounced channels.
		user_config.accept_forwards_to_priv_channels = true;

		// If we act as an LSPS2 service, set the HTLC-value-in-flight to 100% of the channel value
		// to ensure we can forward the initial payment.
		user_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel =
			100;
	}

	let message_router =
		Arc::new(MessageRouter::new(Arc::clone(&network_graph), Arc::clone(&keys_manager)));

	// Initialize the ChannelManager
	let channel_manager = {
		if let Ok(res) = kv_store.read(
			CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
			CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
			CHANNEL_MANAGER_PERSISTENCE_KEY,
		) {
			let mut reader = Cursor::new(res);
			let channel_monitor_references =
				channel_monitors.iter().map(|(_, chanmon)| chanmon).collect();
			let read_args = ChannelManagerReadArgs::new(
				Arc::clone(&keys_manager),
				Arc::clone(&keys_manager),
				Arc::clone(&keys_manager),
				Arc::clone(&fee_estimator),
				Arc::clone(&chain_monitor),
				Arc::clone(&tx_broadcaster),
				Arc::clone(&router),
				Arc::clone(&message_router),
				Arc::clone(&logger),
				user_config,
				channel_monitor_references,
			);
			let (_hash, channel_manager) =
				<(BlockHash, ChannelManager)>::read(&mut reader, read_args).map_err(|e| {
					log_error!(logger, "Failed to read channel manager from KVStore: {}", e);
					BuildError::ReadFailed
				})?;
			channel_manager
		} else {
			// We're starting a fresh node.
			let genesis_block_hash =
				bitcoin::blockdata::constants::genesis_block(config.network).block_hash();

			let chain_params = ChainParameters {
				network: config.network.into(),
				best_block: BestBlock::new(genesis_block_hash, 0),
			};
			channelmanager::ChannelManager::new(
				Arc::clone(&fee_estimator),
				Arc::clone(&chain_monitor),
				Arc::clone(&tx_broadcaster),
				Arc::clone(&router),
				Arc::clone(&message_router),
				Arc::clone(&logger),
				Arc::clone(&keys_manager),
				Arc::clone(&keys_manager),
				Arc::clone(&keys_manager),
				user_config,
				chain_params,
				cur_time.as_secs() as u32,
			)
		}
	};

	let channel_manager = Arc::new(channel_manager);

	// Give ChannelMonitors to ChainMonitor
	for (_blockhash, channel_monitor) in channel_monitors.into_iter() {
		let funding_outpoint = channel_monitor.get_funding_txo().0;
		chain_monitor.watch_channel(funding_outpoint, channel_monitor).map_err(|e| {
			log_error!(logger, "Failed to watch channel monitor: {:?}", e);
			BuildError::InvalidChannelMonitor
		})?;
	}

	// Initialize the PeerManager
	let onion_messenger: Arc<OnionMessenger> = Arc::new(OnionMessenger::new(
		Arc::clone(&keys_manager),
		Arc::clone(&keys_manager),
		Arc::clone(&logger),
		Arc::clone(&channel_manager),
		message_router,
		Arc::clone(&channel_manager),
		IgnoringMessageHandler {},
		IgnoringMessageHandler {},
		IgnoringMessageHandler {},
	));
	let ephemeral_bytes: [u8; 32] = keys_manager.get_secure_random_bytes();

	// Initialize the GossipSource
	// Use the configured gossip source, if the user set one, otherwise default to P2PNetwork.
	let gossip_source_config = gossip_source_config.unwrap_or(&GossipSourceConfig::P2PNetwork);

	let gossip_source = match gossip_source_config {
		GossipSourceConfig::P2PNetwork => {
			let p2p_source =
				Arc::new(GossipSource::new_p2p(Arc::clone(&network_graph), Arc::clone(&logger)));

			// Reset the RGS sync timestamp in case we somehow switch gossip sources
			{
				let mut locked_node_metrics = node_metrics.write().unwrap();
				locked_node_metrics.latest_rgs_snapshot_timestamp = None;
				write_node_metrics(
					&*locked_node_metrics,
					Arc::clone(&kv_store),
					Arc::clone(&logger),
				)
				.map_err(|e| {
					log_error!(logger, "Failed writing to store: {}", e);
					BuildError::WriteFailed
				})?;
			}
			p2p_source
		},
		GossipSourceConfig::RapidGossipSync(rgs_server) => {
			let latest_sync_timestamp =
				node_metrics.read().unwrap().latest_rgs_snapshot_timestamp.unwrap_or(0);
			Arc::new(GossipSource::new_rgs(
				rgs_server.clone(),
				latest_sync_timestamp,
				Arc::clone(&network_graph),
				Arc::clone(&logger),
			))
		},
	};

	let (liquidity_source, custom_message_handler) =
		if let Some(lsc) = liquidity_source_config.as_ref() {
			let mut liquidity_source_builder = LiquiditySourceBuilder::new(
				Arc::clone(&wallet),
				Arc::clone(&channel_manager),
				Arc::clone(&keys_manager),
				Arc::clone(&chain_source),
				Arc::clone(&config),
				Arc::clone(&logger),
			);

			lsc.lsps1_client.as_ref().map(|config| {
				liquidity_source_builder.lsps1_client(
					config.node_id,
					config.address.clone(),
					config.token.clone(),
				)
			});

			lsc.lsps2_client.as_ref().map(|config| {
				liquidity_source_builder.lsps2_client(
					config.node_id,
					config.address.clone(),
					config.token.clone(),
				)
			});

			let promise_secret = {
				let lsps_xpriv = derive_xprv(
					Arc::clone(&config),
					&seed_bytes,
					LSPS_HARDENED_CHILD_INDEX,
					Arc::clone(&logger),
				)?;
				lsps_xpriv.private_key.secret_bytes()
			};
			lsc.lsps2_service.as_ref().map(|config| {
				liquidity_source_builder.lsps2_service(promise_secret, config.clone())
			});

			let liquidity_source = Arc::new(liquidity_source_builder.build());
			let custom_message_handler =
				Arc::new(NodeCustomMessageHandler::new_liquidity(Arc::clone(&liquidity_source)));
			(Some(liquidity_source), custom_message_handler)
		} else {
			(None, Arc::new(NodeCustomMessageHandler::new_ignoring()))
		};

	let msg_handler = match gossip_source.as_gossip_sync() {
		GossipSync::P2P(p2p_gossip_sync) => MessageHandler {
			chan_handler: Arc::clone(&channel_manager),
			route_handler: Arc::clone(&p2p_gossip_sync)
				as Arc<dyn RoutingMessageHandler + Sync + Send>,
			onion_message_handler: Arc::clone(&onion_messenger),
			custom_message_handler,
		},
		GossipSync::Rapid(_) => MessageHandler {
			chan_handler: Arc::clone(&channel_manager),
			route_handler: Arc::new(IgnoringMessageHandler {})
				as Arc<dyn RoutingMessageHandler + Sync + Send>,
			onion_message_handler: Arc::clone(&onion_messenger),
			custom_message_handler,
		},
		GossipSync::None => {
			unreachable!("We must always have a gossip sync!");
		},
	};

	let cur_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).map_err(|e| {
		log_error!(logger, "Failed to get current time: {}", e);
		BuildError::InvalidSystemTime
	})?;
1410
1411	let peer_manager = Arc::new(PeerManager::new(
1412		msg_handler,
1413		cur_time.as_secs().try_into().map_err(|e| {
1414			log_error!(logger, "Failed to get current time: {}", e);
1415			BuildError::InvalidSystemTime
1416		})?,
1417		&ephemeral_bytes,
1418		Arc::clone(&logger),
1419		Arc::clone(&keys_manager),
1420	));
1421
1422	liquidity_source.as_ref().map(|l| l.set_peer_manager(Arc::clone(&peer_manager)));
1423
1424	gossip_source.set_gossip_verifier(
1425		Arc::clone(&chain_source),
1426		Arc::clone(&peer_manager),
1427		Arc::clone(&runtime),
1428	);
1429
1430	let connection_manager =
1431		Arc::new(ConnectionManager::new(Arc::clone(&peer_manager), Arc::clone(&logger)));
1432
	let output_sweeper = match io::utils::read_output_sweeper(
		Arc::clone(&tx_broadcaster),
		Arc::clone(&fee_estimator),
		Arc::clone(&chain_source),
		Arc::clone(&keys_manager),
		Arc::clone(&kv_store),
		Arc::clone(&logger),
	) {
		Ok(output_sweeper) => Arc::new(output_sweeper),
		Err(e) => {
			if e.kind() == std::io::ErrorKind::NotFound {
				Arc::new(OutputSweeper::new(
					channel_manager.current_best_block(),
					Arc::clone(&tx_broadcaster),
					Arc::clone(&fee_estimator),
					Some(Arc::clone(&chain_source)),
					Arc::clone(&keys_manager),
					Arc::clone(&keys_manager),
					Arc::clone(&kv_store),
					Arc::clone(&logger),
				))
			} else {
				return Err(BuildError::ReadFailed);
			}
		},
	};

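	// Migrate any spendable-output data still persisted under the deprecated keys over to the
	// sweeper.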
	match io::utils::migrate_deprecated_spendable_outputs(
		Arc::clone(&output_sweeper),
		Arc::clone(&kv_store),
		Arc::clone(&logger),
	) {
		Ok(()) => {
			log_info!(logger, "Successfully migrated OutputSweeper data.");
		},
		Err(e) => {
			log_error!(logger, "Failed to migrate OutputSweeper data: {}", e);
			return Err(BuildError::ReadFailed);
		},
	}

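	// The event queue and peer store follow the same read-or-initialize-on-`NotFound` pattern as
	// the sweeper above.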
	let event_queue = match io::utils::read_event_queue(Arc::clone(&kv_store), Arc::clone(&logger))
	{
		Ok(event_queue) => Arc::new(event_queue),
		Err(e) => {
			if e.kind() == std::io::ErrorKind::NotFound {
				Arc::new(EventQueue::new(Arc::clone(&kv_store), Arc::clone(&logger)))
			} else {
				return Err(BuildError::ReadFailed);
			}
		},
	};

	let peer_store = match io::utils::read_peer_info(Arc::clone(&kv_store), Arc::clone(&logger)) {
		Ok(peer_store) => Arc::new(peer_store),
		Err(e) => {
			if e.kind() == std::io::ErrorKind::NotFound {
				Arc::new(PeerStore::new(Arc::clone(&kv_store), Arc::clone(&logger)))
			} else {
				return Err(BuildError::ReadFailed);
			}
		},
	};

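	// Shutdown signaling and (initially empty) slots for the background task handles.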
	let (stop_sender, _) = tokio::sync::watch::channel(());
	let background_processor_task = Mutex::new(None);
	let background_tasks = Mutex::new(None);
	let cancellable_background_tasks = Mutex::new(None);

	Ok(Node {
		runtime,
		stop_sender,
		background_processor_task,
		background_tasks,
		cancellable_background_tasks,
		config,
		wallet,
		chain_source,
		tx_broadcaster,
		event_queue,
		channel_manager,
		chain_monitor,
		output_sweeper,
		peer_manager,
		onion_messenger,
		connection_manager,
		keys_manager,
		network_graph,
		gossip_source,
		liquidity_source,
		kv_store,
		logger,
		_router: router,
		scorer,
		peer_store,
		payment_store,
		is_listening,
		node_metrics,
	})
}

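/// Installs a default `rustls` `CryptoProvider` (backed by `aws-lc-rs`) at most once per process
/// if none has been installed yet.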
fn optionally_install_rustls_cryptoprovider() {
	// Use a process-wide `Once` so the provider is installed at most once, even when multiple
	// threads race here. This is mostly required for running tests concurrently.
	static INIT_CRYPTO: Once = Once::new();

	INIT_CRYPTO.call_once(|| {
		// Ensure we always install a `CryptoProvider` for `rustls` if none was installed previously.
		if rustls::crypto::CryptoProvider::get_default().is_none() {
			let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
		}

		// Refuse to start up without TLS support. Better to catch this now than later at runtime.
		assert!(
			rustls::crypto::CryptoProvider::get_default().is_some(),
			"We need to have a CryptoProvider"
		);
	});
}

/// Sets up the node logger.
fn setup_logger(
	log_writer_config: &Option<LogWriterConfig>, config: &Config,
) -> Result<Arc<Logger>, BuildError> {
	let logger = match log_writer_config {
		Some(LogWriterConfig::File { log_file_path, max_log_level }) => {
			let log_file_path = log_file_path
				.clone()
				.unwrap_or_else(|| format!("{}/{}", config.storage_dir_path, DEFAULT_LOG_FILENAME));
			let max_log_level = max_log_level.unwrap_or(DEFAULT_LOG_LEVEL);

			Logger::new_fs_writer(log_file_path, max_log_level)
				.map_err(|_| BuildError::LoggerSetupFailed)?
		},
		Some(LogWriterConfig::Log) => Logger::new_log_facade(),

		Some(LogWriterConfig::Custom(custom_log_writer)) => {
			Logger::new_custom_writer(Arc::clone(&custom_log_writer))
		},
		None => {
			// Default to using a `FileWriter` in the configured storage directory.
			let log_file_path = format!("{}/{}", config.storage_dir_path, DEFAULT_LOG_FILENAME);
			let log_level = DEFAULT_LOG_LEVEL;
			Logger::new_fs_writer(log_file_path, log_level)
				.map_err(|_| BuildError::LoggerSetupFailed)?
		},
	};

	Ok(Arc::new(logger))
}

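/// Resolves the 64-byte wallet seed from the configured entropy source, falling back to reading
/// or generating a `keys_seed` file in the configured storage directory.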
fn seed_bytes_from_config(
	config: &Config, entropy_source_config: Option<&EntropySourceConfig>, logger: Arc<Logger>,
) -> Result<[u8; 64], BuildError> {
	match entropy_source_config {
		Some(EntropySourceConfig::SeedBytes(bytes)) => Ok(bytes.clone()),
		Some(EntropySourceConfig::SeedFile(seed_path)) => {
			Ok(io::utils::read_or_generate_seed_file(seed_path, Arc::clone(&logger))
				.map_err(|_| BuildError::InvalidSeedFile)?)
		},
		Some(EntropySourceConfig::Bip39Mnemonic { mnemonic, passphrase }) => match passphrase {
			Some(passphrase) => Ok(mnemonic.to_seed(passphrase)),
			None => Ok(mnemonic.to_seed("")),
		},
		None => {
			// Default to reading from (or generating) a seed file at the default location.
			let seed_path = format!("{}/keys_seed", config.storage_dir_path);
			Ok(io::utils::read_or_generate_seed_file(&seed_path, Arc::clone(&logger))
				.map_err(|_| BuildError::InvalidSeedFile)?)
		},
	}
}

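/// Derives the hardened child extended private key at `m/<hardened_child_index>'` from the given
/// seed for the configured network.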
fn derive_xprv(
	config: Arc<Config>, seed_bytes: &[u8; 64], hardened_child_index: u32, logger: Arc<Logger>,
) -> Result<Xpriv, BuildError> {
	use bitcoin::key::Secp256k1;

	let xprv = Xpriv::new_master(config.network, seed_bytes).map_err(|e| {
		log_error!(logger, "Failed to derive master secret: {}", e);
		BuildError::InvalidSeedBytes
	})?;

	xprv.derive_priv(&Secp256k1::new(), &[ChildNumber::Hardened { index: hardened_child_index }])
		.map_err(|e| {
			log_error!(logger, "Failed to derive hardened child secret: {}", e);
			BuildError::InvalidSeedBytes
		})
}

/// Sanitizes the user-provided node alias, ensuring it is a valid, protocol-compatible UTF-8
/// string of at most 32 bytes.
pub(crate) fn sanitize_alias(alias_str: &str) -> Result<NodeAlias, BuildError> {
	let alias = alias_str.trim();

	// The alias must be 32 bytes or less.
	if alias.as_bytes().len() > 32 {
		return Err(BuildError::InvalidNodeAlias);
	}

	let mut bytes = [0u8; 32];
	bytes[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes());
	Ok(NodeAlias(bytes))
}

#[cfg(test)]
mod tests {
	use super::{sanitize_alias, BuildError, NodeAlias};

	#[test]
	fn sanitize_empty_node_alias() {
		// Empty node alias
		let alias = "";
		let mut buf = [0u8; 32];
		buf[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes());

		let expected_node_alias = NodeAlias([0; 32]);
		let node_alias = sanitize_alias(alias).unwrap();
		assert_eq!(node_alias, expected_node_alias);
	}

	#[test]
	fn sanitize_alias_with_sandwiched_null() {
		// The expected alias is the emoji-containing prefix only, i.e., everything before the
		// sandwiched null byte in the user-provided alias below.
		let alias = "I\u{1F496}LDK-Node!";
		let mut buf = [0u8; 32];
		buf[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes());
		let expected_alias = NodeAlias(buf);

		let user_provided_alias = "I\u{1F496}LDK-Node!\0\u{26A1}";
		let node_alias = sanitize_alias(user_provided_alias).unwrap();

		let node_alias_display = format!("{}", node_alias);

		assert_eq!(alias, &node_alias_display);
		assert_ne!(expected_alias, node_alias);
	}

	#[test]
	fn sanitize_alias_gt_32_bytes() {
		let alias = "This is a string longer than thirty-two bytes!"; // 46 bytes
		let node = sanitize_alias(alias);
		assert_eq!(node.err().unwrap(), BuildError::InvalidNodeAlias);
	}
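
	// A small additional sketch of expected behavior (not exercised above): `sanitize_alias`
	// trims surrounding whitespace and accepts aliases of exactly 32 bytes.
	#[test]
	fn sanitize_alias_trims_and_accepts_32_bytes() {
		// Surrounding whitespace is stripped before the length check and byte copy.
		let trimmed = sanitize_alias("  LDK-Node  ").unwrap();
		let mut buf = [0u8; 32];
		buf[.."LDK-Node".len()].copy_from_slice("LDK-Node".as_bytes());
		assert_eq!(trimmed, NodeAlias(buf));

		// An alias of exactly 32 bytes is still accepted.
		let exactly_32_bytes = "a".repeat(32);
		assert!(sanitize_alias(&exactly_32_bytes).is_ok());
	}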
}