ldk_node/lib.rs

// This file is Copyright its original authors, visible in version control history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
// accordance with one or both of these licenses.

#![crate_name = "ldk_node"]

//! # LDK Node
//! A ready-to-go Lightning node library built using [LDK](https://lightningdevkit.org/) and
//! [BDK](https://bitcoindevkit.org/).
//!
//! LDK Node is a non-custodial Lightning node in library form. Its central goal is to provide a
//! small, simple, and straightforward interface that enables users to easily set up and run a
//! Lightning node with an integrated on-chain wallet. While minimalism is at its core, LDK Node
//! aims to be sufficiently modular and configurable to be useful for a variety of use cases.
//!
//! ## Getting Started
//!
//! The primary abstraction of the library is the [`Node`], which can be retrieved by setting up
//! and configuring a [`Builder`] to your liking and calling [`build`]. `Node` can then be
//! controlled via commands such as [`start`], [`stop`], [`open_channel`], [`send`], etc.:
//!
//! ```no_run
//! # #[cfg(not(feature = "uniffi"))]
//! # {
//! use ldk_node::Builder;
//! use ldk_node::lightning_invoice::Bolt11Invoice;
//! use ldk_node::lightning::ln::msgs::SocketAddress;
//! use ldk_node::bitcoin::Network;
//! use ldk_node::bitcoin::secp256k1::PublicKey;
//! use std::str::FromStr;
//!
//! fn main() {
//! 	let mut builder = Builder::new();
//! 	builder.set_network(Network::Testnet);
//! 	builder.set_chain_source_esplora("https://blockstream.info/testnet/api".to_string(), None);
//! 	builder.set_gossip_source_rgs("https://rapidsync.lightningdevkit.org/testnet/snapshot".to_string());
//!
//! 	let node = builder.build().unwrap();
//!
//! 	node.start().unwrap();
//!
//! 	let funding_address = node.onchain_payment().new_address();
//!
//! 	// .. fund address ..
//!
//! 	let node_id = PublicKey::from_str("NODE_ID").unwrap();
//! 	let node_addr = SocketAddress::from_str("IP_ADDR:PORT").unwrap();
//! 	node.open_channel(node_id, node_addr, 10000, None, None).unwrap();
//!
//! 	let event = node.wait_next_event();
//! 	println!("EVENT: {:?}", event);
//! 	node.event_handled().unwrap();
//!
//! 	let invoice = Bolt11Invoice::from_str("INVOICE_STR").unwrap();
//! 	node.bolt11_payment().send(&invoice, None).unwrap();
//!
//! 	node.stop().unwrap();
//! }
//! # }
//! ```
//!
//! [`build`]: Builder::build
//! [`start`]: Node::start
//! [`stop`]: Node::stop
//! [`open_channel`]: Node::open_channel
//! [`send`]: Bolt11Payment::send
//!
#![cfg_attr(not(feature = "uniffi"), deny(missing_docs))]
#![deny(rustdoc::broken_intra_doc_links)]
#![deny(rustdoc::private_intra_doc_links)]
#![allow(bare_trait_objects)]
#![allow(ellipsis_inclusive_range_patterns)]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]

mod balance;
mod builder;
mod chain;
pub mod config;
mod connection;
mod data_store;
mod error;
mod event;
mod fee_estimator;
mod gossip;
pub mod graph;
mod hex_utils;
pub mod io;
pub mod liquidity;
pub mod logger;
mod message_handler;
pub mod payment;
mod peer_store;
mod sweep;
mod tx_broadcaster;
mod types;
#[cfg(feature = "uniffi")]
mod uniffi_types;
mod wallet;

pub use bip39;
pub use bitcoin;
pub use lightning;
pub use lightning_invoice;
pub use lightning_liquidity;
pub use lightning_types;
pub use vss_client;

pub use balance::{BalanceDetails, LightningBalance, PendingSweepBalance};
pub use error::Error as NodeError;
use error::Error;

pub use event::Event;

pub use io::utils::generate_entropy_mnemonic;

#[cfg(feature = "uniffi")]
use uniffi_types::*;

#[cfg(feature = "uniffi")]
pub use builder::ArcedNodeBuilder as Builder;
pub use builder::BuildError;
#[cfg(not(feature = "uniffi"))]
pub use builder::NodeBuilder as Builder;

use chain::ChainSource;
use config::{
	default_user_config, may_announce_channel, ChannelConfig, Config,
	BACKGROUND_TASK_SHUTDOWN_TIMEOUT_SECS, LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS,
	NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, RGS_SYNC_INTERVAL,
};
use connection::ConnectionManager;
use event::{EventHandler, EventQueue};
use gossip::GossipSource;
use graph::NetworkGraph;
use io::utils::write_node_metrics;
use liquidity::{LSPS1Liquidity, LiquiditySource};
use payment::{
	Bolt11Payment, Bolt12Payment, OnchainPayment, PaymentDetails, SpontaneousPayment,
	UnifiedQrPayment,
};
use peer_store::{PeerInfo, PeerStore};
use types::{
	Broadcaster, BumpTransactionEventHandler, ChainMonitor, ChannelManager, DynStore, Graph,
	KeysManager, OnionMessenger, PaymentStore, PeerManager, Router, Scorer, Sweeper, Wallet,
};
pub use types::{ChannelDetails, CustomTlvRecord, PeerDetails, UserChannelId};

use logger::{log_debug, log_error, log_info, log_trace, LdkLogger, Logger};

use lightning::chain::BestBlock;
use lightning::events::bump_transaction::Wallet as LdkWallet;
use lightning::impl_writeable_tlv_based;
use lightning::ln::channel_state::ChannelShutdownState;
use lightning::ln::channelmanager::PaymentId;
use lightning::ln::msgs::SocketAddress;
use lightning::routing::gossip::NodeAlias;

use lightning_background_processor::process_events_async;

use bitcoin::secp256k1::PublicKey;

use rand::Rng;

use std::default::Default;
use std::net::ToSocketAddrs;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex, RwLock};
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};

#[cfg(feature = "uniffi")]
uniffi::include_scaffolding!("ldk_node");

/// The main interface object of LDK Node, wrapping the necessary LDK and BDK functionalities.
///
/// Needs to be initialized and instantiated through [`Builder::build`].
pub struct Node {
	runtime: Arc<RwLock<Option<Arc<tokio::runtime::Runtime>>>>,
	stop_sender: tokio::sync::watch::Sender<()>,
	background_processor_task: Mutex<Option<tokio::task::JoinHandle<()>>>,
	background_tasks: Mutex<Option<tokio::task::JoinSet<()>>>,
	cancellable_background_tasks: Mutex<Option<tokio::task::JoinSet<()>>>,
	config: Arc<Config>,
	wallet: Arc<Wallet>,
	chain_source: Arc<ChainSource>,
	tx_broadcaster: Arc<Broadcaster>,
	event_queue: Arc<EventQueue<Arc<Logger>>>,
	channel_manager: Arc<ChannelManager>,
	chain_monitor: Arc<ChainMonitor>,
	output_sweeper: Arc<Sweeper>,
	peer_manager: Arc<PeerManager>,
	onion_messenger: Arc<OnionMessenger>,
	connection_manager: Arc<ConnectionManager<Arc<Logger>>>,
	keys_manager: Arc<KeysManager>,
	network_graph: Arc<Graph>,
	gossip_source: Arc<GossipSource>,
	liquidity_source: Option<Arc<LiquiditySource<Arc<Logger>>>>,
	kv_store: Arc<DynStore>,
	logger: Arc<Logger>,
	_router: Arc<Router>,
	scorer: Arc<Mutex<Scorer>>,
	peer_store: Arc<PeerStore<Arc<Logger>>>,
	payment_store: Arc<PaymentStore>,
	is_listening: Arc<AtomicBool>,
	node_metrics: Arc<RwLock<NodeMetrics>>,
}

impl Node {
	/// Starts the necessary background tasks, such as handling events coming from user input,
	/// LDK/BDK, and the peer-to-peer network.
	///
	/// After this returns, the [`Node`] instance can be controlled via the provided API methods in
	/// a thread-safe manner.
	pub fn start(&self) -> Result<(), Error> {
		let runtime =
			Arc::new(tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap());
		self.start_with_runtime(runtime)
	}

	/// Starts the necessary background tasks (such as handling events coming from user input,
	/// LDK/BDK, and the peer-to-peer network) on the given `runtime`.
	///
	/// This allows LDK Node to reuse an outer, pre-existing runtime, e.g., to avoid stacking
	/// Tokio runtime contexts.
	///
	/// After this returns, the [`Node`] instance can be controlled via the provided API methods in
	/// a thread-safe manner.
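	///
	/// For example, a minimal sketch of reusing an outer runtime could look as follows (the
	/// builder configuration is elided here and would be set up as in the crate-level example):
	/// ```no_run
	/// # use ldk_node::Builder;
	/// # use std::sync::Arc;
	/// // Build (or reuse) a multi-threaded Tokio runtime shared with the rest of the application.
	/// let runtime = Arc::new(
	/// 	tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap(),
	/// );
	///
	/// let node = Builder::new().build().unwrap();
	/// node.start_with_runtime(Arc::clone(&runtime)).unwrap();
	/// ```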
	pub fn start_with_runtime(&self, runtime: Arc<tokio::runtime::Runtime>) -> Result<(), Error> {
		// Acquire a run lock and hold it until we're setup.
		let mut runtime_lock = self.runtime.write().unwrap();
		if runtime_lock.is_some() {
			// We're already running.
			return Err(Error::AlreadyRunning);
		}

		let mut background_tasks = tokio::task::JoinSet::new();
		let mut cancellable_background_tasks = tokio::task::JoinSet::new();
		let runtime_handle = runtime.handle();

		log_info!(
			self.logger,
			"Starting up LDK Node with node ID {} on network: {}",
			self.node_id(),
			self.config.network
		);

		// Start up any runtime-dependent chain sources (e.g., Electrum)
		self.chain_source.start(Arc::clone(&runtime)).map_err(|e| {
			log_error!(self.logger, "Failed to start chain syncing: {}", e);
			e
		})?;

		// Block to ensure we update our fee rate cache once on startup
		let chain_source = Arc::clone(&self.chain_source);
		let runtime_ref = &runtime;
		tokio::task::block_in_place(move || {
			runtime_ref.block_on(async move { chain_source.update_fee_rate_estimates().await })
		})?;

		// Spawn background task continuously syncing onchain, lightning, and fee rate cache.
		let stop_sync_receiver = self.stop_sender.subscribe();
		let chain_source = Arc::clone(&self.chain_source);
		let sync_cman = Arc::clone(&self.channel_manager);
		let sync_cmon = Arc::clone(&self.chain_monitor);
		let sync_sweeper = Arc::clone(&self.output_sweeper);
		background_tasks.spawn_on(
			async move {
				chain_source
					.continuously_sync_wallets(
						stop_sync_receiver,
						sync_cman,
						sync_cmon,
						sync_sweeper,
					)
					.await;
			},
			runtime_handle,
		);

		if self.gossip_source.is_rgs() {
			let gossip_source = Arc::clone(&self.gossip_source);
			let gossip_sync_store = Arc::clone(&self.kv_store);
			let gossip_sync_logger = Arc::clone(&self.logger);
			let gossip_node_metrics = Arc::clone(&self.node_metrics);
			let mut stop_gossip_sync = self.stop_sender.subscribe();
			cancellable_background_tasks.spawn_on(async move {
				let mut interval = tokio::time::interval(RGS_SYNC_INTERVAL);
				loop {
					tokio::select! {
						_ = stop_gossip_sync.changed() => {
							log_debug!(
								gossip_sync_logger,
								"Stopping background syncing RGS gossip data.",
							);
							return;
						}
						_ = interval.tick() => {
							let gossip_sync_logger = Arc::clone(&gossip_sync_logger);
							let now = Instant::now();
							match gossip_source.update_rgs_snapshot().await {
								Ok(updated_timestamp) => {
									log_trace!(
										gossip_sync_logger,
										"Background sync of RGS gossip data finished in {}ms.",
										now.elapsed().as_millis()
									);
									{
										let mut locked_node_metrics = gossip_node_metrics.write().unwrap();
										locked_node_metrics.latest_rgs_snapshot_timestamp = Some(updated_timestamp);
										write_node_metrics(&*locked_node_metrics, Arc::clone(&gossip_sync_store), Arc::clone(&gossip_sync_logger))
											.unwrap_or_else(|e| {
												log_error!(gossip_sync_logger, "Persistence failed: {}", e);
											});
									}
								}
								Err(e) => {
									log_error!(
										gossip_sync_logger,
										"Background sync of RGS gossip data failed: {}",
										e
									)
								}
							}
						}
					}
				}
			}, runtime_handle);
		}

		if let Some(listening_addresses) = &self.config.listening_addresses {
			// Setup networking
			let peer_manager_connection_handler = Arc::clone(&self.peer_manager);
			let mut stop_listen = self.stop_sender.subscribe();
			let listening_logger = Arc::clone(&self.logger);
			let listening_indicator = Arc::clone(&self.is_listening);

			let mut bind_addrs = Vec::with_capacity(listening_addresses.len());

			for listening_addr in listening_addresses {
				let resolved_address = listening_addr.to_socket_addrs().map_err(|e| {
					log_error!(
						self.logger,
						"Unable to resolve listening address: {:?}. Error details: {}",
						listening_addr,
						e,
					);
					Error::InvalidSocketAddress
				})?;

				bind_addrs.extend(resolved_address);
			}

			cancellable_background_tasks.spawn_on(async move {
				{
					let listener =
						tokio::net::TcpListener::bind(&*bind_addrs).await.unwrap_or_else(|e| {
							log_error!(listening_logger, "Failed to bind to listen addresses/ports - is something else already listening on it?: {}", e);
							panic!(
								"Failed to bind to listen address/port - is something else already listening on it?",
							);
						});

					listening_indicator.store(true, Ordering::Release);

					loop {
						let peer_mgr = Arc::clone(&peer_manager_connection_handler);
						tokio::select! {
							_ = stop_listen.changed() => {
								log_debug!(
									listening_logger,
									"Stopping listening to inbound connections."
								);
								break;
							}
							res = listener.accept() => {
								let tcp_stream = res.unwrap().0;
								tokio::spawn(async move {
									lightning_net_tokio::setup_inbound(
										Arc::clone(&peer_mgr),
										tcp_stream.into_std().unwrap(),
									)
									.await;
								});
							}
						}
					}
				}

				listening_indicator.store(false, Ordering::Release);
			}, runtime_handle);
		}

		// Regularly reconnect to persisted peers.
		let connect_cm = Arc::clone(&self.connection_manager);
		let connect_pm = Arc::clone(&self.peer_manager);
		let connect_logger = Arc::clone(&self.logger);
		let connect_peer_store = Arc::clone(&self.peer_store);
		let mut stop_connect = self.stop_sender.subscribe();
		cancellable_background_tasks.spawn_on(async move {
			let mut interval = tokio::time::interval(PEER_RECONNECTION_INTERVAL);
			interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);
			loop {
				tokio::select! {
					_ = stop_connect.changed() => {
						log_debug!(
							connect_logger,
							"Stopping reconnecting known peers."
						);
						return;
					}
					_ = interval.tick() => {
						let pm_peers = connect_pm
							.list_peers()
							.iter()
							.map(|peer| peer.counterparty_node_id)
							.collect::<Vec<_>>();

						for peer_info in connect_peer_store.list_peers().iter().filter(|info| !pm_peers.contains(&info.node_id)) {
							let _ = connect_cm.do_connect_peer(
								peer_info.node_id,
								peer_info.address.clone(),
							).await;
						}
					}
				}
			}
		}, runtime_handle);

		// Regularly broadcast node announcements.
		let bcast_cm = Arc::clone(&self.channel_manager);
		let bcast_pm = Arc::clone(&self.peer_manager);
		let bcast_config = Arc::clone(&self.config);
		let bcast_store = Arc::clone(&self.kv_store);
		let bcast_logger = Arc::clone(&self.logger);
		let bcast_node_metrics = Arc::clone(&self.node_metrics);
		let mut stop_bcast = self.stop_sender.subscribe();
		let node_alias = self.config.node_alias.clone();
		if may_announce_channel(&self.config).is_ok() {
			cancellable_background_tasks.spawn_on(async move {
				// We check every 30 secs whether our last broadcast is NODE_ANN_BCAST_INTERVAL away.
				#[cfg(not(test))]
				let mut interval = tokio::time::interval(Duration::from_secs(30));
				#[cfg(test)]
				let mut interval = tokio::time::interval(Duration::from_secs(5));
				loop {
					tokio::select! {
						_ = stop_bcast.changed() => {
							log_debug!(
								bcast_logger,
								"Stopping broadcasting node announcements.",
							);
							return;
						}
						_ = interval.tick() => {
							let skip_broadcast = match bcast_node_metrics.read().unwrap().latest_node_announcement_broadcast_timestamp {
								Some(latest_bcast_time_secs) => {
									// Skip if the time hasn't elapsed yet.
									let next_bcast_unix_time = SystemTime::UNIX_EPOCH + Duration::from_secs(latest_bcast_time_secs) + NODE_ANN_BCAST_INTERVAL;
									next_bcast_unix_time.elapsed().is_err()
								}
								None => {
									// Don't skip if we haven't broadcasted before.
									false
								}
							};

							if skip_broadcast {
								continue;
							}

							if !bcast_cm.list_channels().iter().any(|chan| chan.is_announced && chan.is_channel_ready) {
								// Skip if we don't have any public channels that are ready.
								continue;
							}

							if bcast_pm.list_peers().is_empty() {
								// Skip if we don't have any connected peers to gossip to.
								continue;
							}

							let addresses = if let Some(announcement_addresses) = bcast_config.announcement_addresses.clone() {
								announcement_addresses
							} else if let Some(listening_addresses) = bcast_config.listening_addresses.clone() {
								listening_addresses
							} else {
								debug_assert!(false, "We checked whether the node may announce, so listening addresses should always be set");
								continue;
							};

							if let Some(node_alias) = node_alias.as_ref() {
								bcast_pm.broadcast_node_announcement([0; 3], node_alias.0, addresses);

								let unix_time_secs_opt =
									SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs());
								{
									let mut locked_node_metrics = bcast_node_metrics.write().unwrap();
									locked_node_metrics.latest_node_announcement_broadcast_timestamp = unix_time_secs_opt;
									write_node_metrics(&*locked_node_metrics, Arc::clone(&bcast_store), Arc::clone(&bcast_logger))
										.unwrap_or_else(|e| {
											log_error!(bcast_logger, "Persistence failed: {}", e);
										});
								}
							} else {
								debug_assert!(false, "We checked whether the node may announce, so node alias should always be set");
								continue
							}
						}
					}
				}
			}, runtime_handle);
		}

		let mut stop_tx_bcast = self.stop_sender.subscribe();
		let chain_source = Arc::clone(&self.chain_source);
		let tx_bcast_logger = Arc::clone(&self.logger);
		runtime.spawn(async move {
			// Every second we try to clear our broadcasting queue.
			let mut interval = tokio::time::interval(Duration::from_secs(1));
			interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);
			loop {
				tokio::select! {
					_ = stop_tx_bcast.changed() => {
						log_debug!(
							tx_bcast_logger,
							"Stopping broadcasting transactions.",
						);
						return;
					}
					_ = interval.tick() => {
						chain_source.process_broadcast_queue().await;
					}
				}
			}
		});

		let bump_tx_event_handler = Arc::new(BumpTransactionEventHandler::new(
			Arc::clone(&self.tx_broadcaster),
			Arc::new(LdkWallet::new(Arc::clone(&self.wallet), Arc::clone(&self.logger))),
			Arc::clone(&self.keys_manager),
			Arc::clone(&self.logger),
		));

		let event_handler = Arc::new(EventHandler::new(
			Arc::clone(&self.event_queue),
			Arc::clone(&self.wallet),
			bump_tx_event_handler,
			Arc::clone(&self.channel_manager),
			Arc::clone(&self.connection_manager),
			Arc::clone(&self.output_sweeper),
			Arc::clone(&self.network_graph),
			self.liquidity_source.clone(),
			Arc::clone(&self.payment_store),
			Arc::clone(&self.peer_store),
			Arc::clone(&self.runtime),
			Arc::clone(&self.logger),
			Arc::clone(&self.config),
		));

		// Setup background processing
		let background_persister = Arc::clone(&self.kv_store);
		let background_event_handler = Arc::clone(&event_handler);
		let background_chain_mon = Arc::clone(&self.chain_monitor);
		let background_chan_man = Arc::clone(&self.channel_manager);
		let background_gossip_sync = self.gossip_source.as_gossip_sync();
		let background_peer_man = Arc::clone(&self.peer_manager);
		let background_onion_messenger = Arc::clone(&self.onion_messenger);
		let background_logger = Arc::clone(&self.logger);
		let background_error_logger = Arc::clone(&self.logger);
		let background_scorer = Arc::clone(&self.scorer);
		let stop_bp = self.stop_sender.subscribe();
		let sleeper_logger = Arc::clone(&self.logger);
		let sleeper = move |d| {
			let mut stop = stop_bp.clone();
			let sleeper_logger = Arc::clone(&sleeper_logger);
			Box::pin(async move {
				tokio::select! {
					_ = stop.changed() => {
						log_debug!(
							sleeper_logger,
							"Stopping processing events.",
						);
						true
					}
					_ = tokio::time::sleep(d) => {
						false
					}
				}
			})
		};

		let handle = runtime.spawn(async move {
			process_events_async(
				background_persister,
				|e| background_event_handler.handle_event(e),
				background_chain_mon,
				background_chan_man,
				Some(background_onion_messenger),
				background_gossip_sync,
				background_peer_man,
				background_logger,
				Some(background_scorer),
				sleeper,
				true,
				|| Some(SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap()),
			)
			.await
			.unwrap_or_else(|e| {
				log_error!(background_error_logger, "Failed to process events: {}", e);
				panic!("Failed to process events");
			});
		});
		debug_assert!(self.background_processor_task.lock().unwrap().is_none());
		*self.background_processor_task.lock().unwrap() = Some(handle);

		if let Some(liquidity_source) = self.liquidity_source.as_ref() {
			let mut stop_liquidity_handler = self.stop_sender.subscribe();
			let liquidity_handler = Arc::clone(&liquidity_source);
			let liquidity_logger = Arc::clone(&self.logger);
			background_tasks.spawn_on(
				async move {
					loop {
						tokio::select! {
							_ = stop_liquidity_handler.changed() => {
								log_debug!(
									liquidity_logger,
									"Stopping processing liquidity events.",
								);
								return;
							}
							_ = liquidity_handler.handle_next_event() => {}
						}
					}
				},
				runtime_handle,
			);
		}

		*runtime_lock = Some(runtime);

		debug_assert!(self.background_tasks.lock().unwrap().is_none());
		*self.background_tasks.lock().unwrap() = Some(background_tasks);

		debug_assert!(self.cancellable_background_tasks.lock().unwrap().is_none());
		*self.cancellable_background_tasks.lock().unwrap() = Some(cancellable_background_tasks);

		log_info!(self.logger, "Startup complete.");
		Ok(())
	}

	/// Disconnects all peers, stops all running background tasks, and shuts down the [`Node`].
	///
	/// After this returns, most API methods will return [`Error::NotRunning`].
	pub fn stop(&self) -> Result<(), Error> {
		let runtime = self.runtime.write().unwrap().take().ok_or(Error::NotRunning)?;
		#[cfg(tokio_unstable)]
		let metrics_runtime = Arc::clone(&runtime);

		log_info!(self.logger, "Shutting down LDK Node with node ID {}...", self.node_id());

		// Stop the runtime.
		match self.stop_sender.send(()) {
			Ok(_) => log_trace!(self.logger, "Sent shutdown signal to background tasks."),
			Err(e) => {
				log_error!(
					self.logger,
					"Failed to send shutdown signal. This should never happen: {}",
					e
				);
				debug_assert!(false);
			},
		}

		// Cancel cancellable background tasks
		if let Some(mut tasks) = self.cancellable_background_tasks.lock().unwrap().take() {
			let runtime_2 = Arc::clone(&runtime);
			tasks.abort_all();
			tokio::task::block_in_place(move || {
				runtime_2.block_on(async { while let Some(_) = tasks.join_next().await {} })
			});
		} else {
			debug_assert!(false, "Expected some cancellable background tasks");
		};

		// Disconnect all peers.
		self.peer_manager.disconnect_all_peers();
		log_debug!(self.logger, "Disconnected all network peers.");

		// Stop any runtime-dependent chain sources.
		self.chain_source.stop();
		log_debug!(self.logger, "Stopped chain sources.");

		// Wait until non-cancellable background tasks (except LDK's background processor) are done.
		let runtime_3 = Arc::clone(&runtime);
		if let Some(mut tasks) = self.background_tasks.lock().unwrap().take() {
			tokio::task::block_in_place(move || {
				runtime_3.block_on(async {
					loop {
						let timeout_fut = tokio::time::timeout(
							Duration::from_secs(BACKGROUND_TASK_SHUTDOWN_TIMEOUT_SECS),
							tasks.join_next_with_id(),
						);
						match timeout_fut.await {
							Ok(Some(Ok((id, _)))) => {
								log_trace!(self.logger, "Stopped background task with id {}", id);
							},
							Ok(Some(Err(e))) => {
								tasks.abort_all();
								log_trace!(self.logger, "Stopping background task failed: {}", e);
								break;
							},
							Ok(None) => {
								log_debug!(self.logger, "Stopped all background tasks");
								break;
							},
							Err(e) => {
								tasks.abort_all();
								log_error!(
									self.logger,
									"Stopping background task timed out: {}",
									e
								);
								break;
							},
						}
					}
				})
			});
		} else {
			debug_assert!(false, "Expected some background tasks");
		};

		// Wait until background processing stopped, or at least until a timeout is reached.
		if let Some(background_processor_task) =
			self.background_processor_task.lock().unwrap().take()
		{
			let abort_handle = background_processor_task.abort_handle();
			let timeout_res = tokio::task::block_in_place(move || {
				runtime.block_on(async {
					tokio::time::timeout(
						Duration::from_secs(LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS),
						background_processor_task,
					)
					.await
				})
			});

			match timeout_res {
				Ok(stop_res) => match stop_res {
					Ok(()) => log_debug!(self.logger, "Stopped background processing of events."),
					Err(e) => {
						abort_handle.abort();
						log_error!(
							self.logger,
							"Stopping event handling failed. This should never happen: {}",
							e
						);
						panic!("Stopping event handling failed. This should never happen.");
					},
				},
				Err(e) => {
					abort_handle.abort();
					log_error!(self.logger, "Stopping event handling timed out: {}", e);
				},
			}
		} else {
			debug_assert!(false, "Expected a background processing task");
		};

		#[cfg(tokio_unstable)]
		{
			log_trace!(
				self.logger,
				"Active runtime tasks left prior to shutdown: {}",
				metrics_runtime.metrics().active_tasks_count()
			);
		}

		log_info!(self.logger, "Shutdown complete.");
		Ok(())
	}

	/// Returns the status of the [`Node`].
	pub fn status(&self) -> NodeStatus {
		let is_running = self.runtime.read().unwrap().is_some();
		let is_listening = self.is_listening.load(Ordering::Acquire);
		let current_best_block = self.channel_manager.current_best_block().into();
		let locked_node_metrics = self.node_metrics.read().unwrap();
		let latest_lightning_wallet_sync_timestamp =
			locked_node_metrics.latest_lightning_wallet_sync_timestamp;
		let latest_onchain_wallet_sync_timestamp =
			locked_node_metrics.latest_onchain_wallet_sync_timestamp;
		let latest_fee_rate_cache_update_timestamp =
			locked_node_metrics.latest_fee_rate_cache_update_timestamp;
		let latest_rgs_snapshot_timestamp =
			locked_node_metrics.latest_rgs_snapshot_timestamp.map(|val| val as u64);
		let latest_node_announcement_broadcast_timestamp =
			locked_node_metrics.latest_node_announcement_broadcast_timestamp;
		let latest_channel_monitor_archival_height =
			locked_node_metrics.latest_channel_monitor_archival_height;

		NodeStatus {
			is_running,
			is_listening,
			current_best_block,
			latest_lightning_wallet_sync_timestamp,
			latest_onchain_wallet_sync_timestamp,
			latest_fee_rate_cache_update_timestamp,
			latest_rgs_snapshot_timestamp,
			latest_node_announcement_broadcast_timestamp,
			latest_channel_monitor_archival_height,
		}
	}

	/// Returns the config with which the [`Node`] was initialized.
	pub fn config(&self) -> Config {
		self.config.as_ref().clone()
	}

	/// Returns the next event in the event queue, if currently available.
	///
	/// Will return `Some(..)` if an event is available and `None` otherwise.
	///
	/// **Note:** this will always return the same event until handling is confirmed via [`Node::event_handled`].
	///
	/// **Caution:** Users must handle events as quickly as possible to prevent a large event backlog,
	/// which can increase the memory footprint of [`Node`].
	pub fn next_event(&self) -> Option<Event> {
		self.event_queue.next_event()
	}

	/// Returns the next event in the event queue.
	///
	/// Will asynchronously poll the event queue until the next event is ready.
	///
	/// **Note:** this will always return the same event until handling is confirmed via [`Node::event_handled`].
	///
	/// **Caution:** Users must handle events as quickly as possible to prevent a large event backlog,
	/// which can increase the memory footprint of [`Node`].
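	///
	/// For example, a sketch of a minimal asynchronous event loop could look as follows,
	/// assuming `node` is a started [`Node`] and we're already in an async context:
	/// ```no_run
	/// # async fn event_loop(node: ldk_node::Node) {
	/// loop {
	/// 	let event = node.next_event_async().await;
	/// 	// ... match on the event and handle it here ...
	/// 	node.event_handled().unwrap();
	/// }
	/// # }
	/// ```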
	pub async fn next_event_async(&self) -> Event {
		self.event_queue.next_event_async().await
	}

	/// Returns the next event in the event queue.
	///
	/// Will block the current thread until the next event is available.
	///
	/// **Note:** this will always return the same event until handling is confirmed via [`Node::event_handled`].
	///
	/// **Caution:** Users must handle events as quickly as possible to prevent a large event backlog,
	/// which can increase the memory footprint of [`Node`].
	pub fn wait_next_event(&self) -> Event {
		self.event_queue.wait_next_event()
	}

	/// Confirm that the last retrieved event was handled.
	///
	/// **Note:** This **MUST** be called after each event has been handled.
	pub fn event_handled(&self) -> Result<(), Error> {
		self.event_queue.event_handled().map_err(|e| {
			log_error!(
				self.logger,
				"Couldn't mark event handled due to persistence failure: {}",
				e
			);
			e
		})
	}

	/// Returns our own node id.
	pub fn node_id(&self) -> PublicKey {
		self.channel_manager.get_our_node_id()
	}

	/// Returns our own listening addresses.
	pub fn listening_addresses(&self) -> Option<Vec<SocketAddress>> {
		self.config.listening_addresses.clone()
	}

	/// Returns the addresses that the node will announce to the network.
	pub fn announcement_addresses(&self) -> Option<Vec<SocketAddress>> {
		self.config
			.announcement_addresses
			.clone()
			.or_else(|| self.config.listening_addresses.clone())
	}

	/// Returns our node alias.
	pub fn node_alias(&self) -> Option<NodeAlias> {
		self.config.node_alias
	}

	/// Returns a payment handler for creating and paying [BOLT 11] invoices.
	///
	/// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md
	#[cfg(not(feature = "uniffi"))]
	pub fn bolt11_payment(&self) -> Bolt11Payment {
		Bolt11Payment::new(
			Arc::clone(&self.runtime),
			Arc::clone(&self.channel_manager),
			Arc::clone(&self.connection_manager),
			self.liquidity_source.clone(),
			Arc::clone(&self.payment_store),
			Arc::clone(&self.peer_store),
			Arc::clone(&self.config),
			Arc::clone(&self.logger),
		)
	}

	/// Returns a payment handler for creating and paying [BOLT 11] invoices.
	///
	/// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md
	#[cfg(feature = "uniffi")]
	pub fn bolt11_payment(&self) -> Arc<Bolt11Payment> {
		Arc::new(Bolt11Payment::new(
			Arc::clone(&self.runtime),
			Arc::clone(&self.channel_manager),
			Arc::clone(&self.connection_manager),
			self.liquidity_source.clone(),
			Arc::clone(&self.payment_store),
			Arc::clone(&self.peer_store),
			Arc::clone(&self.config),
			Arc::clone(&self.logger),
		))
	}

	/// Returns a payment handler for creating and paying [BOLT 12] offers and refunds.
	///
	/// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md
	#[cfg(not(feature = "uniffi"))]
	pub fn bolt12_payment(&self) -> Bolt12Payment {
		Bolt12Payment::new(
			Arc::clone(&self.runtime),
			Arc::clone(&self.channel_manager),
			Arc::clone(&self.payment_store),
			Arc::clone(&self.logger),
		)
	}

	/// Returns a payment handler for creating and paying [BOLT 12] offers and refunds.
	///
	/// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md
	#[cfg(feature = "uniffi")]
	pub fn bolt12_payment(&self) -> Arc<Bolt12Payment> {
		Arc::new(Bolt12Payment::new(
			Arc::clone(&self.runtime),
			Arc::clone(&self.channel_manager),
			Arc::clone(&self.payment_store),
			Arc::clone(&self.logger),
		))
	}

	/// Returns a payment handler for sending spontaneous ("keysend") payments.
	#[cfg(not(feature = "uniffi"))]
	pub fn spontaneous_payment(&self) -> SpontaneousPayment {
		SpontaneousPayment::new(
			Arc::clone(&self.runtime),
			Arc::clone(&self.channel_manager),
			Arc::clone(&self.keys_manager),
			Arc::clone(&self.payment_store),
			Arc::clone(&self.config),
			Arc::clone(&self.logger),
		)
	}

	/// Returns a payment handler for sending spontaneous ("keysend") payments.
	#[cfg(feature = "uniffi")]
	pub fn spontaneous_payment(&self) -> Arc<SpontaneousPayment> {
		Arc::new(SpontaneousPayment::new(
			Arc::clone(&self.runtime),
			Arc::clone(&self.channel_manager),
			Arc::clone(&self.keys_manager),
			Arc::clone(&self.payment_store),
			Arc::clone(&self.config),
			Arc::clone(&self.logger),
		))
	}

	/// Returns a payment handler for sending and receiving on-chain payments.
	#[cfg(not(feature = "uniffi"))]
	pub fn onchain_payment(&self) -> OnchainPayment {
		OnchainPayment::new(
			Arc::clone(&self.runtime),
			Arc::clone(&self.wallet),
			Arc::clone(&self.channel_manager),
			Arc::clone(&self.config),
			Arc::clone(&self.logger),
		)
	}

	/// Returns a payment handler for sending and receiving on-chain payments.
	#[cfg(feature = "uniffi")]
	pub fn onchain_payment(&self) -> Arc<OnchainPayment> {
		Arc::new(OnchainPayment::new(
			Arc::clone(&self.runtime),
			Arc::clone(&self.wallet),
			Arc::clone(&self.channel_manager),
			Arc::clone(&self.config),
			Arc::clone(&self.logger),
		))
	}

	/// Returns a payment handler for creating [BIP 21] URIs with on-chain, [BOLT 11],
	/// and [BOLT 12] payment options.
	///
	/// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md
	/// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md
	/// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki
	#[cfg(not(feature = "uniffi"))]
	pub fn unified_qr_payment(&self) -> UnifiedQrPayment {
		UnifiedQrPayment::new(
			self.onchain_payment().into(),
			self.bolt11_payment().into(),
			self.bolt12_payment().into(),
			Arc::clone(&self.config),
			Arc::clone(&self.logger),
		)
	}

	/// Returns a payment handler for creating [BIP 21] URIs with on-chain, [BOLT 11],
	/// and [BOLT 12] payment options.
	///
	/// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md
	/// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md
	/// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki
	#[cfg(feature = "uniffi")]
	pub fn unified_qr_payment(&self) -> Arc<UnifiedQrPayment> {
		Arc::new(UnifiedQrPayment::new(
			self.onchain_payment(),
			self.bolt11_payment(),
			self.bolt12_payment(),
			Arc::clone(&self.config),
			Arc::clone(&self.logger),
		))
	}

	/// Returns a liquidity handler for requesting channels via the [bLIP-51 / LSPS1] protocol.
	///
	/// [bLIP-51 / LSPS1]: https://github.com/lightning/blips/blob/master/blip-0051.md
	#[cfg(not(feature = "uniffi"))]
	pub fn lsps1_liquidity(&self) -> LSPS1Liquidity {
		LSPS1Liquidity::new(
			Arc::clone(&self.runtime),
			Arc::clone(&self.wallet),
			Arc::clone(&self.connection_manager),
			self.liquidity_source.clone(),
			Arc::clone(&self.logger),
		)
	}

	/// Returns a liquidity handler for requesting channels via the [bLIP-51 / LSPS1] protocol.
	///
	/// [bLIP-51 / LSPS1]: https://github.com/lightning/blips/blob/master/blip-0051.md
	#[cfg(feature = "uniffi")]
	pub fn lsps1_liquidity(&self) -> Arc<LSPS1Liquidity> {
		Arc::new(LSPS1Liquidity::new(
			Arc::clone(&self.runtime),
			Arc::clone(&self.wallet),
			Arc::clone(&self.connection_manager),
			self.liquidity_source.clone(),
			Arc::clone(&self.logger),
		))
	}

	/// Retrieve a list of known channels.
	pub fn list_channels(&self) -> Vec<ChannelDetails> {
		self.channel_manager.list_channels().into_iter().map(|c| c.into()).collect()
	}

	/// Connect to a node on the peer-to-peer network.
	///
	/// If `persist` is set to `true`, we'll remember the peer and reconnect to it on restart.
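	///
	/// For example, a sketch with placeholder values, assuming `node` is a started [`Node`]:
	/// ```no_run
	/// # use std::str::FromStr;
	/// # use ldk_node::bitcoin::secp256k1::PublicKey;
	/// # use ldk_node::lightning::ln::msgs::SocketAddress;
	/// # fn connect_example(node: &ldk_node::Node) {
	/// let node_id = PublicKey::from_str("NODE_ID").unwrap();
	/// let address = SocketAddress::from_str("IP_ADDR:PORT").unwrap();
	/// // Connect and persist the peer, so we'll try to reconnect to it on restart.
	/// node.connect(node_id, address, true).unwrap();
	/// # }
	/// ```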
	pub fn connect(
		&self, node_id: PublicKey, address: SocketAddress, persist: bool,
	) -> Result<(), Error> {
		let rt_lock = self.runtime.read().unwrap();
		if rt_lock.is_none() {
			return Err(Error::NotRunning);
		}
		let runtime = rt_lock.as_ref().unwrap();

		let peer_info = PeerInfo { node_id, address };

		let con_node_id = peer_info.node_id;
		let con_addr = peer_info.address.clone();
		let con_cm = Arc::clone(&self.connection_manager);

		// We need to use our main runtime here as a local runtime might not be around to poll
		// connection futures going forward.
		tokio::task::block_in_place(move || {
			runtime.block_on(async move {
				con_cm.connect_peer_if_necessary(con_node_id, con_addr).await
			})
		})?;

		log_info!(self.logger, "Connected to peer {}@{}. ", peer_info.node_id, peer_info.address);

		if persist {
			self.peer_store.add_peer(peer_info)?;
		}

		Ok(())
	}

	/// Disconnects the peer with the given node id.
	///
	/// Will also remove the peer from the peer store, i.e., after this has been called we won't
	/// try to reconnect on restart.
	pub fn disconnect(&self, counterparty_node_id: PublicKey) -> Result<(), Error> {
		let rt_lock = self.runtime.read().unwrap();
		if rt_lock.is_none() {
			return Err(Error::NotRunning);
		}

		log_info!(self.logger, "Disconnecting peer {}..", counterparty_node_id);

		match self.peer_store.remove_peer(&counterparty_node_id) {
			Ok(()) => {},
			Err(e) => {
				log_error!(self.logger, "Failed to remove peer {}: {}", counterparty_node_id, e)
			},
		}

		self.peer_manager.disconnect_by_node_id(counterparty_node_id);
		Ok(())
	}

	fn open_channel_inner(
		&self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: u64,
		push_to_counterparty_msat: Option<u64>, channel_config: Option<ChannelConfig>,
		announce_for_forwarding: bool,
	) -> Result<UserChannelId, Error> {
		let rt_lock = self.runtime.read().unwrap();
		if rt_lock.is_none() {
			return Err(Error::NotRunning);
		}
		let runtime = rt_lock.as_ref().unwrap();

		let peer_info = PeerInfo { node_id, address };

		let con_node_id = peer_info.node_id;
		let con_addr = peer_info.address.clone();
		let con_cm = Arc::clone(&self.connection_manager);

		let cur_anchor_reserve_sats =
			total_anchor_channels_reserve_sats(&self.channel_manager, &self.config);
		let spendable_amount_sats =
			self.wallet.get_spendable_amount_sats(cur_anchor_reserve_sats).unwrap_or(0);

		// Fail early if we have less than the channel value available.
		if spendable_amount_sats < channel_amount_sats {
			log_error!(self.logger,
				"Unable to create channel due to insufficient funds. Available: {}sats, Required: {}sats",
				spendable_amount_sats, channel_amount_sats
			);
			return Err(Error::InsufficientFunds);
		}

		// We need to use our main runtime here as a local runtime might not be around to poll
		// connection futures going forward.
		tokio::task::block_in_place(move || {
			runtime.block_on(async move {
				con_cm.connect_peer_if_necessary(con_node_id, con_addr).await
			})
		})?;

		// Fail if we have less than the channel value + anchor reserve available (if applicable).
		let init_features = self
			.peer_manager
			.peer_by_node_id(&node_id)
			.ok_or(Error::ConnectionFailed)?
			.init_features;
		let required_funds_sats = channel_amount_sats
			+ self.config.anchor_channels_config.as_ref().map_or(0, |c| {
				if init_features.requires_anchors_zero_fee_htlc_tx()
					&& !c.trusted_peers_no_reserve.contains(&node_id)
				{
					c.per_channel_reserve_sats
				} else {
					0
				}
			});

		if spendable_amount_sats < required_funds_sats {
			log_error!(self.logger,
				"Unable to create channel due to insufficient funds. Available: {}sats, Required: {}sats",
				spendable_amount_sats, required_funds_sats
			);
			return Err(Error::InsufficientFunds);
		}

		let mut user_config = default_user_config(&self.config);
		user_config.channel_handshake_config.announce_for_forwarding = announce_for_forwarding;
		user_config.channel_config = (channel_config.unwrap_or_default()).clone().into();
		// We set the max inflight to 100% for private channels.
		// FIXME: LDK will default to this behavior soon, too, at which point we should drop this
		// manual override.
		if !announce_for_forwarding {
			user_config
				.channel_handshake_config
				.max_inbound_htlc_value_in_flight_percent_of_channel = 100;
		}

		let push_msat = push_to_counterparty_msat.unwrap_or(0);
		let user_channel_id: u128 = rand::thread_rng().gen::<u128>();

		match self.channel_manager.create_channel(
			peer_info.node_id,
			channel_amount_sats,
			push_msat,
			user_channel_id,
			None,
			Some(user_config),
		) {
			Ok(_) => {
				log_info!(
					self.logger,
					"Initiated channel creation with peer {}. ",
					peer_info.node_id
				);
				self.peer_store.add_peer(peer_info)?;
				Ok(UserChannelId(user_channel_id))
			},
			Err(e) => {
				log_error!(self.logger, "Failed to initiate channel creation: {:?}", e);
				Err(Error::ChannelCreationFailed)
			},
		}
	}

	/// Connect to a node and open a new unannounced channel.
	///
	/// To open an announced channel, see [`Node::open_announced_channel`].
	///
	/// Disconnects and reconnects are handled automatically.
	///
	/// If `push_to_counterparty_msat` is set, the given value will be pushed (read: sent) to the
	/// channel counterparty on channel open. This can be useful to start out with the balance not
	/// entirely shifted to one side, thereby making it possible to receive payments from the get-go.
	///
	/// If Anchor channels are enabled, this will ensure the configured
	/// [`AnchorChannelsConfig::per_channel_reserve_sats`] is available and will be retained before
	/// opening the channel.
	///
	/// Returns a [`UserChannelId`] that allows keeping track of the channel locally.
	///
	/// [`AnchorChannelsConfig::per_channel_reserve_sats`]: crate::config::AnchorChannelsConfig::per_channel_reserve_sats
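	///
	/// For example, a sketch with placeholder values, assuming `node` is a started and funded
	/// [`Node`]:
	/// ```no_run
	/// # use std::str::FromStr;
	/// # use ldk_node::bitcoin::secp256k1::PublicKey;
	/// # use ldk_node::lightning::ln::msgs::SocketAddress;
	/// # fn open_channel_example(node: &ldk_node::Node) {
	/// let node_id = PublicKey::from_str("NODE_ID").unwrap();
	/// let address = SocketAddress::from_str("IP_ADDR:PORT").unwrap();
	/// // Open a 100,000 sat channel and push 10,000,000 msat to the counterparty.
	/// let user_channel_id =
	/// 	node.open_channel(node_id, address, 100_000, Some(10_000_000), None).unwrap();
	/// # }
	/// ```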
	pub fn open_channel(
		&self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: u64,
		push_to_counterparty_msat: Option<u64>, channel_config: Option<ChannelConfig>,
	) -> Result<UserChannelId, Error> {
		self.open_channel_inner(
			node_id,
			address,
			channel_amount_sats,
			push_to_counterparty_msat,
			channel_config,
			false,
		)
	}

	/// Connect to a node and open a new announced channel.
	///
	/// This will return an error if the node has not been sufficiently configured to operate as a
	/// forwarding node that can properly announce its existence to the public network graph, i.e.,
	/// if [`Config::listening_addresses`] or [`Config::node_alias`] is unset.
	///
	/// To open an unannounced channel, see [`Node::open_channel`].
	///
	/// Disconnects and reconnects are handled automatically.
	///
	/// If `push_to_counterparty_msat` is set, the given value will be pushed (read: sent) to the
	/// channel counterparty on channel open. This can be useful to start out with the balance not
	/// entirely shifted to one side, thereby making it possible to receive payments from the get-go.
	///
	/// If Anchor channels are enabled, this will ensure the configured
	/// [`AnchorChannelsConfig::per_channel_reserve_sats`] is available and will be retained before
	/// opening the channel.
	///
	/// Returns a [`UserChannelId`] that allows keeping track of the channel locally.
	///
	/// [`AnchorChannelsConfig::per_channel_reserve_sats`]: crate::config::AnchorChannelsConfig::per_channel_reserve_sats
	pub fn open_announced_channel(
		&self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: u64,
		push_to_counterparty_msat: Option<u64>, channel_config: Option<ChannelConfig>,
	) -> Result<UserChannelId, Error> {
		if let Err(err) = may_announce_channel(&self.config) {
			log_error!(self.logger, "Failed to open announced channel as the node hasn't been sufficiently configured to act as a forwarding node: {}", err);
			return Err(Error::ChannelCreationFailed);
		}

		self.open_channel_inner(
			node_id,
			address,
			channel_amount_sats,
			push_to_counterparty_msat,
			channel_config,
			true,
		)
	}

	/// Manually sync the LDK and BDK wallets with the current chain state and update the fee rate
	/// cache.
	///
	/// **Note:** The wallets are regularly synced in the background if background syncing is enabled
	/// via [`EsploraSyncConfig::background_sync_config`]. Therefore, using this blocking sync method
	/// is almost always redundant when background syncing is enabled and should be avoided where possible.
	/// However, if background syncing is disabled (i.e., `background_sync_config` is set to `None`),
	/// this method must be called manually to keep wallets in sync with the chain state.
	///
	/// [`EsploraSyncConfig::background_sync_config`]: crate::config::EsploraSyncConfig::background_sync_config
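	///
	/// For example, a sketch of manually syncing in a loop when background syncing is disabled,
	/// assuming `node` is a started [`Node`]:
	/// ```no_run
	/// # fn sync_example(node: &ldk_node::Node) {
	/// loop {
	/// 	// Blocks until the on-chain wallet, the Lightning wallet, and the fee rate cache
	/// 	// have been updated.
	/// 	node.sync_wallets().unwrap();
	/// 	std::thread::sleep(std::time::Duration::from_secs(60));
	/// }
	/// # }
	/// ```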
	pub fn sync_wallets(&self) -> Result<(), Error> {
		let rt_lock = self.runtime.read().unwrap();
		if rt_lock.is_none() {
			return Err(Error::NotRunning);
		}

		let chain_source = Arc::clone(&self.chain_source);
		let sync_cman = Arc::clone(&self.channel_manager);
		let sync_cmon = Arc::clone(&self.chain_monitor);
		let sync_sweeper = Arc::clone(&self.output_sweeper);
		tokio::task::block_in_place(move || {
			tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap().block_on(
				async move {
					match chain_source.as_ref() {
						ChainSource::Esplora { .. } => {
							chain_source.update_fee_rate_estimates().await?;
							chain_source
								.sync_lightning_wallet(sync_cman, sync_cmon, sync_sweeper)
								.await?;
							chain_source.sync_onchain_wallet().await?;
						},
						ChainSource::Electrum { .. } => {
							chain_source.update_fee_rate_estimates().await?;
							chain_source
								.sync_lightning_wallet(sync_cman, sync_cmon, sync_sweeper)
								.await?;
							chain_source.sync_onchain_wallet().await?;
						},
						ChainSource::BitcoindRpc { .. } => {
							chain_source.update_fee_rate_estimates().await?;
							chain_source
								.poll_and_update_listeners(sync_cman, sync_cmon, sync_sweeper)
								.await?;
						},
					}
					Ok(())
				},
			)
		})
	}

	/// Close a previously opened channel.
	///
	/// Will attempt to close a channel cooperatively. If this fails, users might need to resort to
	/// [`Node::force_close_channel`].
	pub fn close_channel(
		&self, user_channel_id: &UserChannelId, counterparty_node_id: PublicKey,
	) -> Result<(), Error> {
		self.close_channel_internal(user_channel_id, counterparty_node_id, false, None)
	}

	/// Force-close a previously opened channel.
	///
	/// Will force-close the channel, potentially broadcasting our latest state. Note that in
	/// contrast to cooperative closure, force-closing will have the channel funds time-locked,
	/// i.e., they will only be available after the counterparty had time to contest our claim.
	/// Force-closing channels is also more costly in terms of on-chain fees, so cooperative
	/// closure should always be preferred (and tried first).
	///
	/// Broadcasting the closing transactions will be omitted for Anchor channels if we trust the
	/// counterparty to broadcast for us (see [`AnchorChannelsConfig::trusted_peers_no_reserve`]
	/// for more information).
	///
	/// [`AnchorChannelsConfig::trusted_peers_no_reserve`]: crate::config::AnchorChannelsConfig::trusted_peers_no_reserve
	pub fn force_close_channel(
		&self, user_channel_id: &UserChannelId, counterparty_node_id: PublicKey,
		reason: Option<String>,
	) -> Result<(), Error> {
		self.close_channel_internal(user_channel_id, counterparty_node_id, true, reason)
	}

	fn close_channel_internal(
		&self, user_channel_id: &UserChannelId, counterparty_node_id: PublicKey, force: bool,
		force_close_reason: Option<String>,
	) -> Result<(), Error> {
		debug_assert!(
			force_close_reason.is_none() || force,
			"Reason can only be set for force closures"
		);
		let open_channels =
			self.channel_manager.list_channels_with_counterparty(&counterparty_node_id);
		if let Some(channel_details) =
			open_channels.iter().find(|c| c.user_channel_id == user_channel_id.0)
		{
			if force {
				if self.config.anchor_channels_config.as_ref().map_or(false, |acc| {
					acc.trusted_peers_no_reserve.contains(&counterparty_node_id)
				}) {
					self.channel_manager
						.force_close_without_broadcasting_txn(
							&channel_details.channel_id,
							&counterparty_node_id,
							force_close_reason.unwrap_or_default(),
						)
						.map_err(|e| {
							log_error!(
								self.logger,
								"Failed to force-close channel to trusted peer: {:?}",
								e
							);
							Error::ChannelClosingFailed
						})?;
				} else {
					self.channel_manager
						.force_close_broadcasting_latest_txn(
							&channel_details.channel_id,
							&counterparty_node_id,
							force_close_reason.unwrap_or_default(),
						)
						.map_err(|e| {
							log_error!(self.logger, "Failed to force-close channel: {:?}", e);
							Error::ChannelClosingFailed
						})?;
				}
			} else {
				self.channel_manager
					.close_channel(&channel_details.channel_id, &counterparty_node_id)
					.map_err(|e| {
						log_error!(self.logger, "Failed to close channel: {:?}", e);
						Error::ChannelClosingFailed
					})?;
			}

			// Check if this was the last open channel; if so, forget the peer.
			if open_channels.len() == 1 {
				self.peer_store.remove_peer(&counterparty_node_id)?;
			}
		}

		Ok(())
	}

	/// Update the config for a previously opened channel.
	pub fn update_channel_config(
		&self, user_channel_id: &UserChannelId, counterparty_node_id: PublicKey,
		channel_config: ChannelConfig,
	) -> Result<(), Error> {
		let open_channels =
			self.channel_manager.list_channels_with_counterparty(&counterparty_node_id);
		if let Some(channel_details) =
			open_channels.iter().find(|c| c.user_channel_id == user_channel_id.0)
		{
			self.channel_manager
				.update_channel_config(
					&counterparty_node_id,
					&[channel_details.channel_id],
					&(channel_config).clone().into(),
				)
				.map_err(|_| Error::ChannelConfigUpdateFailed)
		} else {
			Err(Error::ChannelConfigUpdateFailed)
		}
	}

	/// Retrieve the details of a specific payment with the given id.
	///
	/// Returns `Some` if the payment was known and `None` otherwise.
	pub fn payment(&self, payment_id: &PaymentId) -> Option<PaymentDetails> {
		self.payment_store.get(payment_id)
	}

	/// Remove the payment with the given id from the store.
	pub fn remove_payment(&self, payment_id: &PaymentId) -> Result<(), Error> {
		self.payment_store.remove(&payment_id)
	}

	/// Retrieves an overview of all known balances.
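	///
	/// For example, a sketch of inspecting some of the returned [`BalanceDetails`] fields,
	/// assuming `node` is a started [`Node`]:
	/// ```no_run
	/// # fn balances_example(node: &ldk_node::Node) {
	/// let balances = node.list_balances();
	/// println!("Spendable on-chain: {}sats", balances.spendable_onchain_balance_sats);
	/// println!("Total Lightning: {}sats", balances.total_lightning_balance_sats);
	/// # }
	/// ```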
	pub fn list_balances(&self) -> BalanceDetails {
		let cur_anchor_reserve_sats =
			total_anchor_channels_reserve_sats(&self.channel_manager, &self.config);
		let (total_onchain_balance_sats, spendable_onchain_balance_sats) =
			self.wallet.get_balances(cur_anchor_reserve_sats).unwrap_or((0, 0));

		let total_anchor_channels_reserve_sats =
			std::cmp::min(cur_anchor_reserve_sats, total_onchain_balance_sats);

		let mut total_lightning_balance_sats = 0;
		let mut lightning_balances = Vec::new();
		for (funding_txo, channel_id) in self.chain_monitor.list_monitors() {
			match self.chain_monitor.get_monitor(funding_txo) {
				Ok(monitor) => {
					// unwrap safety: `get_counterparty_node_id` will always be `Some` after 0.0.110 and
					// LDK Node 0.1 depended on 0.0.115 already.
					let counterparty_node_id = monitor.get_counterparty_node_id().unwrap();
					for ldk_balance in monitor.get_claimable_balances() {
						total_lightning_balance_sats += ldk_balance.claimable_amount_satoshis();
						lightning_balances.push(LightningBalance::from_ldk_balance(
							channel_id,
							counterparty_node_id,
							ldk_balance,
						));
					}
				},
				Err(()) => {
					continue;
				},
			}
		}

		let pending_balances_from_channel_closures = self
			.output_sweeper
			.tracked_spendable_outputs()
			.into_iter()
			.map(PendingSweepBalance::from_tracked_spendable_output)
			.collect();

		BalanceDetails {
			total_onchain_balance_sats,
			spendable_onchain_balance_sats,
			total_anchor_channels_reserve_sats,
			total_lightning_balance_sats,
			lightning_balances,
			pending_balances_from_channel_closures,
		}
	}

1533	/// Retrieves all payments that match the given predicate.
1534	///
1535	/// For example, you could retrieve all stored outbound payments as follows:
1536	/// ```
1537	/// # use ldk_node::Builder;
1538	/// # use ldk_node::config::Config;
1539	/// # use ldk_node::payment::PaymentDirection;
1540	/// # use ldk_node::bitcoin::Network;
1541	/// # let mut config = Config::default();
1542	/// # config.network = Network::Regtest;
1543	/// # config.storage_dir_path = "/tmp/ldk_node_test/".to_string();
1544	/// # let builder = Builder::from_config(config);
1545	/// # let node = builder.build().unwrap();
1546	/// node.list_payments_with_filter(|p| p.direction == PaymentDirection::Outbound);
1547	/// ```
1548	pub fn list_payments_with_filter<F: FnMut(&&PaymentDetails) -> bool>(
1549		&self, f: F,
1550	) -> Vec<PaymentDetails> {
1551		self.payment_store.list_filter(f)
1552	}
1553
1554	/// Retrieves all payments.
1555	pub fn list_payments(&self) -> Vec<PaymentDetails> {
1556		self.payment_store.list_filter(|_| true)
1557	}
1558
1559	/// Retrieves a list of known peers.
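	///
	/// For example, one could log all currently connected peers as follows (a sketch relying
	/// only on the public [`PeerDetails`] fields):
	/// ```no_run
	/// # use ldk_node::Builder;
	/// # use ldk_node::config::Config;
	/// # let node = Builder::from_config(Config::default()).build().unwrap();
	/// for peer in node.list_peers().iter().filter(|p| p.is_connected) {
	/// 	println!("Connected to {}@{}", peer.node_id, peer.address);
	/// }
	/// ```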
	pub fn list_peers(&self) -> Vec<PeerDetails> {
		let mut peers = Vec::new();

		// First add all connected peers, preferring to list the connected address if available.
		let connected_peers = self.peer_manager.list_peers();
		let connected_peers_len = connected_peers.len();
		for connected_peer in connected_peers {
			let node_id = connected_peer.counterparty_node_id;
			let stored_peer = self.peer_store.get_peer(&node_id);
			let stored_addr_opt = stored_peer.as_ref().map(|p| p.address.clone());
			let address = match (connected_peer.socket_address, stored_addr_opt) {
				(Some(con_addr), _) => con_addr,
				(None, Some(stored_addr)) => stored_addr,
				(None, None) => continue,
			};

			let is_persisted = stored_peer.is_some();
			let is_connected = true;
			let details = PeerDetails { node_id, address, is_persisted, is_connected };
			peers.push(details);
		}

		// Now add all known-but-offline peers, too.
		for p in self.peer_store.list_peers() {
			if peers.iter().take(connected_peers_len).any(|d| d.node_id == p.node_id) {
				continue;
			}

			let details = PeerDetails {
				node_id: p.node_id,
				address: p.address,
				is_persisted: true,
				is_connected: false,
			};

			peers.push(details);
		}

		peers
	}

	/// Returns a handler that allows querying the network graph.
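	///
	/// For example, a minimal sketch (assuming the returned handler exposes the known short
	/// channel ids via a `list_channels` method):
	/// ```no_run
	/// # use ldk_node::Builder;
	/// # use ldk_node::config::Config;
	/// # let node = Builder::from_config(Config::default()).build().unwrap();
	/// let graph = node.network_graph();
	/// println!("Known channels: {}", graph.list_channels().len());
	/// ```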
	#[cfg(not(feature = "uniffi"))]
	pub fn network_graph(&self) -> NetworkGraph {
		NetworkGraph::new(Arc::clone(&self.network_graph))
	}

	/// Returns a handler that allows querying the network graph.
	#[cfg(feature = "uniffi")]
	pub fn network_graph(&self) -> Arc<NetworkGraph> {
		Arc::new(NetworkGraph::new(Arc::clone(&self.network_graph)))
	}

	/// Creates a digital ECDSA signature of a message with the node's secret key.
	///
	/// A receiver knowing the corresponding `PublicKey` (e.g., the node's id) and the message
	/// can be sure that the signature was generated by the caller.
	/// Signatures are EC recoverable, meaning that given the message and the signature, the
	/// `PublicKey` of the signer can be extracted.
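	///
	/// For example, a signature can be checked against the node's own id (a minimal sketch;
	/// `node_id` is assumed here to return this node's `PublicKey`):
	/// ```no_run
	/// # use ldk_node::Builder;
	/// # use ldk_node::config::Config;
	/// # let node = Builder::from_config(Config::default()).build().unwrap();
	/// let msg = b"LDK Node";
	/// let sig = node.sign_message(msg);
	/// assert!(node.verify_signature(msg, &sig, &node.node_id()));
	/// ```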
	pub fn sign_message(&self, msg: &[u8]) -> String {
		self.keys_manager.sign_message(msg)
	}

	/// Verifies that the given ECDSA signature was created for the given message with the
	/// secret key corresponding to the given public key.
	pub fn verify_signature(&self, msg: &[u8], sig: &str, pkey: &PublicKey) -> bool {
		self.keys_manager.verify_signature(msg, sig, pkey)
	}

	/// Exports the current state of the scorer. The result can be shared with and merged by
	/// light nodes that only have a limited view of the network.
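	///
	/// For example, a snapshot could be written to disk for later distribution (a sketch; the
	/// path is arbitrary):
	/// ```no_run
	/// # use ldk_node::Builder;
	/// # use ldk_node::config::Config;
	/// # let node = Builder::from_config(Config::default()).build().unwrap();
	/// let scores = node.export_pathfinding_scores().unwrap();
	/// std::fs::write("/tmp/scores.bin", scores).unwrap();
	/// ```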
	pub fn export_pathfinding_scores(&self) -> Result<Vec<u8>, Error> {
		self.kv_store
			.read(
				lightning::util::persist::SCORER_PERSISTENCE_PRIMARY_NAMESPACE,
				lightning::util::persist::SCORER_PERSISTENCE_SECONDARY_NAMESPACE,
				lightning::util::persist::SCORER_PERSISTENCE_KEY,
			)
			.map_err(|e| {
				log_error!(
					self.logger,
					"Failed to access store while exporting pathfinding scores: {}",
					e
				);
				Error::PersistenceFailed
			})
	}
}

impl Drop for Node {
	fn drop(&mut self) {
		let _ = self.stop();
	}
}

/// Represents the status of the [`Node`].
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct NodeStatus {
	/// Indicates whether the [`Node`] is running.
	pub is_running: bool,
	/// Indicates whether the [`Node`] is listening for incoming connections on the addresses
	/// configured via [`Config::listening_addresses`].
	pub is_listening: bool,
	/// The best block to which our Lightning wallet is currently synced.
	pub current_best_block: BestBlock,
	/// The timestamp, in seconds since start of the UNIX epoch, when we last successfully synced
	/// our Lightning wallet to the chain tip.
	///
	/// Will be `None` if the wallet hasn't been synced yet.
	pub latest_lightning_wallet_sync_timestamp: Option<u64>,
	/// The timestamp, in seconds since start of the UNIX epoch, when we last successfully synced
	/// our on-chain wallet to the chain tip.
	///
	/// Will be `None` if the wallet hasn't been synced yet.
	pub latest_onchain_wallet_sync_timestamp: Option<u64>,
	/// The timestamp, in seconds since start of the UNIX epoch, when we last successfully updated
	/// our fee rate cache.
	///
	/// Will be `None` if the cache hasn't been updated yet.
	pub latest_fee_rate_cache_update_timestamp: Option<u64>,
	/// The timestamp, in seconds since start of the UNIX epoch, when the last rapid gossip sync
	/// (RGS) snapshot we successfully applied was generated.
	///
	/// Will be `None` if RGS isn't configured or the snapshot hasn't been updated yet.
	pub latest_rgs_snapshot_timestamp: Option<u64>,
	/// The timestamp, in seconds since start of the UNIX epoch, when we last broadcast a node
	/// announcement.
	///
	/// Will be `None` if we have no public channels or we haven't broadcast yet.
	pub latest_node_announcement_broadcast_timestamp: Option<u64>,
	/// The block height when we last archived closed channel monitor data.
	///
	/// Will be `None` if we haven't archived any monitors of closed channels yet.
	pub latest_channel_monitor_archival_height: Option<u32>,
}

/// Status fields that are persisted across restarts.
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct NodeMetrics {
	latest_lightning_wallet_sync_timestamp: Option<u64>,
	latest_onchain_wallet_sync_timestamp: Option<u64>,
	latest_fee_rate_cache_update_timestamp: Option<u64>,
	latest_rgs_snapshot_timestamp: Option<u32>,
	latest_node_announcement_broadcast_timestamp: Option<u64>,
	latest_channel_monitor_archival_height: Option<u32>,
}

impl Default for NodeMetrics {
	fn default() -> Self {
		Self {
			latest_lightning_wallet_sync_timestamp: None,
			latest_onchain_wallet_sync_timestamp: None,
			latest_fee_rate_cache_update_timestamp: None,
			latest_rgs_snapshot_timestamp: None,
			latest_node_announcement_broadcast_timestamp: None,
			latest_channel_monitor_archival_height: None,
		}
	}
}

impl_writeable_tlv_based!(NodeMetrics, {
	(0, latest_lightning_wallet_sync_timestamp, option),
	(2, latest_onchain_wallet_sync_timestamp, option),
	(4, latest_fee_rate_cache_update_timestamp, option),
	(6, latest_rgs_snapshot_timestamp, option),
	(8, latest_node_announcement_broadcast_timestamp, option),
	(10, latest_channel_monitor_archival_height, option),
});

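/// Computes the total reserve we need to keep on-chain for Anchor channels: the configured
/// per-channel reserve multiplied by the number of open Anchor channels whose counterparties
/// aren't listed as trusted-no-reserve.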
pub(crate) fn total_anchor_channels_reserve_sats(
	channel_manager: &ChannelManager, config: &Config,
) -> u64 {
	config.anchor_channels_config.as_ref().map_or(0, |anchor_channels_config| {
		channel_manager
			.list_channels()
			.into_iter()
			.filter(|c| {
				!anchor_channels_config.trusted_peers_no_reserve.contains(&c.counterparty.node_id)
					&& c.channel_shutdown_state
						.map_or(true, |s| s != ChannelShutdownState::ShutdownComplete)
					&& c.channel_type
						.as_ref()
						.map_or(false, |t| t.requires_anchors_zero_fee_htlc_tx())
			})
			.count() as u64
			* anchor_channels_config.per_channel_reserve_sats
	})
}