use crate::chain::{ChainSource, DEFAULT_ESPLORA_SERVER_URL};
use crate::config::{
default_user_config, may_announce_channel, AnnounceError, Config, ElectrumSyncConfig,
EsploraSyncConfig, DEFAULT_LOG_FILENAME, DEFAULT_LOG_LEVEL, WALLET_KEYS_SEED_LEN,
};
use crate::connection::ConnectionManager;
use crate::event::EventQueue;
use crate::fee_estimator::OnchainFeeEstimator;
use crate::gossip::GossipSource;
use crate::io::sqlite_store::SqliteStore;
use crate::io::utils::{read_node_metrics, write_node_metrics};
use crate::io::vss_store::VssStore;
use crate::io::{
self, PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE,
};
use crate::liquidity::{
LSPS1ClientConfig, LSPS2ClientConfig, LSPS2ServiceConfig, LiquiditySourceBuilder,
};
use crate::logger::{log_error, log_info, LdkLogger, LogLevel, LogWriter, Logger};
use crate::message_handler::NodeCustomMessageHandler;
use crate::peer_store::PeerStore;
use crate::tx_broadcaster::TransactionBroadcaster;
use crate::types::{
ChainMonitor, ChannelManager, DynStore, GossipSync, Graph, KeysManager, MessageRouter,
OnionMessenger, PaymentStore, PeerManager,
};
use crate::wallet::persist::KVStoreWalletPersister;
use crate::wallet::Wallet;
use crate::{Node, NodeMetrics};
use lightning::chain::{chainmonitor, BestBlock, Watch};
use lightning::io::Cursor;
use lightning::ln::channelmanager::{self, ChainParameters, ChannelManagerReadArgs};
use lightning::ln::msgs::{RoutingMessageHandler, SocketAddress};
use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler};
use lightning::routing::gossip::NodeAlias;
use lightning::routing::router::DefaultRouter;
use lightning::routing::scoring::{
ProbabilisticScorer, ProbabilisticScoringDecayParameters, ProbabilisticScoringFeeParameters,
};
use lightning::sign::EntropySource;
use lightning::util::persist::{
read_channel_monitors, CHANNEL_MANAGER_PERSISTENCE_KEY,
CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
};
use lightning::util::ser::ReadableArgs;
use lightning::util::sweep::OutputSweeper;
use lightning_persister::fs_store::FilesystemStore;
use bdk_wallet::template::Bip84;
use bdk_wallet::KeychainKind;
use bdk_wallet::Wallet as BdkWallet;
use bip39::Mnemonic;
use bitcoin::secp256k1::PublicKey;
use bitcoin::{BlockHash, Network};
use bitcoin::bip32::{ChildNumber, Xpriv};
use std::collections::HashMap;
use std::convert::TryInto;
use std::default::Default;
use std::fmt;
use std::fs;
use std::path::PathBuf;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex, RwLock};
use std::time::SystemTime;
use vss_client::headers::{FixedHeaders, LnurlAuthToJwtProvider, VssHeaderProvider};
// Hardened BIP32 child index under which the VSS storage key material is derived from the node
// seed (see the `build_with_vss_store*` methods).
const VSS_HARDENED_CHILD_INDEX: u32 = 877;
// Hardened BIP32 child index, derived *below* the VSS key, for the LNURL-auth key used to obtain
// JWT tokens via `LnurlAuthToJwtProvider`.
const VSS_LNURL_AUTH_HARDENED_CHILD_INDEX: u32 = 138;
// Hardened BIP32 child index under which the LSPS2 service promise secret is derived.
const LSPS_HARDENED_CHILD_INDEX: u32 = 577;
/// The chain data source chosen via the builder, stored until one of the `build*` methods runs.
// NOTE(review): deriving `Debug` means `rpc_password` would be printed in clear text if this
// config is ever logged — consider a manual impl that redacts it.
#[derive(Debug, Clone)]
enum ChainDataSourceConfig {
	/// Sync via an Esplora server; a `None` sync config falls back to defaults at build time.
	Esplora { server_url: String, sync_config: Option<EsploraSyncConfig> },
	/// Sync via an Electrum server; a `None` sync config falls back to defaults at build time.
	Electrum { server_url: String, sync_config: Option<ElectrumSyncConfig> },
	/// Connect to a Bitcoin Core node over RPC with the given credentials.
	BitcoindRpc { rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String },
}
/// How the wallet entropy (seed) will be obtained when building the node (consumed by
/// `seed_bytes_from_config` during `build*`).
// NOTE(review): deriving `Debug` means raw seed bytes / the mnemonic would be printed if this
// config is ever logged — verify this is acceptable.
#[derive(Debug, Clone)]
enum EntropySourceConfig {
	/// Path to a seed file (read during build — see `seed_bytes_from_config`).
	SeedFile(String),
	/// Use the given raw seed bytes directly.
	SeedBytes([u8; WALLET_KEYS_SEED_LEN]),
	/// Derive the seed from a BIP39 mnemonic plus an optional passphrase.
	Bip39Mnemonic { mnemonic: Mnemonic, passphrase: Option<String> },
}
/// The gossip data source the node will use.
#[derive(Debug, Clone)]
enum GossipSourceConfig {
	/// Receive gossip over the Lightning peer-to-peer network.
	P2PNetwork,
	/// Fetch gossip snapshots from the Rapid Gossip Sync server at the given URL.
	RapidGossipSync(String),
}
/// Liquidity-related (LSPS) client/service settings collected by the builder.
///
/// All fields default to `None`; they are filled in by the `set_liquidity_*` methods.
#[derive(Debug, Clone, Default)]
struct LiquiditySourceConfig {
	// Act as a client of the given LSPS1 service.
	lsps1_client: Option<LSPS1ClientConfig>,
	// Act as a client of the given LSPS2 service.
	lsps2_client: Option<LSPS2ClientConfig>,
	// Act as an LSPS2 service ourselves.
	lsps2_service: Option<LSPS2ServiceConfig>,
}
/// The log destination the node will use. `Debug` is implemented manually because the `Custom`
/// variant holds a trait object that is not required to implement `Debug`.
#[derive(Clone)]
enum LogWriterConfig {
	/// Write logs to the filesystem; `None` fields fall back to defaults during logger setup.
	File { log_file_path: Option<String>, max_log_level: Option<LogLevel> },
	/// Forward logs to the `log` facade (see `set_log_facade_logger`).
	Log,
	/// Forward logs to a user-provided [`LogWriter`].
	Custom(Arc<dyn LogWriter>),
}
impl std::fmt::Debug for LogWriterConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
LogWriterConfig::File { max_log_level, log_file_path } => f
.debug_struct("LogWriterConfig")
.field("max_log_level", max_log_level)
.field("log_file_path", log_file_path)
.finish(),
LogWriterConfig::Log => write!(f, "LogWriterConfig::Log"),
LogWriterConfig::Custom(_) => {
f.debug_tuple("Custom").field(&"<config internal to custom log writer>").finish()
},
}
}
}
/// An error encountered during building a node.
#[derive(Debug, Clone, PartialEq)]
pub enum BuildError {
	/// The given seed bytes are invalid.
	InvalidSeedBytes,
	/// The given seed file is invalid or could not be read.
	InvalidSeedFile,
	/// The current system time is invalid; clocks might have gone back in time.
	InvalidSystemTime,
	/// We failed to watch a deserialized `ChannelMonitor`.
	InvalidChannelMonitor,
	/// The given listening addresses are invalid.
	InvalidListeningAddresses,
	/// The given announcement addresses are invalid.
	InvalidAnnouncementAddresses,
	/// The given node alias is invalid.
	InvalidNodeAlias,
	/// We failed to read data from the store.
	ReadFailed,
	/// We failed to write data to the store.
	WriteFailed,
	/// We failed to access the given storage path.
	StoragePathAccessFailed,
	/// We failed to set up the key-value store.
	KVStoreSetupFailed,
	/// We failed to set up the on-chain wallet.
	WalletSetupFailed,
	/// We failed to set up the logger.
	LoggerSetupFailed,
	/// The given network does not match the previously configured one.
	NetworkMismatch,
}

impl fmt::Display for BuildError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		// Map every variant to its user-facing message, then emit it in one place.
		let msg = match self {
			Self::InvalidSeedBytes => "Given seed bytes are invalid.",
			Self::InvalidSeedFile => "Given seed file is invalid or could not be read.",
			Self::InvalidSystemTime => {
				"System time is invalid. Clocks might have gone back in time."
			},
			Self::InvalidChannelMonitor => "Failed to watch a deserialized ChannelMonitor",
			Self::InvalidListeningAddresses => "Given listening addresses are invalid.",
			Self::InvalidAnnouncementAddresses => "Given announcement addresses are invalid.",
			Self::InvalidNodeAlias => "Given node alias is invalid.",
			Self::ReadFailed => "Failed to read from store.",
			Self::WriteFailed => "Failed to write to store.",
			Self::StoragePathAccessFailed => "Failed to access the given storage path.",
			Self::KVStoreSetupFailed => "Failed to setup KVStore.",
			Self::WalletSetupFailed => "Failed to setup onchain wallet.",
			Self::LoggerSetupFailed => "Failed to setup the logger.",
			Self::NetworkMismatch => {
				"Given network does not match the node's previously configured network."
			},
		};
		write!(f, "{}", msg)
	}
}

impl std::error::Error for BuildError {}
/// A builder for a node instance, collecting configuration via the `set_*` methods until one of
/// the `build*` methods is called.
#[derive(Debug)]
pub struct NodeBuilder {
	// The node configuration; mutated in place by several setters.
	config: Config,
	// How to obtain the wallet seed (handled by `seed_bytes_from_config` at build time).
	entropy_source_config: Option<EntropySourceConfig>,
	// Chain data backend; an Esplora default is used at build time when `None`.
	chain_data_source_config: Option<ChainDataSourceConfig>,
	// Gossip backend; P2P gossip is used at build time when `None`.
	gossip_source_config: Option<GossipSourceConfig>,
	// Optional LSPS client/service settings.
	liquidity_source_config: Option<LiquiditySourceConfig>,
	// Log destination; defaults are applied during logger setup when `None`.
	log_writer_config: Option<LogWriterConfig>,
}
impl NodeBuilder {
	/// Creates a new builder instance with the default configuration.
	pub fn new() -> Self {
		let config = Config::default();
		Self::from_config(config)
	}

	/// Creates a new builder instance from the given [`Config`].
	pub fn from_config(config: Config) -> Self {
		Self {
			config,
			entropy_source_config: None,
			chain_data_source_config: None,
			gossip_source_config: None,
			liquidity_source_config: None,
			log_writer_config: None,
		}
	}

	/// Configures the node to source its wallet entropy from the seed file at the given path.
	pub fn set_entropy_seed_path(&mut self, seed_path: String) -> &mut Self {
		self.entropy_source_config = Some(EntropySourceConfig::SeedFile(seed_path));
		self
	}

	/// Configures the node to use the given raw seed bytes as wallet entropy.
	pub fn set_entropy_seed_bytes(&mut self, seed_bytes: [u8; WALLET_KEYS_SEED_LEN]) -> &mut Self {
		self.entropy_source_config = Some(EntropySourceConfig::SeedBytes(seed_bytes));
		self
	}

	/// Configures the node to derive its wallet entropy from a BIP39 mnemonic and optional
	/// passphrase.
	pub fn set_entropy_bip39_mnemonic(
		&mut self, mnemonic: Mnemonic, passphrase: Option<String>,
	) -> &mut Self {
		self.entropy_source_config =
			Some(EntropySourceConfig::Bip39Mnemonic { mnemonic, passphrase });
		self
	}

	/// Configures the node to source its chain data from the given Esplora server.
	///
	/// If `sync_config` is `None`, default values are used at build time.
	pub fn set_chain_source_esplora(
		&mut self, server_url: String, sync_config: Option<EsploraSyncConfig>,
	) -> &mut Self {
		self.chain_data_source_config =
			Some(ChainDataSourceConfig::Esplora { server_url, sync_config });
		self
	}

	/// Configures the node to source its chain data from the given Electrum server.
	///
	/// If `sync_config` is `None`, default values are used at build time.
	pub fn set_chain_source_electrum(
		&mut self, server_url: String, sync_config: Option<ElectrumSyncConfig>,
	) -> &mut Self {
		self.chain_data_source_config =
			Some(ChainDataSourceConfig::Electrum { server_url, sync_config });
		self
	}

	/// Configures the node to source its chain data from a Bitcoin Core node via RPC, using the
	/// given connection details and credentials.
	pub fn set_chain_source_bitcoind_rpc(
		&mut self, rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String,
	) -> &mut Self {
		self.chain_data_source_config =
			Some(ChainDataSourceConfig::BitcoindRpc { rpc_host, rpc_port, rpc_user, rpc_password });
		self
	}

	/// Configures the node to source its gossip data from the Lightning peer-to-peer network.
	pub fn set_gossip_source_p2p(&mut self) -> &mut Self {
		self.gossip_source_config = Some(GossipSourceConfig::P2PNetwork);
		self
	}

	/// Configures the node to source its gossip data from the given Rapid Gossip Sync server.
	pub fn set_gossip_source_rgs(&mut self, rgs_server_url: String) -> &mut Self {
		self.gossip_source_config = Some(GossipSourceConfig::RapidGossipSync(rgs_server_url));
		self
	}

	/// Configures the node to act as a client of the given LSPS1 liquidity service.
	///
	/// The given `node_id` is also added to the list of peers trusted for zero-conf channels.
	pub fn set_liquidity_source_lsps1(
		&mut self, node_id: PublicKey, address: SocketAddress, token: Option<String>,
	) -> &mut Self {
		// `PublicKey` is `Copy`, so pushing it doesn't require a clone.
		self.config.trusted_peers_0conf.push(node_id);
		// Lazily initialize the liquidity config only when it wasn't set before.
		let liquidity_source_config =
			self.liquidity_source_config.get_or_insert_with(LiquiditySourceConfig::default);
		liquidity_source_config.lsps1_client = Some(LSPS1ClientConfig { node_id, address, token });
		self
	}

	/// Configures the node to act as a client of the given LSPS2 liquidity service.
	///
	/// The given `node_id` is also added to the list of peers trusted for zero-conf channels.
	pub fn set_liquidity_source_lsps2(
		&mut self, node_id: PublicKey, address: SocketAddress, token: Option<String>,
	) -> &mut Self {
		// `PublicKey` is `Copy`, so pushing it doesn't require a clone.
		self.config.trusted_peers_0conf.push(node_id);
		// Lazily initialize the liquidity config only when it wasn't set before.
		let liquidity_source_config =
			self.liquidity_source_config.get_or_insert_with(LiquiditySourceConfig::default);
		liquidity_source_config.lsps2_client = Some(LSPS2ClientConfig { node_id, address, token });
		self
	}

	/// Configures the node to run an LSPS2 service with the given service configuration.
	pub fn set_liquidity_provider_lsps2(
		&mut self, service_config: LSPS2ServiceConfig,
	) -> &mut Self {
		// Lazily initialize the liquidity config only when it wasn't set before.
		let liquidity_source_config =
			self.liquidity_source_config.get_or_insert_with(LiquiditySourceConfig::default);
		liquidity_source_config.lsps2_service = Some(service_config);
		self
	}

	/// Sets the used storage directory path.
	pub fn set_storage_dir_path(&mut self, storage_dir_path: String) -> &mut Self {
		self.config.storage_dir_path = storage_dir_path;
		self
	}

	/// Configures the node to write logs to the filesystem.
	///
	/// `None` values fall back to defaults during logger setup (see `setup_logger`).
	pub fn set_filesystem_logger(
		&mut self, log_file_path: Option<String>, max_log_level: Option<LogLevel>,
	) -> &mut Self {
		self.log_writer_config = Some(LogWriterConfig::File { log_file_path, max_log_level });
		self
	}

	/// Configures the node to forward logs to the `log` facade.
	pub fn set_log_facade_logger(&mut self) -> &mut Self {
		self.log_writer_config = Some(LogWriterConfig::Log);
		self
	}

	/// Configures the node to forward logs to the given custom [`LogWriter`].
	pub fn set_custom_logger(&mut self, log_writer: Arc<dyn LogWriter>) -> &mut Self {
		self.log_writer_config = Some(LogWriterConfig::Custom(log_writer));
		self
	}

	/// Sets the Bitcoin network used.
	pub fn set_network(&mut self, network: Network) -> &mut Self {
		self.config.network = network;
		self
	}

	/// Sets the addresses on which the node will listen for incoming connections.
	///
	/// Returns [`BuildError::InvalidListeningAddresses`] if more than 100 addresses are given.
	pub fn set_listening_addresses(
		&mut self, listening_addresses: Vec<SocketAddress>,
	) -> Result<&mut Self, BuildError> {
		if listening_addresses.len() > 100 {
			return Err(BuildError::InvalidListeningAddresses);
		}

		self.config.listening_addresses = Some(listening_addresses);
		Ok(self)
	}

	/// Sets the addresses that will be announced to the network.
	///
	/// Returns [`BuildError::InvalidAnnouncementAddresses`] if more than 100 addresses are given.
	pub fn set_announcement_addresses(
		&mut self, announcement_addresses: Vec<SocketAddress>,
	) -> Result<&mut Self, BuildError> {
		if announcement_addresses.len() > 100 {
			return Err(BuildError::InvalidAnnouncementAddresses);
		}

		self.config.announcement_addresses = Some(announcement_addresses);
		Ok(self)
	}

	/// Sets the node alias to be announced, after validating it via `sanitize_alias`.
	///
	/// Returns [`BuildError::InvalidNodeAlias`] if the alias doesn't pass validation.
	pub fn set_node_alias(&mut self, node_alias: String) -> Result<&mut Self, BuildError> {
		let node_alias = sanitize_alias(&node_alias)?;

		self.config.node_alias = Some(node_alias);
		Ok(self)
	}

	/// Builds a [`Node`] instance backed by an [`SqliteStore`] in the configured storage
	/// directory.
	pub fn build(&self) -> Result<Node, BuildError> {
		let storage_dir_path = self.config.storage_dir_path.clone();
		// `create_dir_all` only borrows the path, so no extra clone is needed here.
		fs::create_dir_all(&storage_dir_path)
			.map_err(|_| BuildError::StoragePathAccessFailed)?;
		let kv_store = Arc::new(
			SqliteStore::new(
				storage_dir_path.into(),
				Some(io::sqlite_store::SQLITE_DB_FILE_NAME.to_string()),
				Some(io::sqlite_store::KV_TABLE_NAME.to_string()),
			)
			.map_err(|_| BuildError::KVStoreSetupFailed)?,
		);
		self.build_with_store(kv_store)
	}

	/// Builds a [`Node`] instance backed by a [`FilesystemStore`] in an `fs_store` subdirectory
	/// of the configured storage directory.
	pub fn build_with_fs_store(&self) -> Result<Node, BuildError> {
		let mut storage_dir_path: PathBuf = self.config.storage_dir_path.clone().into();
		storage_dir_path.push("fs_store");

		// `create_dir_all` only borrows the path, so no extra clone is needed here.
		fs::create_dir_all(&storage_dir_path)
			.map_err(|_| BuildError::StoragePathAccessFailed)?;
		let kv_store = Arc::new(FilesystemStore::new(storage_dir_path));
		self.build_with_store(kv_store)
	}

	/// Builds a [`Node`] instance backed by a [`VssStore`], authenticating to the VSS server via
	/// LNURL-auth JWT tokens obtained from `lnurl_auth_server_url`.
	///
	/// The VSS key material is derived from the node seed at hardened child index
	/// [`VSS_HARDENED_CHILD_INDEX`], with the LNURL-auth key derived below it at
	/// [`VSS_LNURL_AUTH_HARDENED_CHILD_INDEX`].
	pub fn build_with_vss_store(
		&self, vss_url: String, store_id: String, lnurl_auth_server_url: String,
		fixed_headers: HashMap<String, String>,
	) -> Result<Node, BuildError> {
		use bitcoin::key::Secp256k1;

		let logger = setup_logger(&self.log_writer_config, &self.config)?;

		let seed_bytes = seed_bytes_from_config(
			&self.config,
			self.entropy_source_config.as_ref(),
			Arc::clone(&logger),
		)?;

		let config = Arc::new(self.config.clone());

		let vss_xprv =
			derive_xprv(config, &seed_bytes, VSS_HARDENED_CHILD_INDEX, Arc::clone(&logger))?;

		let lnurl_auth_xprv = vss_xprv
			.derive_priv(
				&Secp256k1::new(),
				&[ChildNumber::Hardened { index: VSS_LNURL_AUTH_HARDENED_CHILD_INDEX }],
			)
			.map_err(|e| {
				log_error!(logger, "Failed to derive VSS secret: {}", e);
				BuildError::KVStoreSetupFailed
			})?;

		let lnurl_auth_jwt_provider =
			LnurlAuthToJwtProvider::new(lnurl_auth_xprv, lnurl_auth_server_url, fixed_headers)
				.map_err(|e| {
					log_error!(logger, "Failed to create LnurlAuthToJwtProvider: {}", e);
					BuildError::KVStoreSetupFailed
				})?;

		let header_provider = Arc::new(lnurl_auth_jwt_provider);
		self.build_with_vss_store_and_header_provider(vss_url, store_id, header_provider)
	}

	/// Builds a [`Node`] instance backed by a [`VssStore`], sending the given fixed headers with
	/// every VSS request.
	pub fn build_with_vss_store_and_fixed_headers(
		&self, vss_url: String, store_id: String, fixed_headers: HashMap<String, String>,
	) -> Result<Node, BuildError> {
		let header_provider = Arc::new(FixedHeaders::new(fixed_headers));
		self.build_with_vss_store_and_header_provider(vss_url, store_id, header_provider)
	}

	/// Builds a [`Node`] instance backed by a [`VssStore`], using the given header provider for
	/// VSS requests.
	pub fn build_with_vss_store_and_header_provider(
		&self, vss_url: String, store_id: String, header_provider: Arc<dyn VssHeaderProvider>,
	) -> Result<Node, BuildError> {
		let logger = setup_logger(&self.log_writer_config, &self.config)?;

		let seed_bytes = seed_bytes_from_config(
			&self.config,
			self.entropy_source_config.as_ref(),
			Arc::clone(&logger),
		)?;

		let config = Arc::new(self.config.clone());

		let vss_xprv = derive_xprv(
			Arc::clone(&config),
			&seed_bytes,
			VSS_HARDENED_CHILD_INDEX,
			Arc::clone(&logger),
		)?;

		let vss_seed_bytes: [u8; 32] = vss_xprv.private_key.secret_bytes();

		let vss_store =
			VssStore::new(vss_url, store_id, vss_seed_bytes, header_provider).map_err(|e| {
				log_error!(logger, "Failed to setup VssStore: {}", e);
				BuildError::KVStoreSetupFailed
			})?;

		build_with_store_internal(
			config,
			self.chain_data_source_config.as_ref(),
			self.gossip_source_config.as_ref(),
			self.liquidity_source_config.as_ref(),
			seed_bytes,
			logger,
			Arc::new(vss_store),
		)
	}

	/// Builds a [`Node`] instance on top of the given, previously-constructed key-value store.
	pub fn build_with_store(&self, kv_store: Arc<DynStore>) -> Result<Node, BuildError> {
		let logger = setup_logger(&self.log_writer_config, &self.config)?;

		let seed_bytes = seed_bytes_from_config(
			&self.config,
			self.entropy_source_config.as_ref(),
			Arc::clone(&logger),
		)?;

		let config = Arc::new(self.config.clone());

		build_with_store_internal(
			config,
			self.chain_data_source_config.as_ref(),
			self.gossip_source_config.as_ref(),
			self.liquidity_source_config.as_ref(),
			seed_bytes,
			logger,
			kv_store,
		)
	}
}
/// A wrapper around [`NodeBuilder`] guarded by an `RwLock`, so the setter methods can take
/// `&self` as required by the `uniffi` bindings interface.
#[derive(Debug)]
#[cfg(feature = "uniffi")]
pub struct ArcedNodeBuilder {
	// Interior mutability: setters take the write lock, `build*` methods take the read lock.
	inner: RwLock<NodeBuilder>,
}
// Bindings facade: every method acquires the lock on the wrapped `NodeBuilder` and delegates.
#[cfg(feature = "uniffi")]
impl ArcedNodeBuilder {
	/// Creates a new builder wrapping a default [`NodeBuilder`].
	pub fn new() -> Self {
		Self { inner: RwLock::new(NodeBuilder::new()) }
	}

	/// Creates a new builder wrapping a [`NodeBuilder`] initialized from the given [`Config`].
	pub fn from_config(config: Config) -> Self {
		Self { inner: RwLock::new(NodeBuilder::from_config(config)) }
	}

	pub fn set_entropy_seed_path(&self, seed_path: String) {
		let mut inner = self.inner.write().unwrap();
		inner.set_entropy_seed_path(seed_path);
	}

	pub fn set_entropy_seed_bytes(&self, seed_bytes: Vec<u8>) -> Result<(), BuildError> {
		// Bindings hand us a `Vec`, so enforce the exact seed length before converting.
		if seed_bytes.len() != WALLET_KEYS_SEED_LEN {
			return Err(BuildError::InvalidSeedBytes);
		}
		let mut bytes = [0u8; WALLET_KEYS_SEED_LEN];
		bytes.copy_from_slice(&seed_bytes);

		let mut inner = self.inner.write().unwrap();
		inner.set_entropy_seed_bytes(bytes);
		Ok(())
	}

	pub fn set_entropy_bip39_mnemonic(&self, mnemonic: Mnemonic, passphrase: Option<String>) {
		let mut inner = self.inner.write().unwrap();
		inner.set_entropy_bip39_mnemonic(mnemonic, passphrase);
	}

	pub fn set_chain_source_esplora(
		&self, server_url: String, sync_config: Option<EsploraSyncConfig>,
	) {
		let mut inner = self.inner.write().unwrap();
		inner.set_chain_source_esplora(server_url, sync_config);
	}

	pub fn set_chain_source_electrum(
		&self, server_url: String, sync_config: Option<ElectrumSyncConfig>,
	) {
		let mut inner = self.inner.write().unwrap();
		inner.set_chain_source_electrum(server_url, sync_config);
	}

	pub fn set_chain_source_bitcoind_rpc(
		&self, rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String,
	) {
		let mut inner = self.inner.write().unwrap();
		inner.set_chain_source_bitcoind_rpc(rpc_host, rpc_port, rpc_user, rpc_password);
	}

	pub fn set_gossip_source_p2p(&self) {
		let mut inner = self.inner.write().unwrap();
		inner.set_gossip_source_p2p();
	}

	pub fn set_gossip_source_rgs(&self, rgs_server_url: String) {
		let mut inner = self.inner.write().unwrap();
		inner.set_gossip_source_rgs(rgs_server_url);
	}

	pub fn set_liquidity_source_lsps1(
		&self, node_id: PublicKey, address: SocketAddress, token: Option<String>,
	) {
		let mut inner = self.inner.write().unwrap();
		inner.set_liquidity_source_lsps1(node_id, address, token);
	}

	pub fn set_liquidity_source_lsps2(
		&self, node_id: PublicKey, address: SocketAddress, token: Option<String>,
	) {
		let mut inner = self.inner.write().unwrap();
		inner.set_liquidity_source_lsps2(node_id, address, token);
	}

	pub fn set_liquidity_provider_lsps2(&self, service_config: LSPS2ServiceConfig) {
		let mut inner = self.inner.write().unwrap();
		inner.set_liquidity_provider_lsps2(service_config);
	}

	pub fn set_storage_dir_path(&self, storage_dir_path: String) {
		let mut inner = self.inner.write().unwrap();
		inner.set_storage_dir_path(storage_dir_path);
	}

	pub fn set_filesystem_logger(
		&self, log_file_path: Option<String>, log_level: Option<LogLevel>,
	) {
		let mut inner = self.inner.write().unwrap();
		inner.set_filesystem_logger(log_file_path, log_level);
	}

	pub fn set_log_facade_logger(&self) {
		let mut inner = self.inner.write().unwrap();
		inner.set_log_facade_logger();
	}

	pub fn set_custom_logger(&self, log_writer: Arc<dyn LogWriter>) {
		let mut inner = self.inner.write().unwrap();
		inner.set_custom_logger(log_writer);
	}

	pub fn set_network(&self, network: Network) {
		let mut inner = self.inner.write().unwrap();
		inner.set_network(network);
	}

	pub fn set_listening_addresses(
		&self, listening_addresses: Vec<SocketAddress>,
	) -> Result<(), BuildError> {
		let mut inner = self.inner.write().unwrap();
		// Drop the `&mut Self` the inner setter returns; bindings only need the error signal.
		inner.set_listening_addresses(listening_addresses).map(|_| ())
	}

	pub fn set_announcement_addresses(
		&self, announcement_addresses: Vec<SocketAddress>,
	) -> Result<(), BuildError> {
		let mut inner = self.inner.write().unwrap();
		inner.set_announcement_addresses(announcement_addresses).map(|_| ())
	}

	pub fn set_node_alias(&self, node_alias: String) -> Result<(), BuildError> {
		let mut inner = self.inner.write().unwrap();
		inner.set_node_alias(node_alias).map(|_| ())
	}

	pub fn build(&self) -> Result<Arc<Node>, BuildError> {
		let inner = self.inner.read().unwrap();
		inner.build().map(Arc::new)
	}

	pub fn build_with_fs_store(&self) -> Result<Arc<Node>, BuildError> {
		let inner = self.inner.read().unwrap();
		inner.build_with_fs_store().map(Arc::new)
	}

	pub fn build_with_vss_store(
		&self, vss_url: String, store_id: String, lnurl_auth_server_url: String,
		fixed_headers: HashMap<String, String>,
	) -> Result<Arc<Node>, BuildError> {
		let inner = self.inner.read().unwrap();
		inner
			.build_with_vss_store(vss_url, store_id, lnurl_auth_server_url, fixed_headers)
			.map(Arc::new)
	}

	pub fn build_with_vss_store_and_fixed_headers(
		&self, vss_url: String, store_id: String, fixed_headers: HashMap<String, String>,
	) -> Result<Arc<Node>, BuildError> {
		let inner = self.inner.read().unwrap();
		inner.build_with_vss_store_and_fixed_headers(vss_url, store_id, fixed_headers).map(Arc::new)
	}

	pub fn build_with_vss_store_and_header_provider(
		&self, vss_url: String, store_id: String, header_provider: Arc<dyn VssHeaderProvider>,
	) -> Result<Arc<Node>, BuildError> {
		let inner = self.inner.read().unwrap();
		inner
			.build_with_vss_store_and_header_provider(vss_url, store_id, header_provider)
			.map(Arc::new)
	}

	pub fn build_with_store(&self, kv_store: Arc<DynStore>) -> Result<Arc<Node>, BuildError> {
		let inner = self.inner.read().unwrap();
		inner.build_with_store(kv_store).map(Arc::new)
	}
}
fn build_with_store_internal(
config: Arc<Config>, chain_data_source_config: Option<&ChainDataSourceConfig>,
gossip_source_config: Option<&GossipSourceConfig>,
liquidity_source_config: Option<&LiquiditySourceConfig>, seed_bytes: [u8; 64],
logger: Arc<Logger>, kv_store: Arc<DynStore>,
) -> Result<Node, BuildError> {
if let Err(err) = may_announce_channel(&config) {
if config.announcement_addresses.is_some() {
log_error!(logger, "Announcement addresses were set but some required configuration options for node announcement are missing: {}", err);
let build_error = if matches!(err, AnnounceError::MissingNodeAlias) {
BuildError::InvalidNodeAlias
} else {
BuildError::InvalidListeningAddresses
};
return Err(build_error);
}
if config.node_alias.is_some() {
log_error!(logger, "Node alias was set but some required configuration options for node announcement are missing: {}", err);
return Err(BuildError::InvalidListeningAddresses);
}
}
let is_listening = Arc::new(AtomicBool::new(false));
let node_metrics = match read_node_metrics(Arc::clone(&kv_store), Arc::clone(&logger)) {
Ok(metrics) => Arc::new(RwLock::new(metrics)),
Err(e) => {
if e.kind() == std::io::ErrorKind::NotFound {
Arc::new(RwLock::new(NodeMetrics::default()))
} else {
return Err(BuildError::ReadFailed);
}
},
};
let xprv = bitcoin::bip32::Xpriv::new_master(config.network, &seed_bytes).map_err(|e| {
log_error!(logger, "Failed to derive master secret: {}", e);
BuildError::InvalidSeedBytes
})?;
let descriptor = Bip84(xprv, KeychainKind::External);
let change_descriptor = Bip84(xprv, KeychainKind::Internal);
let mut wallet_persister =
KVStoreWalletPersister::new(Arc::clone(&kv_store), Arc::clone(&logger));
let wallet_opt = BdkWallet::load()
.descriptor(KeychainKind::External, Some(descriptor.clone()))
.descriptor(KeychainKind::Internal, Some(change_descriptor.clone()))
.extract_keys()
.check_network(config.network)
.load_wallet(&mut wallet_persister)
.map_err(|e| match e {
bdk_wallet::LoadWithPersistError::InvalidChangeSet(
bdk_wallet::LoadError::Mismatch(bdk_wallet::LoadMismatch::Network {
loaded,
expected,
}),
) => {
log_error!(
logger,
"Failed to setup wallet: Networks do not match. Expected {} but got {}",
expected,
loaded
);
BuildError::NetworkMismatch
},
_ => {
log_error!(logger, "Failed to set up wallet: {}", e);
BuildError::WalletSetupFailed
},
})?;
let bdk_wallet = match wallet_opt {
Some(wallet) => wallet,
None => BdkWallet::create(descriptor, change_descriptor)
.network(config.network)
.create_wallet(&mut wallet_persister)
.map_err(|e| {
log_error!(logger, "Failed to set up wallet: {}", e);
BuildError::WalletSetupFailed
})?,
};
let tx_broadcaster = Arc::new(TransactionBroadcaster::new(Arc::clone(&logger)));
let fee_estimator = Arc::new(OnchainFeeEstimator::new());
let payment_store = match io::utils::read_payments(Arc::clone(&kv_store), Arc::clone(&logger)) {
Ok(payments) => Arc::new(PaymentStore::new(
payments,
PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE.to_string(),
PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE.to_string(),
Arc::clone(&kv_store),
Arc::clone(&logger),
)),
Err(_) => {
return Err(BuildError::ReadFailed);
},
};
let wallet = Arc::new(Wallet::new(
bdk_wallet,
wallet_persister,
Arc::clone(&tx_broadcaster),
Arc::clone(&fee_estimator),
Arc::clone(&payment_store),
Arc::clone(&config),
Arc::clone(&logger),
));
let chain_source = match chain_data_source_config {
Some(ChainDataSourceConfig::Esplora { server_url, sync_config }) => {
let sync_config = sync_config.unwrap_or(EsploraSyncConfig::default());
Arc::new(ChainSource::new_esplora(
server_url.clone(),
sync_config,
Arc::clone(&wallet),
Arc::clone(&fee_estimator),
Arc::clone(&tx_broadcaster),
Arc::clone(&kv_store),
Arc::clone(&config),
Arc::clone(&logger),
Arc::clone(&node_metrics),
))
},
Some(ChainDataSourceConfig::Electrum { server_url, sync_config }) => {
let sync_config = sync_config.unwrap_or(ElectrumSyncConfig::default());
Arc::new(ChainSource::new_electrum(
server_url.clone(),
sync_config,
Arc::clone(&wallet),
Arc::clone(&fee_estimator),
Arc::clone(&tx_broadcaster),
Arc::clone(&kv_store),
Arc::clone(&config),
Arc::clone(&logger),
Arc::clone(&node_metrics),
))
},
Some(ChainDataSourceConfig::BitcoindRpc { rpc_host, rpc_port, rpc_user, rpc_password }) => {
Arc::new(ChainSource::new_bitcoind_rpc(
rpc_host.clone(),
*rpc_port,
rpc_user.clone(),
rpc_password.clone(),
Arc::clone(&wallet),
Arc::clone(&fee_estimator),
Arc::clone(&tx_broadcaster),
Arc::clone(&kv_store),
Arc::clone(&config),
Arc::clone(&logger),
Arc::clone(&node_metrics),
))
},
None => {
let server_url = DEFAULT_ESPLORA_SERVER_URL.to_string();
let sync_config = EsploraSyncConfig::default();
Arc::new(ChainSource::new_esplora(
server_url.clone(),
sync_config,
Arc::clone(&wallet),
Arc::clone(&fee_estimator),
Arc::clone(&tx_broadcaster),
Arc::clone(&kv_store),
Arc::clone(&config),
Arc::clone(&logger),
Arc::clone(&node_metrics),
))
},
};
let runtime = Arc::new(RwLock::new(None));
let chain_monitor: Arc<ChainMonitor> = Arc::new(chainmonitor::ChainMonitor::new(
Some(Arc::clone(&chain_source)),
Arc::clone(&tx_broadcaster),
Arc::clone(&logger),
Arc::clone(&fee_estimator),
Arc::clone(&kv_store),
));
let cur_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).map_err(|e| {
log_error!(logger, "Failed to get current time: {}", e);
BuildError::InvalidSystemTime
})?;
let ldk_seed_bytes: [u8; 32] = xprv.private_key.secret_bytes();
let keys_manager = Arc::new(KeysManager::new(
&ldk_seed_bytes,
cur_time.as_secs(),
cur_time.subsec_nanos(),
Arc::clone(&wallet),
Arc::clone(&logger),
));
let network_graph =
match io::utils::read_network_graph(Arc::clone(&kv_store), Arc::clone(&logger)) {
Ok(graph) => Arc::new(graph),
Err(e) => {
if e.kind() == std::io::ErrorKind::NotFound {
Arc::new(Graph::new(config.network.into(), Arc::clone(&logger)))
} else {
return Err(BuildError::ReadFailed);
}
},
};
let scorer = match io::utils::read_scorer(
Arc::clone(&kv_store),
Arc::clone(&network_graph),
Arc::clone(&logger),
) {
Ok(scorer) => Arc::new(Mutex::new(scorer)),
Err(e) => {
if e.kind() == std::io::ErrorKind::NotFound {
let params = ProbabilisticScoringDecayParameters::default();
Arc::new(Mutex::new(ProbabilisticScorer::new(
params,
Arc::clone(&network_graph),
Arc::clone(&logger),
)))
} else {
return Err(BuildError::ReadFailed);
}
},
};
let scoring_fee_params = ProbabilisticScoringFeeParameters::default();
let router = Arc::new(DefaultRouter::new(
Arc::clone(&network_graph),
Arc::clone(&logger),
Arc::clone(&keys_manager),
Arc::clone(&scorer),
scoring_fee_params,
));
let channel_monitors = match read_channel_monitors(
Arc::clone(&kv_store),
Arc::clone(&keys_manager),
Arc::clone(&keys_manager),
) {
Ok(monitors) => monitors,
Err(e) => {
if e.kind() == lightning::io::ErrorKind::NotFound {
Vec::new()
} else {
log_error!(logger, "Failed to read channel monitors: {}", e.to_string());
return Err(BuildError::ReadFailed);
}
},
};
let mut user_config = default_user_config(&config);
if liquidity_source_config.and_then(|lsc| lsc.lsps2_client.as_ref()).is_some() {
user_config.channel_config.accept_underpaying_htlcs = true;
user_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel =
100;
}
if liquidity_source_config.and_then(|lsc| lsc.lsps2_service.as_ref()).is_some() {
user_config.accept_intercept_htlcs = true;
user_config.accept_forwards_to_priv_channels = true;
user_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel =
100;
}
let message_router =
Arc::new(MessageRouter::new(Arc::clone(&network_graph), Arc::clone(&keys_manager)));
let channel_manager = {
if let Ok(res) = kv_store.read(
CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
CHANNEL_MANAGER_PERSISTENCE_KEY,
) {
let mut reader = Cursor::new(res);
let channel_monitor_references =
channel_monitors.iter().map(|(_, chanmon)| chanmon).collect();
let read_args = ChannelManagerReadArgs::new(
Arc::clone(&keys_manager),
Arc::clone(&keys_manager),
Arc::clone(&keys_manager),
Arc::clone(&fee_estimator),
Arc::clone(&chain_monitor),
Arc::clone(&tx_broadcaster),
Arc::clone(&router),
Arc::clone(&message_router),
Arc::clone(&logger),
user_config,
channel_monitor_references,
);
let (_hash, channel_manager) =
<(BlockHash, ChannelManager)>::read(&mut reader, read_args).map_err(|e| {
log_error!(logger, "Failed to read channel manager from KVStore: {}", e);
BuildError::ReadFailed
})?;
channel_manager
} else {
let genesis_block_hash =
bitcoin::blockdata::constants::genesis_block(config.network).block_hash();
let chain_params = ChainParameters {
network: config.network.into(),
best_block: BestBlock::new(genesis_block_hash, 0),
};
channelmanager::ChannelManager::new(
Arc::clone(&fee_estimator),
Arc::clone(&chain_monitor),
Arc::clone(&tx_broadcaster),
Arc::clone(&router),
Arc::clone(&message_router),
Arc::clone(&logger),
Arc::clone(&keys_manager),
Arc::clone(&keys_manager),
Arc::clone(&keys_manager),
user_config,
chain_params,
cur_time.as_secs() as u32,
)
}
};
let channel_manager = Arc::new(channel_manager);
for (_blockhash, channel_monitor) in channel_monitors.into_iter() {
let funding_outpoint = channel_monitor.get_funding_txo().0;
chain_monitor.watch_channel(funding_outpoint, channel_monitor).map_err(|e| {
log_error!(logger, "Failed to watch channel monitor: {:?}", e);
BuildError::InvalidChannelMonitor
})?;
}
let onion_messenger: Arc<OnionMessenger> = Arc::new(OnionMessenger::new(
Arc::clone(&keys_manager),
Arc::clone(&keys_manager),
Arc::clone(&logger),
Arc::clone(&channel_manager),
message_router,
Arc::clone(&channel_manager),
IgnoringMessageHandler {},
IgnoringMessageHandler {},
IgnoringMessageHandler {},
));
let ephemeral_bytes: [u8; 32] = keys_manager.get_secure_random_bytes();
let gossip_source_config = gossip_source_config.unwrap_or(&GossipSourceConfig::P2PNetwork);
let gossip_source = match gossip_source_config {
GossipSourceConfig::P2PNetwork => {
let p2p_source =
Arc::new(GossipSource::new_p2p(Arc::clone(&network_graph), Arc::clone(&logger)));
{
let mut locked_node_metrics = node_metrics.write().unwrap();
locked_node_metrics.latest_rgs_snapshot_timestamp = None;
write_node_metrics(
&*locked_node_metrics,
Arc::clone(&kv_store),
Arc::clone(&logger),
)
.map_err(|e| {
log_error!(logger, "Failed writing to store: {}", e);
BuildError::WriteFailed
})?;
}
p2p_source
},
GossipSourceConfig::RapidGossipSync(rgs_server) => {
let latest_sync_timestamp =
node_metrics.read().unwrap().latest_rgs_snapshot_timestamp.unwrap_or(0);
Arc::new(GossipSource::new_rgs(
rgs_server.clone(),
latest_sync_timestamp,
Arc::clone(&network_graph),
Arc::clone(&logger),
))
},
};
let (liquidity_source, custom_message_handler) =
if let Some(lsc) = liquidity_source_config.as_ref() {
let mut liquidity_source_builder = LiquiditySourceBuilder::new(
Arc::clone(&wallet),
Arc::clone(&channel_manager),
Arc::clone(&keys_manager),
Arc::clone(&chain_source),
Arc::clone(&config),
Arc::clone(&logger),
);
lsc.lsps1_client.as_ref().map(|config| {
liquidity_source_builder.lsps1_client(
config.node_id,
config.address.clone(),
config.token.clone(),
)
});
lsc.lsps2_client.as_ref().map(|config| {
liquidity_source_builder.lsps2_client(
config.node_id,
config.address.clone(),
config.token.clone(),
)
});
let promise_secret = {
let lsps_xpriv = derive_xprv(
Arc::clone(&config),
&seed_bytes,
LSPS_HARDENED_CHILD_INDEX,
Arc::clone(&logger),
)?;
lsps_xpriv.private_key.secret_bytes()
};
lsc.lsps2_service.as_ref().map(|config| {
liquidity_source_builder.lsps2_service(promise_secret, config.clone())
});
let liquidity_source = Arc::new(liquidity_source_builder.build());
let custom_message_handler =
Arc::new(NodeCustomMessageHandler::new_liquidity(Arc::clone(&liquidity_source)));
(Some(liquidity_source), custom_message_handler)
} else {
(None, Arc::new(NodeCustomMessageHandler::new_ignoring()))
};
let msg_handler = match gossip_source.as_gossip_sync() {
GossipSync::P2P(p2p_gossip_sync) => MessageHandler {
chan_handler: Arc::clone(&channel_manager),
route_handler: Arc::clone(&p2p_gossip_sync)
as Arc<dyn RoutingMessageHandler + Sync + Send>,
onion_message_handler: Arc::clone(&onion_messenger),
custom_message_handler,
},
GossipSync::Rapid(_) => MessageHandler {
chan_handler: Arc::clone(&channel_manager),
route_handler: Arc::new(IgnoringMessageHandler {})
as Arc<dyn RoutingMessageHandler + Sync + Send>,
onion_message_handler: Arc::clone(&onion_messenger),
custom_message_handler,
},
GossipSync::None => {
unreachable!("We must always have a gossip sync!");
},
};
let cur_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).map_err(|e| {
log_error!(logger, "Failed to get current time: {}", e);
BuildError::InvalidSystemTime
})?;
let peer_manager = Arc::new(PeerManager::new(
msg_handler,
cur_time.as_secs().try_into().map_err(|e| {
log_error!(logger, "Failed to get current time: {}", e);
BuildError::InvalidSystemTime
})?,
&ephemeral_bytes,
Arc::clone(&logger),
Arc::clone(&keys_manager),
));
liquidity_source.as_ref().map(|l| l.set_peer_manager(Arc::clone(&peer_manager)));
gossip_source.set_gossip_verifier(
Arc::clone(&chain_source),
Arc::clone(&peer_manager),
Arc::clone(&runtime),
);
let connection_manager =
Arc::new(ConnectionManager::new(Arc::clone(&peer_manager), Arc::clone(&logger)));
let output_sweeper = match io::utils::read_output_sweeper(
Arc::clone(&tx_broadcaster),
Arc::clone(&fee_estimator),
Arc::clone(&chain_source),
Arc::clone(&keys_manager),
Arc::clone(&kv_store),
Arc::clone(&logger),
) {
Ok(output_sweeper) => Arc::new(output_sweeper),
Err(e) => {
if e.kind() == std::io::ErrorKind::NotFound {
Arc::new(OutputSweeper::new(
channel_manager.current_best_block(),
Arc::clone(&tx_broadcaster),
Arc::clone(&fee_estimator),
Some(Arc::clone(&chain_source)),
Arc::clone(&keys_manager),
Arc::clone(&keys_manager),
Arc::clone(&kv_store),
Arc::clone(&logger),
))
} else {
return Err(BuildError::ReadFailed);
}
},
};
match io::utils::migrate_deprecated_spendable_outputs(
Arc::clone(&output_sweeper),
Arc::clone(&kv_store),
Arc::clone(&logger),
) {
Ok(()) => {
log_info!(logger, "Successfully migrated OutputSweeper data.");
},
Err(e) => {
log_error!(logger, "Failed to migrate OutputSweeper data: {}", e);
return Err(BuildError::ReadFailed);
},
}
let event_queue = match io::utils::read_event_queue(Arc::clone(&kv_store), Arc::clone(&logger))
{
Ok(event_queue) => Arc::new(event_queue),
Err(e) => {
if e.kind() == std::io::ErrorKind::NotFound {
Arc::new(EventQueue::new(Arc::clone(&kv_store), Arc::clone(&logger)))
} else {
return Err(BuildError::ReadFailed);
}
},
};
let peer_store = match io::utils::read_peer_info(Arc::clone(&kv_store), Arc::clone(&logger)) {
Ok(peer_store) => Arc::new(peer_store),
Err(e) => {
if e.kind() == std::io::ErrorKind::NotFound {
Arc::new(PeerStore::new(Arc::clone(&kv_store), Arc::clone(&logger)))
} else {
return Err(BuildError::ReadFailed);
}
},
};
let (stop_sender, _) = tokio::sync::watch::channel(());
let (event_handling_stopped_sender, _) = tokio::sync::watch::channel(());
Ok(Node {
runtime,
stop_sender,
event_handling_stopped_sender,
config,
wallet,
chain_source,
tx_broadcaster,
event_queue,
channel_manager,
chain_monitor,
output_sweeper,
peer_manager,
onion_messenger,
connection_manager,
keys_manager,
network_graph,
gossip_source,
liquidity_source,
kv_store,
logger,
_router: router,
scorer,
peer_store,
payment_store,
is_listening,
node_metrics,
})
}
/// Instantiates the [`Logger`] backing the node from the user-provided writer configuration.
///
/// If no configuration was given, we default to a filesystem writer at
/// `<storage_dir_path>/DEFAULT_LOG_FILENAME` logging at [`DEFAULT_LOG_LEVEL`].
///
/// Returns [`BuildError::LoggerSetupFailed`] if the filesystem writer could not be set up.
fn setup_logger(
	log_writer_config: &Option<LogWriterConfig>, config: &Config,
) -> Result<Arc<Logger>, BuildError> {
	let logger = match log_writer_config {
		Some(LogWriterConfig::File { log_file_path, max_log_level }) => {
			// Fall back to a default file in the storage directory if no path was given.
			let log_file_path = log_file_path
				.clone()
				.unwrap_or_else(|| format!("{}/{}", config.storage_dir_path, DEFAULT_LOG_FILENAME));
			// `DEFAULT_LOG_LEVEL` is a constant, so no lazy `unwrap_or_else` is needed.
			let max_log_level = max_log_level.unwrap_or(DEFAULT_LOG_LEVEL);
			Logger::new_fs_writer(log_file_path, max_log_level)
				.map_err(|_| BuildError::LoggerSetupFailed)?
		},
		Some(LogWriterConfig::Log) => Logger::new_log_facade(),
		Some(LogWriterConfig::Custom(custom_log_writer)) => {
			// `custom_log_writer` already is a `&Arc`, so clone it directly.
			Logger::new_custom_writer(Arc::clone(custom_log_writer))
		},
		None => {
			// No explicit configuration: default to a file writer in the storage directory.
			let log_file_path = format!("{}/{}", config.storage_dir_path, DEFAULT_LOG_FILENAME);
			Logger::new_fs_writer(log_file_path, DEFAULT_LOG_LEVEL)
				.map_err(|_| BuildError::LoggerSetupFailed)?
		},
	};
	Ok(Arc::new(logger))
}
/// Derives the 64-byte wallet seed from the configured entropy source.
///
/// If no entropy source was configured, we read (or newly generate) a seed file at
/// `<storage_dir_path>/keys_seed`.
///
/// Returns [`BuildError::InvalidSeedFile`] if a configured or default seed file could not be
/// read or created.
fn seed_bytes_from_config(
	config: &Config, entropy_source_config: Option<&EntropySourceConfig>, logger: Arc<Logger>,
) -> Result<[u8; 64], BuildError> {
	match entropy_source_config {
		// `[u8; 64]` is `Copy`, so dereferencing suffices — no need to clone.
		Some(EntropySourceConfig::SeedBytes(bytes)) => Ok(*bytes),
		Some(EntropySourceConfig::SeedFile(seed_path)) => {
			io::utils::read_or_generate_seed_file(seed_path, Arc::clone(&logger))
				.map_err(|_| BuildError::InvalidSeedFile)
		},
		// Per BIP39, an absent passphrase is equivalent to the empty string.
		Some(EntropySourceConfig::Bip39Mnemonic { mnemonic, passphrase }) => match passphrase {
			Some(passphrase) => Ok(mnemonic.to_seed(passphrase)),
			None => Ok(mnemonic.to_seed("")),
		},
		None => {
			let seed_path = format!("{}/keys_seed", config.storage_dir_path);
			io::utils::read_or_generate_seed_file(&seed_path, Arc::clone(&logger))
				.map_err(|_| BuildError::InvalidSeedFile)
		},
	}
}
/// Builds a BIP32 master key from `seed_bytes` for the configured network and derives the
/// hardened child key at `hardened_child_index` from it.
///
/// Returns [`BuildError::InvalidSeedBytes`] if either derivation step fails.
fn derive_xprv(
	config: Arc<Config>, seed_bytes: &[u8; 64], hardened_child_index: u32, logger: Arc<Logger>,
) -> Result<Xpriv, BuildError> {
	use bitcoin::key::Secp256k1;
	let secp_ctx = Secp256k1::new();
	let derivation_path = [ChildNumber::Hardened { index: hardened_child_index }];
	match Xpriv::new_master(config.network, seed_bytes) {
		Ok(master_xprv) => {
			master_xprv.derive_priv(&secp_ctx, &derivation_path).map_err(|e| {
				log_error!(logger, "Failed to derive hardened child secret: {}", e);
				BuildError::InvalidSeedBytes
			})
		},
		Err(e) => {
			log_error!(logger, "Failed to derive master secret: {}", e);
			Err(BuildError::InvalidSeedBytes)
		},
	}
}
/// Validates and converts a user-provided alias string into a [`NodeAlias`].
///
/// The input is trimmed of leading/trailing whitespace and must fit into the 32 bytes of a
/// node alias; shorter aliases are zero-padded. Note that interior NUL bytes (and any bytes
/// following them) are *not* stripped — presumably `NodeAlias`' display logic stops at the
/// first NUL (see the `sanitize_alias_with_sandwiched_null` test).
///
/// Returns [`BuildError::InvalidNodeAlias`] if the trimmed alias exceeds 32 bytes.
pub(crate) fn sanitize_alias(alias_str: &str) -> Result<NodeAlias, BuildError> {
	let alias = alias_str.trim();
	// `str::len` already is the UTF-8 byte length, which the 32-byte limit is about.
	let len = alias.len();
	if len > 32 {
		return Err(BuildError::InvalidNodeAlias);
	}
	let mut bytes = [0u8; 32];
	bytes[..len].copy_from_slice(alias.as_bytes());
	Ok(NodeAlias(bytes))
}
#[cfg(test)]
mod tests {
	use super::{sanitize_alias, BuildError, NodeAlias};

	#[test]
	fn sanitize_empty_node_alias() {
		// An empty alias sanitizes to the all-zero `NodeAlias`.
		// (Dropped a previously-unused scratch buffer that only triggered a warning.)
		let alias = "";
		let expected_node_alias = NodeAlias([0; 32]);
		let node_alias = sanitize_alias(alias).unwrap();
		assert_eq!(node_alias, expected_node_alias);
	}

	#[test]
	fn sanitize_alias_with_sandwiched_null() {
		// Sanitization keeps interior NULs and any bytes following them, so the result
		// differs byte-wise from the NUL-free expectation, while `Display` stops at the
		// first NUL and therefore matches the NUL-free alias.
		let alias = "I\u{1F496}LDK-Node!";
		let mut buf = [0u8; 32];
		buf[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes());
		let expected_alias = NodeAlias(buf);

		let user_provided_alias = "I\u{1F496}LDK-Node!\0\u{26A1}";
		let node_alias = sanitize_alias(user_provided_alias).unwrap();

		let node_alias_display = format!("{}", node_alias);

		assert_eq!(alias, &node_alias_display);
		assert_ne!(expected_alias, node_alias);
	}

	#[test]
	fn sanitize_alias_gt_32_bytes() {
		// Anything longer than 32 bytes after trimming must be rejected.
		let alias = "This is a string longer than thirty-two bytes!";
		let node = sanitize_alias(alias);
		assert_eq!(node.err().unwrap(), BuildError::InvalidNodeAlias);
	}
}