use bitcoin::block::Header;
use bitcoin::constants::ChainHash;
use bitcoin::key::constants::SECRET_KEY_SIZE;
use bitcoin::network::Network;
use bitcoin::transaction::Transaction;
use bitcoin::hash_types::{BlockHash, Txid};
use bitcoin::hashes::hmac::Hmac;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::{Hash, HashEngine, HmacEngine};
use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1::{PublicKey, SecretKey};
use bitcoin::{secp256k1, Sequence, SignedAmount};
use crate::blinded_path::message::{
AsyncPaymentsContext, BlindedMessagePath, MessageForwardNode, OffersContext,
};
use crate::blinded_path::payment::{
AsyncBolt12OfferContext, Bolt12OfferContext, PaymentContext, UnauthenticatedReceiveTlvs,
};
use crate::blinded_path::NodeIdLookUp;
use crate::chain;
use crate::chain::chaininterface::{
BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator,
};
use crate::chain::channelmonitor::{
Balance, ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, MonitorEvent,
WithChannelMonitor, ANTI_REORG_DELAY, CLTV_CLAIM_BUFFER, HTLC_FAIL_BACK_BUFFER,
LATENCY_GRACE_PERIOD_BLOCKS, MAX_BLOCKS_FOR_CONF,
};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Watch};
use crate::events::{
self, ClosureReason, Event, EventHandler, EventsProvider, HTLCHandlingFailureType,
InboundChannelFunds, PaymentFailureReason, ReplayEvent,
};
use crate::events::{FundingInfo, PaidBolt12Invoice};
use crate::ln::chan_utils::selected_commitment_sat_per_1000_weight;
#[cfg(any(test, fuzzing))]
use crate::ln::channel::QuiescentAction;
use crate::ln::channel::{
self, hold_time_since, Channel, ChannelError, ChannelUpdateStatus, DisconnectResult,
FundedChannel, FundingTxSigned, InboundV1Channel, OutboundV1Channel, PendingV2Channel,
ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, UpdateFulfillCommitFetch,
WithChannelContext,
};
use crate::ln::channel_state::ChannelDetails;
use crate::ln::funding::SpliceContribution;
use crate::ln::inbound_payment;
use crate::ln::interactivetxs::InteractiveTxMessageSend;
use crate::ln::msgs;
use crate::ln::msgs::{
BaseMessageHandler, ChannelMessageHandler, CommitmentUpdate, DecodeError, LightningError,
MessageSendEvent,
};
use crate::ln::onion_payment::{
check_incoming_htlc_cltv, create_fwd_pending_htlc_info, create_recv_pending_htlc_info,
decode_incoming_update_add_htlc_onion, invalid_payment_err_data, HopConnector, InboundHTLCErr,
NextPacketDetails,
};
use crate::ln::onion_utils::{self};
use crate::ln::onion_utils::{
decode_fulfill_attribution_data, HTLCFailReason, LocalHTLCFailureReason,
};
use crate::ln::onion_utils::{process_fulfill_attribution_data, AttributionData};
use crate::ln::our_peer_storage::{EncryptedOurPeerStorage, PeerStorageMonitorHolder};
#[cfg(test)]
use crate::ln::outbound_payment;
use crate::ln::outbound_payment::{
OutboundPayments, PendingOutboundPayment, RetryableInvoiceRequest, SendAlongPathArgs,
StaleExpiration,
};
use crate::ln::types::ChannelId;
use crate::offers::async_receive_offer_cache::AsyncReceiveOfferCache;
use crate::offers::flow::{HeldHtlcReplyPath, InvreqResponseInstructions, OffersMessageFlow};
use crate::offers::invoice::{
Bolt12Invoice, DerivedSigningPubkey, InvoiceBuilder, DEFAULT_RELATIVE_EXPIRY,
};
use crate::offers::invoice_error::InvoiceError;
use crate::offers::invoice_request::InvoiceRequest;
use crate::offers::nonce::Nonce;
use crate::offers::offer::{Offer, OfferFromHrn};
use crate::offers::parse::Bolt12SemanticError;
use crate::offers::refund::Refund;
use crate::offers::signer;
use crate::offers::static_invoice::StaticInvoice;
use crate::onion_message::async_payments::{
AsyncPaymentsMessage, AsyncPaymentsMessageHandler, HeldHtlcAvailable, OfferPaths,
OfferPathsRequest, ReleaseHeldHtlc, ServeStaticInvoice, StaticInvoicePersisted,
};
use crate::onion_message::dns_resolution::HumanReadableName;
use crate::onion_message::messenger::{
MessageRouter, MessageSendInstructions, Responder, ResponseInstruction,
};
use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
use crate::routing::router::{
BlindedTail, FixedRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route,
RouteParameters, RouteParametersConfig, Router,
};
use crate::sign::ecdsa::EcdsaChannelSigner;
use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
#[cfg(any(feature = "_test_utils", test))]
use crate::types::features::Bolt11InvoiceFeatures;
use crate::types::features::{
Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures,
};
use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret};
use crate::types::string::UntrustedString;
use crate::util::config::{ChannelConfig, ChannelConfigOverrides, ChannelConfigUpdate, UserConfig};
use crate::util::errors::APIError;
use crate::util::logger::{Level, Logger, WithContext};
use crate::util::scid_utils::fake_scid;
use crate::util::ser::{
BigSize, FixedLengthReader, LengthReadable, MaybeReadable, Readable, ReadableArgs, VecWriter,
WithoutLength, Writeable, Writer,
};
use crate::util::wakers::{Future, Notifier};
#[cfg(test)]
use crate::blinded_path::payment::BlindedPaymentPath;
#[cfg(feature = "dnssec")]
use {
crate::blinded_path::message::DNSResolverContext,
crate::onion_message::dns_resolution::{
DNSResolverMessage, DNSResolverMessageHandler, DNSSECProof, DNSSECQuery,
},
crate::onion_message::messenger::Destination,
};
#[cfg(c_bindings)]
use {
crate::offers::offer::OfferWithDerivedMetadataBuilder,
crate::offers::refund::RefundMaybeWithDerivedMetadataBuilder,
};
#[cfg(not(c_bindings))]
use {
crate::offers::offer::{DerivedMetadata, OfferBuilder},
crate::offers::refund::RefundBuilder,
crate::onion_message::messenger::DefaultMessageRouter,
crate::routing::gossip::NetworkGraph,
crate::routing::router::DefaultRouter,
crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters},
crate::sign::KeysManager,
};
use lightning_invoice::{
Bolt11Invoice, Bolt11InvoiceDescription, CreationError, Currency, Description,
InvoiceBuilder as Bolt11InvoiceBuilder, SignOrCreationError, DEFAULT_EXPIRY_TIME,
};
use alloc::collections::{btree_map, BTreeMap};
use crate::io;
use crate::io::Read;
use crate::prelude::*;
use crate::sync::{Arc, FairRwLock, LockHeldState, LockTestExt, Mutex, RwLock, RwLockReadGuard};
use bitcoin::hex::impl_fmt_traits;
use core::borrow::Borrow;
use core::cell::RefCell;
use core::convert::Infallible;
use core::ops::Deref;
use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use core::time::Duration;
use core::{cmp, mem};
#[cfg(any(test, feature = "_externalize_tests"))]
pub(crate) use crate::ln::outbound_payment::PaymentSendFailure;
pub use crate::ln::outbound_payment::{
Bolt11PaymentError, Bolt12PaymentError, ProbeSendFailure, RecipientOnionFields, Retry,
RetryableSendFailure,
};
use crate::ln::script::ShutdownScript;
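/// Information about where a received HTLC('s onion) has indicated the HTLC should go.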
#[derive(Clone)]
#[cfg_attr(test, derive(Debug, PartialEq))]
pub enum PendingHTLCRouting {
Forward {
onion_packet: msgs::OnionPacket,
short_channel_id: u64,
blinded: Option<BlindedForward>,
incoming_cltv_expiry: Option<u32>,
hold_htlc: Option<()>,
},
TrampolineForward {
incoming_shared_secret: [u8; 32],
onion_packet: msgs::TrampolineOnionPacket,
node_id: PublicKey,
blinded: Option<BlindedForward>,
incoming_cltv_expiry: u32,
},
Receive {
payment_data: msgs::FinalOnionHopData,
payment_metadata: Option<Vec<u8>>,
payment_context: Option<PaymentContext>,
incoming_cltv_expiry: u32,
phantom_shared_secret: Option<[u8; 32]>,
custom_tlvs: Vec<(u64, Vec<u8>)>,
requires_blinded_error: bool,
},
ReceiveKeysend {
payment_data: Option<msgs::FinalOnionHopData>,
payment_preimage: PaymentPreimage,
payment_metadata: Option<Vec<u8>>,
incoming_cltv_expiry: u32,
custom_tlvs: Vec<(u64, Vec<u8>)>,
requires_blinded_error: bool,
has_recipient_created_payment_secret: bool,
invoice_request: Option<InvoiceRequest>,
payment_context: Option<PaymentContext>,
},
}
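/// Information used to forward or fail this HTLC that is being forwarded within a blinded path.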
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct BlindedForward {
pub inbound_blinding_point: PublicKey,
pub failure: BlindedFailure,
pub next_blinding_override: Option<PublicKey>,
}
impl PendingHTLCRouting {
fn blinded_failure(&self) -> Option<BlindedFailure> {
match self {
Self::Forward { blinded: Some(BlindedForward { failure, .. }), .. } => Some(*failure),
Self::TrampolineForward { blinded: Some(BlindedForward { failure, .. }), .. } => {
Some(*failure)
},
Self::Receive { requires_blinded_error: true, .. } => {
Some(BlindedFailure::FromBlindedNode)
},
Self::ReceiveKeysend { requires_blinded_error: true, .. } => {
Some(BlindedFailure::FromBlindedNode)
},
_ => None,
}
}
fn incoming_cltv_expiry(&self) -> Option<u32> {
match self {
Self::Forward { incoming_cltv_expiry, .. } => *incoming_cltv_expiry,
Self::TrampolineForward { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry),
Self::Receive { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry),
Self::ReceiveKeysend { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry),
}
}
pub(super) fn should_hold_htlc(&self) -> bool {
match self {
Self::Forward { hold_htlc: Some(()), .. } => true,
_ => false,
}
}
}
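/// Information about an incoming HTLC, including the [`PendingHTLCRouting`] describing where it
/// should go.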
#[derive(Clone)]
#[cfg_attr(test, derive(Debug, PartialEq))]
pub struct PendingHTLCInfo {
pub routing: PendingHTLCRouting,
pub incoming_shared_secret: [u8; 32],
pub payment_hash: PaymentHash,
pub incoming_amt_msat: Option<u64>,
pub outgoing_amt_msat: u64,
pub outgoing_cltv_value: u32,
pub skimmed_fee_msat: Option<u64>,
}
#[derive(Clone)]
pub(super) enum HTLCFailureMsg {
Relay(msgs::UpdateFailHTLC),
Malformed(msgs::UpdateFailMalformedHTLC),
}
#[derive(Clone)]
pub(super) enum PendingHTLCStatus {
Forward(PendingHTLCInfo),
Fail(HTLCFailureMsg),
}
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
pub(super) struct PendingAddHTLCInfo {
pub(super) forward_info: PendingHTLCInfo,
prev_outbound_scid_alias: u64,
prev_htlc_id: u64,
prev_counterparty_node_id: PublicKey,
prev_channel_id: ChannelId,
prev_funding_outpoint: OutPoint,
prev_user_channel_id: u128,
}
impl PendingAddHTLCInfo {
fn htlc_previous_hop_data(&self) -> HTLCPreviousHopData {
let phantom_shared_secret = match self.forward_info.routing {
PendingHTLCRouting::Receive { phantom_shared_secret, .. } => phantom_shared_secret,
_ => None,
};
HTLCPreviousHopData {
prev_outbound_scid_alias: self.prev_outbound_scid_alias,
user_channel_id: Some(self.prev_user_channel_id),
outpoint: self.prev_funding_outpoint,
channel_id: self.prev_channel_id,
counterparty_node_id: Some(self.prev_counterparty_node_id),
htlc_id: self.prev_htlc_id,
incoming_packet_shared_secret: self.forward_info.incoming_shared_secret,
phantom_shared_secret,
blinded_failure: self.forward_info.routing.blinded_failure(),
cltv_expiry: self.forward_info.routing.incoming_cltv_expiry(),
}
}
}
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
pub(super) enum HTLCForwardInfo {
AddHTLC(PendingAddHTLCInfo),
FailHTLC { htlc_id: u64, err_packet: msgs::OnionErrorPacket },
FailMalformedHTLC { htlc_id: u64, failure_code: u16, sha256_of_onion: [u8; 32] },
}
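/// Whether this blinded HTLC is being failed backwards by the introduction node or a blinded node,
/// which determines how the failure message should be encrypted back towards the sender.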
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub enum BlindedFailure {
FromIntroductionNode,
FromBlindedNode,
}
#[derive(PartialEq, Eq)]
enum OnionPayload {
Invoice {
_legacy_hop_data: Option<msgs::FinalOnionHopData>,
},
Spontaneous(PaymentPreimage),
}
#[derive(PartialEq, Eq)]
struct ClaimableHTLC {
prev_hop: HTLCPreviousHopData,
cltv_expiry: u32,
value: u64,
sender_intended_value: u64,
onion_payload: OnionPayload,
timer_ticks: u8,
total_value_received: Option<u64>,
total_msat: u64,
counterparty_skimmed_fee_msat: Option<u64>,
}
impl From<&ClaimableHTLC> for events::ClaimedHTLC {
fn from(val: &ClaimableHTLC) -> Self {
events::ClaimedHTLC {
counterparty_node_id: val.prev_hop.counterparty_node_id,
channel_id: val.prev_hop.channel_id,
user_channel_id: val.prev_hop.user_channel_id.unwrap_or(0),
cltv_expiry: val.cltv_expiry,
value_msat: val.value,
counterparty_skimmed_fee_msat: val.counterparty_skimmed_fee_msat.unwrap_or(0),
}
}
}
impl PartialOrd for ClaimableHTLC {
fn partial_cmp(&self, other: &ClaimableHTLC) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl Ord for ClaimableHTLC {
fn cmp(&self, other: &ClaimableHTLC) -> cmp::Ordering {
let res = (self.prev_hop.channel_id, self.prev_hop.htlc_id)
.cmp(&(other.prev_hop.channel_id, other.prev_hop.htlc_id));
if res.is_eq() {
debug_assert!(self == other, "ClaimableHTLCs from the same source should be identical");
}
res
}
}
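/// A trait defining behavior for computing an HMAC over a value using a [`Nonce`] and our
/// [`inbound_payment::ExpandedKey`], and for verifying such an HMAC when the value is later
/// handed back to us, allowing us to authenticate data we previously gave out.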
pub trait Verification {
fn hmac_for_offer_payment(
&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
) -> Hmac<Sha256>;
fn verify_for_offer_payment(
&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
) -> Result<(), ()>;
}
impl Verification for UnauthenticatedReceiveTlvs {
fn hmac_for_offer_payment(
&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
) -> Hmac<Sha256> {
signer::hmac_for_payment_tlvs(self, nonce, expanded_key)
}
fn verify_for_offer_payment(
&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
) -> Result<(), ()> {
signer::verify_payment_tlvs(self, hmac, nonce, expanded_key)
}
}
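/// A user-provided identifier in [`ChannelManager::send_payment`] used to uniquely identify a
/// payment and ensure idempotency in LDK.
///
/// One common pattern (a sketch, not mandated by LDK) is to reuse the payment hash bytes:
///
/// ```ignore
/// let payment_id = PaymentId(payment_hash.0);
/// ```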
#[derive(Hash, Copy, Clone, PartialEq, Eq)]
pub struct PaymentId(pub [u8; Self::LENGTH]);
impl PaymentId {
pub const LENGTH: usize = 32;
}
impl PaymentId {
fn for_inbound_from_htlcs<I: Iterator<Item = (ChannelId, u64)>>(
key: &[u8; 32], htlcs: I,
) -> PaymentId {
let mut prev_pair = None;
let mut hasher = HmacEngine::new(key);
for (channel_id, htlc_id) in htlcs {
hasher.input(&channel_id.0);
hasher.input(&htlc_id.to_le_bytes());
if let Some(prev) = prev_pair {
debug_assert!(prev < (channel_id, htlc_id), "HTLCs should be sorted");
}
prev_pair = Some((channel_id, htlc_id));
}
PaymentId(Hmac::<Sha256>::from_engine(hasher).to_byte_array())
}
}
impl Borrow<[u8]> for PaymentId {
fn borrow(&self) -> &[u8] {
&self.0[..]
}
}
impl_fmt_traits! {
impl fmt_traits for PaymentId {
const LENGTH: usize = 32;
}
}
impl Writeable for PaymentId {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
self.0.write(w)
}
}
impl Readable for PaymentId {
fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
let buf: [u8; 32] = Readable::read(r)?;
Ok(PaymentId(buf))
}
}
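/// An identifier used to uniquely identify an intercepted HTLC to LDK.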
#[derive(Hash, Copy, Clone, PartialEq, Eq)]
pub struct InterceptId(pub [u8; 32]);
impl InterceptId {
fn from_incoming_shared_secret(ss: &[u8; 32]) -> Self {
Self(Sha256::hash(ss).to_byte_array())
}
fn from_htlc_id_and_chan_id(
htlc_id: u64, channel_id: &ChannelId, counterparty_node_id: &PublicKey,
) -> Self {
let mut sha = Sha256::engine();
sha.input(&htlc_id.to_be_bytes());
sha.input(&channel_id.0);
sha.input(&counterparty_node_id.serialize());
Self(Sha256::from_engine(sha).to_byte_array())
}
}
impl Borrow<[u8]> for InterceptId {
fn borrow(&self) -> &[u8] {
&self.0[..]
}
}
impl_fmt_traits! {
impl fmt_traits for InterceptId {
const LENGTH: usize = 32;
}
}
impl Writeable for InterceptId {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
self.0.write(w)
}
}
impl Readable for InterceptId {
fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
let buf: [u8; 32] = Readable::read(r)?;
Ok(InterceptId(buf))
}
}
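/// Optional parameters for use when paying an offer via [`ChannelManager::pay_for_offer`]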
#[cfg_attr(
feature = "dnssec",
doc = "and [`ChannelManager::pay_for_offer_from_human_readable_name`]"
)]
pub struct OptionalOfferPaymentParams {
pub payer_note: Option<String>,
pub route_params_config: RouteParametersConfig,
pub retry_strategy: Retry,
}
impl Default for OptionalOfferPaymentParams {
fn default() -> Self {
Self {
payer_note: None,
route_params_config: Default::default(),
#[cfg(feature = "std")]
retry_strategy: Retry::Timeout(core::time::Duration::from_secs(2)),
#[cfg(not(feature = "std"))]
retry_strategy: Retry::Attempts(3),
}
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub(crate) enum SentHTLCId {
PreviousHopData { prev_outbound_scid_alias: u64, htlc_id: u64 },
OutboundRoute { session_priv: [u8; SECRET_KEY_SIZE] },
}
impl SentHTLCId {
pub(crate) fn from_source(source: &HTLCSource) -> Self {
match source {
HTLCSource::PreviousHopData(hop_data) => Self::PreviousHopData {
prev_outbound_scid_alias: hop_data.prev_outbound_scid_alias,
htlc_id: hop_data.htlc_id,
},
HTLCSource::OutboundRoute { session_priv, .. } => {
Self::OutboundRoute { session_priv: session_priv.secret_bytes() }
},
}
}
}
impl_writeable_tlv_based_enum!(SentHTLCId,
(0, PreviousHopData) => {
(0, prev_outbound_scid_alias, required),
(2, htlc_id, required),
},
(2, OutboundRoute) => {
(0, session_priv, required),
},
);
type PerSourcePendingForward =
(u64, PublicKey, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>);
type FailedHTLCForward = (HTLCSource, PaymentHash, HTLCFailReason, HTLCHandlingFailureType);
mod fuzzy_channelmanager {
use super::*;
#[allow(clippy::derive_hash_xor_eq)]
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum HTLCSource {
PreviousHopData(HTLCPreviousHopData),
OutboundRoute {
path: Path,
session_priv: SecretKey,
first_hop_htlc_msat: u64,
payment_id: PaymentId,
bolt12_invoice: Option<PaidBolt12Invoice>,
},
}
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub struct HTLCPreviousHopData {
pub prev_outbound_scid_alias: u64,
pub user_channel_id: Option<u128>,
pub htlc_id: u64,
pub incoming_packet_shared_secret: [u8; 32],
pub phantom_shared_secret: Option<[u8; 32]>,
pub blinded_failure: Option<BlindedFailure>,
pub channel_id: ChannelId,
pub outpoint: OutPoint,
pub counterparty_node_id: Option<PublicKey>,
pub cltv_expiry: Option<u32>,
}
}
#[cfg(fuzzing)]
pub use self::fuzzy_channelmanager::*;
#[cfg(not(fuzzing))]
pub(crate) use self::fuzzy_channelmanager::*;
#[allow(clippy::derive_hash_xor_eq)]
impl core::hash::Hash for HTLCSource {
fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
match self {
HTLCSource::PreviousHopData(prev_hop_data) => {
0u8.hash(hasher);
prev_hop_data.hash(hasher);
},
HTLCSource::OutboundRoute {
path,
session_priv,
payment_id,
first_hop_htlc_msat,
bolt12_invoice,
} => {
1u8.hash(hasher);
path.hash(hasher);
session_priv[..].hash(hasher);
payment_id.hash(hasher);
first_hop_htlc_msat.hash(hasher);
bolt12_invoice.hash(hasher);
},
}
}
}
impl HTLCSource {
#[cfg(any(test, all(ldk_test_vectors, feature = "grind_signatures")))]
pub fn dummy() -> Self {
HTLCSource::OutboundRoute {
path: Path { hops: Vec::new(), blinded_tail: None },
session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
first_hop_htlc_msat: 0,
payment_id: PaymentId([2; 32]),
bolt12_invoice: None,
}
}
pub(crate) fn possibly_matches_output(
&self, htlc: &super::chan_utils::HTLCOutputInCommitment,
) -> bool {
if let HTLCSource::OutboundRoute { first_hop_htlc_msat, .. } = self {
*first_hop_htlc_msat == htlc.amount_msat
} else {
true
}
}
pub(crate) fn inbound_htlc_expiry(&self) -> Option<u32> {
match self {
Self::PreviousHopData(HTLCPreviousHopData { cltv_expiry, .. }) => *cltv_expiry,
_ => None,
}
}
pub(crate) fn static_invoice(&self) -> Option<StaticInvoice> {
match self {
Self::OutboundRoute {
bolt12_invoice: Some(PaidBolt12Invoice::StaticInvoice(inv)),
..
} => Some(inv.clone()),
_ => None,
}
}
}
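/// Specifies which kind of error data to send to peers when failing back an HTLC using
/// [`ChannelManager::fail_htlc_backwards_with_reason`].
///
/// A usage sketch (assuming `channel_manager` and `payment_hash` are in scope):
///
/// ```ignore
/// channel_manager
///     .fail_htlc_backwards_with_reason(&payment_hash, FailureCode::IncorrectOrUnknownPaymentDetails);
/// ```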
#[derive(Clone, Copy)]
pub enum FailureCode {
TemporaryNodeFailure,
RequiredNodeFeatureMissing,
IncorrectOrUnknownPaymentDetails,
InvalidOnionPayload(Option<(u64, u16)>),
}
impl Into<LocalHTLCFailureReason> for FailureCode {
fn into(self) -> LocalHTLCFailureReason {
match self {
FailureCode::TemporaryNodeFailure => LocalHTLCFailureReason::TemporaryNodeFailure,
FailureCode::RequiredNodeFeatureMissing => LocalHTLCFailureReason::RequiredNodeFeature,
FailureCode::IncorrectOrUnknownPaymentDetails => {
LocalHTLCFailureReason::IncorrectPaymentDetails
},
FailureCode::InvalidOnionPayload(_) => LocalHTLCFailureReason::InvalidOnionPayload,
}
}
}
struct MsgHandleErrInternal {
err: msgs::LightningError,
closes_channel: bool,
shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
tx_abort: Option<msgs::TxAbort>,
}
impl MsgHandleErrInternal {
fn send_err_msg_no_close(err: String, channel_id: ChannelId) -> Self {
Self {
err: LightningError {
err: err.clone(),
action: msgs::ErrorAction::SendErrorMessage {
msg: msgs::ErrorMessage { channel_id, data: err },
},
},
closes_channel: false,
shutdown_finish: None,
tx_abort: None,
}
}
fn from_no_close(err: msgs::LightningError) -> Self {
Self { err, closes_channel: false, shutdown_finish: None, tx_abort: None }
}
fn from_finish_shutdown(
err: String, channel_id: ChannelId, shutdown_res: ShutdownResult,
channel_update: Option<msgs::ChannelUpdate>,
) -> Self {
let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
let action = if shutdown_res.monitor_update.is_some() {
msgs::ErrorAction::DisconnectPeer { msg: Some(err_msg) }
} else {
msgs::ErrorAction::SendErrorMessage { msg: err_msg }
};
Self {
err: LightningError { err, action },
closes_channel: true,
shutdown_finish: Some((shutdown_res, channel_update)),
tx_abort: None,
}
}
fn from_chan_no_close(err: ChannelError, channel_id: ChannelId) -> Self {
let tx_abort = match &err {
&ChannelError::Abort(reason) => Some(reason.into_tx_abort_msg(channel_id)),
_ => None,
};
let err = match err {
ChannelError::Warn(msg) => LightningError {
err: msg.clone(),
action: msgs::ErrorAction::SendWarningMessage {
msg: msgs::WarningMessage { channel_id, data: msg },
log_level: Level::Warn,
},
},
ChannelError::WarnAndDisconnect(msg) => LightningError {
err: msg.clone(),
action: msgs::ErrorAction::DisconnectPeerWithWarning {
msg: msgs::WarningMessage { channel_id, data: msg },
},
},
ChannelError::Ignore(msg) => {
LightningError { err: msg, action: msgs::ErrorAction::IgnoreError }
},
ChannelError::Abort(reason) => {
LightningError { err: reason.to_string(), action: msgs::ErrorAction::IgnoreError }
},
ChannelError::Close((msg, _)) | ChannelError::SendError(msg) => LightningError {
err: msg.clone(),
action: msgs::ErrorAction::SendErrorMessage {
msg: msgs::ErrorMessage { channel_id, data: msg },
},
},
};
Self { err, closes_channel: false, shutdown_finish: None, tx_abort }
}
fn dont_send_error_message(&mut self) {
match &mut self.err.action {
msgs::ErrorAction::DisconnectPeer { msg } => *msg = None,
msgs::ErrorAction::SendErrorMessage { msg: _ } => {
self.err.action = msgs::ErrorAction::IgnoreError;
},
_ => {},
}
}
fn closes_channel(&self) -> bool {
self.closes_channel
}
}
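/// When both a `commitment_signed` and a `revoke_and_ack` are pending to be sent to a
/// counterparty, the order in which they should go out.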
#[derive(Clone, PartialEq, Debug)]
pub(super) enum RAACommitmentOrder {
CommitmentFirst,
RevokeAndACKFirst,
}
pub(super) enum ChannelReadyOrder {
ChannelReadyFirst,
SignaturesFirst,
}
#[derive(Clone, Debug, PartialEq, Eq)]
struct ClaimingPayment {
amount_msat: u64,
payment_purpose: events::PaymentPurpose,
receiver_node_id: PublicKey,
htlcs: Vec<events::ClaimedHTLC>,
sender_intended_value: Option<u64>,
onion_fields: Option<RecipientOnionFields>,
payment_id: Option<PaymentId>,
durable_preimage_channel: Option<(OutPoint, PublicKey, ChannelId)>,
}
impl_writeable_tlv_based!(ClaimingPayment, {
(0, amount_msat, required),
(1, durable_preimage_channel, option),
(2, payment_purpose, required),
(4, receiver_node_id, required),
(5, htlcs, optional_vec),
(7, sender_intended_value, option),
(9, onion_fields, option),
(11, payment_id, option),
});
struct ClaimablePayment {
purpose: events::PaymentPurpose,
onion_fields: Option<RecipientOnionFields>,
htlcs: Vec<ClaimableHTLC>,
}
impl ClaimablePayment {
fn inbound_payment_id(&self, secret: &[u8; 32]) -> PaymentId {
PaymentId::for_inbound_from_htlcs(
secret,
self.htlcs.iter().map(|htlc| (htlc.prev_hop.channel_id, htlc.prev_hop.htlc_id)),
)
}
fn receiving_channel_ids(&self) -> Vec<(ChannelId, Option<u128>)> {
self.htlcs
.iter()
.map(|htlc| (htlc.prev_hop.channel_id, htlc.prev_hop.user_channel_id))
.collect()
}
}
enum FundingType {
Checked(Transaction),
CheckedManualBroadcast(Transaction),
Unchecked(OutPoint),
}
impl FundingType {
fn txid(&self) -> Txid {
match self {
FundingType::Checked(tx) => tx.compute_txid(),
FundingType::CheckedManualBroadcast(tx) => tx.compute_txid(),
FundingType::Unchecked(outp) => outp.txid,
}
}
fn transaction_or_dummy(&self) -> Transaction {
match self {
FundingType::Checked(tx) => tx.clone(),
FundingType::CheckedManualBroadcast(tx) => tx.clone(),
FundingType::Unchecked(_) => Transaction {
version: bitcoin::transaction::Version::TWO,
lock_time: bitcoin::absolute::LockTime::ZERO,
input: Vec::new(),
output: Vec::new(),
},
}
}
fn is_manual_broadcast(&self) -> bool {
match self {
FundingType::Checked(_) => false,
FundingType::CheckedManualBroadcast(_) => true,
FundingType::Unchecked(_) => true,
}
}
}
struct ClaimablePayments {
claimable_payments: HashMap<PaymentHash, ClaimablePayment>,
pending_claiming_payments: HashMap<PaymentHash, ClaimingPayment>,
}
impl ClaimablePayments {
#[rustfmt::skip]
fn begin_claiming_payment<L: Deref, S: Deref>(
&mut self, payment_hash: PaymentHash, node_signer: &S, logger: &L,
inbound_payment_id_secret: &[u8; 32], custom_tlvs_known: bool,
) -> Result<(Vec<ClaimableHTLC>, ClaimingPayment), Vec<ClaimableHTLC>>
where L::Target: Logger, S::Target: NodeSigner,
{
match self.claimable_payments.remove(&payment_hash) {
Some(payment) => {
let mut receiver_node_id = node_signer.get_node_id(Recipient::Node)
.expect("Failed to get node_id for node recipient");
for htlc in payment.htlcs.iter() {
if htlc.prev_hop.phantom_shared_secret.is_some() {
let phantom_pubkey = node_signer.get_node_id(Recipient::PhantomNode)
.expect("Failed to get node_id for phantom node recipient");
receiver_node_id = phantom_pubkey;
break;
}
}
if let Some(RecipientOnionFields { custom_tlvs, .. }) = &payment.onion_fields {
if !custom_tlvs_known && custom_tlvs.iter().any(|(typ, _)| typ % 2 == 0) {
log_info!(logger, "Rejecting payment with payment hash {} as we cannot accept payment with unknown even TLVs: {}",
&payment_hash, log_iter!(custom_tlvs.iter().map(|(typ, _)| typ).filter(|typ| *typ % 2 == 0)));
return Err(payment.htlcs);
}
}
let payment_id = payment.inbound_payment_id(inbound_payment_id_secret);
let claiming_payment = self.pending_claiming_payments
.entry(payment_hash)
.and_modify(|_| {
debug_assert!(false, "Shouldn't get a duplicate pending claim event ever");
log_error!(logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug",
&payment_hash);
})
.or_insert_with(|| {
let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect();
let sender_intended_value = payment.htlcs.first().map(|htlc| htlc.total_msat);
let durable_preimage_channel = payment.htlcs.last().and_then(|htlc| {
htlc.prev_hop.counterparty_node_id
.map(|node_id| (htlc.prev_hop.outpoint, node_id, htlc.prev_hop.channel_id))
});
debug_assert!(durable_preimage_channel.is_some());
ClaimingPayment {
amount_msat: payment.htlcs.iter().map(|source| source.value).sum(),
payment_purpose: payment.purpose,
receiver_node_id,
htlcs,
sender_intended_value,
onion_fields: payment.onion_fields,
payment_id: Some(payment_id),
durable_preimage_channel,
}
}).clone();
Ok((payment.htlcs, claiming_payment))
},
None => Err(Vec::new())
}
}
}
#[derive(Debug)]
enum BackgroundEvent {
MonitorUpdateRegeneratedOnStartup {
counterparty_node_id: PublicKey,
funding_txo: OutPoint,
channel_id: ChannelId,
update: ChannelMonitorUpdate,
},
MonitorUpdatesComplete {
counterparty_node_id: PublicKey,
channel_id: ChannelId,
highest_update_id_completed: u64,
},
}
#[derive(Debug)]
pub(crate) struct EventUnblockedChannel {
counterparty_node_id: PublicKey,
funding_txo: OutPoint,
channel_id: ChannelId,
blocking_action: RAAMonitorUpdateBlockingAction,
}
impl Writeable for EventUnblockedChannel {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
self.counterparty_node_id.write(writer)?;
self.funding_txo.write(writer)?;
self.channel_id.write(writer)?;
self.blocking_action.write(writer)
}
}
impl MaybeReadable for EventUnblockedChannel {
fn read<R: Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
let counterparty_node_id = Readable::read(reader)?;
let funding_txo = Readable::read(reader)?;
let channel_id = Readable::read(reader)?;
let blocking_action = match RAAMonitorUpdateBlockingAction::read(reader)? {
Some(blocking_action) => blocking_action,
None => return Ok(None),
};
Ok(Some(EventUnblockedChannel {
counterparty_node_id,
funding_txo,
channel_id,
blocking_action,
}))
}
}
#[derive(Debug)]
pub(crate) enum MonitorUpdateCompletionAction {
PaymentClaimed {
payment_hash: PaymentHash,
pending_mpp_claim: Option<(PublicKey, ChannelId, PendingMPPClaimPointer)>,
},
EmitEventAndFreeOtherChannel {
event: events::Event,
downstream_counterparty_and_funding_outpoint: Option<EventUnblockedChannel>,
},
FreeOtherChannelImmediately {
downstream_counterparty_node_id: PublicKey,
blocking_action: RAAMonitorUpdateBlockingAction,
downstream_channel_id: ChannelId,
},
}
impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
(0, PaymentClaimed) => {
(0, payment_hash, required),
(9999999999, pending_mpp_claim, (static_value, None)),
},
(1, FreeOtherChannelImmediately) => {
(0, downstream_counterparty_node_id, required),
(4, blocking_action, upgradable_required),
(5, downstream_channel_id, required),
},
(2, EmitEventAndFreeOtherChannel) => {
(0, event, upgradable_required),
(1, downstream_counterparty_and_funding_outpoint, upgradable_option),
},
);
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct PaymentCompleteUpdate {
counterparty_node_id: PublicKey,
channel_funding_outpoint: OutPoint,
channel_id: ChannelId,
htlc_id: SentHTLCId,
}
impl_writeable_tlv_based!(PaymentCompleteUpdate, {
(1, channel_funding_outpoint, required),
(3, counterparty_node_id, required),
(5, channel_id, required),
(7, htlc_id, required),
});
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum EventCompletionAction {
ReleaseRAAChannelMonitorUpdate {
counterparty_node_id: PublicKey,
channel_funding_outpoint: Option<OutPoint>,
channel_id: ChannelId,
},
ReleasePaymentCompleteChannelMonitorUpdate(PaymentCompleteUpdate),
}
impl_writeable_tlv_based_enum!(EventCompletionAction,
(0, ReleaseRAAChannelMonitorUpdate) => {
(0, channel_funding_outpoint, option),
(2, counterparty_node_id, required),
(3, channel_id, (default_value, {
if channel_funding_outpoint.is_none() {
Err(DecodeError::InvalidValue)?
}
ChannelId::v1_from_funding_outpoint(channel_funding_outpoint.unwrap())
})),
},
{1, ReleasePaymentCompleteChannelMonitorUpdate} => (),
);
struct HTLCClaimSource {
counterparty_node_id: PublicKey,
funding_txo: OutPoint,
channel_id: ChannelId,
htlc_id: u64,
}
impl From<&MPPClaimHTLCSource> for HTLCClaimSource {
fn from(o: &MPPClaimHTLCSource) -> HTLCClaimSource {
HTLCClaimSource {
counterparty_node_id: o.counterparty_node_id,
funding_txo: o.funding_txo,
channel_id: o.channel_id,
htlc_id: o.htlc_id,
}
}
}
#[derive(Debug)]
pub(crate) struct PendingMPPClaim {
channels_without_preimage: Vec<(PublicKey, ChannelId)>,
channels_with_preimage: Vec<(PublicKey, ChannelId)>,
}
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
struct MPPClaimHTLCSource {
counterparty_node_id: PublicKey,
funding_txo: OutPoint,
channel_id: ChannelId,
htlc_id: u64,
}
impl_writeable_tlv_based!(MPPClaimHTLCSource, {
(0, counterparty_node_id, required),
(2, funding_txo, required),
(4, channel_id, required),
(6, htlc_id, required),
});
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct PaymentClaimDetails {
mpp_parts: Vec<MPPClaimHTLCSource>,
claiming_payment: ClaimingPayment,
}
impl_writeable_tlv_based!(PaymentClaimDetails, {
(0, mpp_parts, required_vec),
(2, claiming_payment, required),
});
#[derive(Clone)]
pub(crate) struct PendingMPPClaimPointer(Arc<Mutex<PendingMPPClaim>>);
impl PartialEq for PendingMPPClaimPointer {
fn eq(&self, o: &Self) -> bool {
Arc::ptr_eq(&self.0, &o.0)
}
}
impl Eq for PendingMPPClaimPointer {}
impl core::fmt::Debug for PendingMPPClaimPointer {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
self.0.lock().unwrap().fmt(f)
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub(crate) enum RAAMonitorUpdateBlockingAction {
ForwardedPaymentInboundClaim {
channel_id: ChannelId,
htlc_id: u64,
},
ClaimedMPPPayment { pending_claim: PendingMPPClaimPointer },
}
impl RAAMonitorUpdateBlockingAction {
fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
Self::ForwardedPaymentInboundClaim {
channel_id: prev_hop.channel_id,
htlc_id: prev_hop.htlc_id,
}
}
}
impl_writeable_tlv_based_enum_upgradable!(RAAMonitorUpdateBlockingAction,
(0, ForwardedPaymentInboundClaim) => { (0, channel_id, required), (2, htlc_id, required) },
unread_variants: ClaimedMPPPayment
);
impl Readable for Option<RAAMonitorUpdateBlockingAction> {
fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
Ok(RAAMonitorUpdateBlockingAction::read(reader)?)
}
}
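/// State we hold per-peer.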
pub(super) struct PeerState<SP: Deref>
where
SP::Target: SignerProvider,
{
pub(super) channel_by_id: HashMap<ChannelId, Channel<SP>>,
pub(super) inbound_channel_request_by_id: HashMap<ChannelId, InboundChannelRequest>,
latest_features: InitFeatures,
pub(super) pending_msg_events: Vec<MessageSendEvent>,
in_flight_monitor_updates: BTreeMap<ChannelId, (OutPoint, Vec<ChannelMonitorUpdate>)>,
monitor_update_blocked_actions: BTreeMap<ChannelId, Vec<MonitorUpdateCompletionAction>>,
actions_blocking_raa_monitor_updates: BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
closed_channel_monitor_update_ids: BTreeMap<ChannelId, u64>,
pub is_connected: bool,
peer_storage: Vec<u8>,
}
impl<SP: Deref> PeerState<SP>
where
SP::Target: SignerProvider,
{
fn ok_to_remove(&self, require_disconnected: bool) -> bool {
if require_disconnected && self.is_connected {
return false;
}
for (_, updates) in self.in_flight_monitor_updates.values() {
if !updates.is_empty() {
return false;
}
}
let chan_is_funded_or_outbound = |(_, channel): (_, &Channel<SP>)| {
channel.is_funded() || channel.funding().is_outbound()
};
!self.channel_by_id.iter().any(chan_is_funded_or_outbound)
&& self.monitor_update_blocked_actions.is_empty()
&& self.closed_channel_monitor_update_ids.is_empty()
}
fn total_channel_count(&self) -> usize {
self.channel_by_id.len() + self.inbound_channel_request_by_id.len()
}
fn has_channel(&self, channel_id: &ChannelId) -> bool {
self.channel_by_id.contains_key(channel_id)
|| self.inbound_channel_request_by_id.contains_key(channel_id)
}
}
#[derive(Clone)]
pub(super) enum OpenChannelMessage {
V1(msgs::OpenChannel),
V2(msgs::OpenChannelV2),
}
pub(super) enum OpenChannelMessageRef<'a> {
V1(&'a msgs::OpenChannel),
V2(&'a msgs::OpenChannelV2),
}
pub(super) struct InboundChannelRequest {
pub open_channel_msg: OpenChannelMessage,
pub ticks_remaining: i32,
}
// The number of ticks of [`ChannelManager::timer_tick_occurred`] an unaccepted inbound channel
// request may wait for user acceptance before we drop it.
const UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS: i32 = 2;
// The number of blocks of feerate estimate history we retain (~1 day of blocks).
pub(super) const FEERATE_TRACKING_BLOCKS: usize = 144;
#[derive(Debug)]
struct PendingInboundPayment {
payment_secret: PaymentSecret,
expiry_time: u64,
user_payment_id: u64,
payment_preimage: Option<PaymentPreimage>,
min_value_msat: Option<u64>,
}
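/// A [`ChannelManager`] type alias with all type parameters wrapped in [`Arc`]s, useful when you
/// need a `ChannelManager` with a `'static` lifetime, e.g. to hand to a background task.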
#[cfg(not(c_bindings))]
pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
Arc<M>,
Arc<T>,
Arc<KeysManager>,
Arc<KeysManager>,
Arc<KeysManager>,
Arc<F>,
Arc<
DefaultRouter<
Arc<NetworkGraph<Arc<L>>>,
Arc<L>,
Arc<KeysManager>,
Arc<RwLock<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
ProbabilisticScoringFeeParameters,
ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
>,
>,
Arc<DefaultMessageRouter<Arc<NetworkGraph<Arc<L>>>, Arc<L>, Arc<KeysManager>>>,
Arc<L>,
>;
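/// A [`ChannelManager`] type alias taking its type parameters by reference, avoiding the
/// reference counting of [`SimpleArcChannelManager`] when the manager can be held by reference.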
#[cfg(not(c_bindings))]
pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, 'i, M, T, F, L> = ChannelManager<
&'a M,
&'b T,
&'c KeysManager,
&'c KeysManager,
&'c KeysManager,
&'d F,
&'e DefaultRouter<
&'f NetworkGraph<&'g L>,
&'g L,
&'c KeysManager,
&'h RwLock<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>,
ProbabilisticScoringFeeParameters,
ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>,
>,
&'i DefaultMessageRouter<&'f NetworkGraph<&'g L>, &'g L, &'c KeysManager>,
&'g L,
>;
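/// A trivial trait which describes any [`ChannelManager`], useful for writing code generic over
/// all of `ChannelManager`'s many type parameters.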
pub trait AChannelManager {
type Watch: chain::Watch<Self::Signer> + ?Sized;
type M: Deref<Target = Self::Watch>;
type Broadcaster: BroadcasterInterface + ?Sized;
type T: Deref<Target = Self::Broadcaster>;
type EntropySource: EntropySource + ?Sized;
type ES: Deref<Target = Self::EntropySource>;
type NodeSigner: NodeSigner + ?Sized;
type NS: Deref<Target = Self::NodeSigner>;
type Signer: EcdsaChannelSigner + Sized;
type SignerProvider: SignerProvider<EcdsaSigner = Self::Signer> + ?Sized;
type SP: Deref<Target = Self::SignerProvider>;
type FeeEstimator: FeeEstimator + ?Sized;
type F: Deref<Target = Self::FeeEstimator>;
type Router: Router + ?Sized;
type R: Deref<Target = Self::Router>;
type MessageRouter: MessageRouter + ?Sized;
type MR: Deref<Target = Self::MessageRouter>;
type Logger: Logger + ?Sized;
type L: Deref<Target = Self::Logger>;
fn get_cm(
&self,
) -> &ChannelManager<
Self::M,
Self::T,
Self::ES,
Self::NS,
Self::SP,
Self::F,
Self::R,
Self::MR,
Self::L,
>;
}
impl<
M: Deref,
T: Deref,
ES: Deref,
NS: Deref,
SP: Deref,
F: Deref,
R: Deref,
MR: Deref,
L: Deref,
> AChannelManager for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
type Watch = M::Target;
type M = M;
type Broadcaster = T::Target;
type T = T;
type EntropySource = ES::Target;
type ES = ES;
type NodeSigner = NS::Target;
type NS = NS;
type Signer = <SP::Target as SignerProvider>::EcdsaSigner;
type SignerProvider = SP::Target;
type SP = SP;
type FeeEstimator = F::Target;
type F = F;
type Router = R::Target;
type R = R;
type MessageRouter = MR::Target;
type MR = MR;
type Logger = L::Target;
type L = L;
fn get_cm(&self) -> &ChannelManager<M, T, ES, NS, SP, F, R, MR, L> {
self
}
}
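/// A lightning node's channel state machine and payment management logic, which facilitates
/// sending, forwarding, and receiving payments through lightning channels.
///
/// It tracks all open and pending channels per peer, decodes incoming HTLC onions, queues
/// forwards and failures, and surfaces [`Event`]s for the user to handle.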
pub struct ChannelManager<
M: Deref,
T: Deref,
ES: Deref,
NS: Deref,
SP: Deref,
F: Deref,
R: Deref,
MR: Deref,
L: Deref,
> where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
config: RwLock<UserConfig>,
chain_hash: ChainHash,
fee_estimator: LowerBoundedFeeEstimator<F>,
chain_monitor: M,
tx_broadcaster: T,
router: R,
#[cfg(test)]
pub(super) flow: OffersMessageFlow<MR, L>,
#[cfg(not(test))]
flow: OffersMessageFlow<MR, L>,
#[cfg(any(test, feature = "_test_utils"))]
pub(super) best_block: RwLock<BestBlock>,
#[cfg(not(any(test, feature = "_test_utils")))]
best_block: RwLock<BestBlock>,
pub(super) secp_ctx: Secp256k1<secp256k1::All>,
pending_outbound_payments: OutboundPayments,
#[cfg(test)]
pub(super) forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
#[cfg(not(test))]
forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
pending_intercepted_htlcs: Mutex<HashMap<InterceptId, PendingAddHTLCInfo>>,
decode_update_add_htlcs: Mutex<HashMap<u64, Vec<msgs::UpdateAddHTLC>>>,
claimable_payments: Mutex<ClaimablePayments>,
outbound_scid_aliases: Mutex<HashSet<u64>>,
#[cfg(test)]
pub(super) short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, ChannelId)>>,
#[cfg(not(test))]
short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, ChannelId)>>,
our_network_pubkey: PublicKey,
inbound_payment_key: inbound_payment::ExpandedKey,
fake_scid_rand_bytes: [u8; 32],
probing_cookie_secret: [u8; 32],
inbound_payment_id_secret: [u8; 32],
highest_seen_timestamp: AtomicUsize,
#[cfg(not(any(test, feature = "_test_utils")))]
per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<SP>>>>,
#[cfg(any(test, feature = "_test_utils"))]
pub(super) per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<SP>>>>,
#[cfg(not(any(test, feature = "_externalize_tests")))]
monitor_update_type: AtomicUsize,
#[cfg(not(any(test, feature = "_test_utils")))]
pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
#[cfg(any(test, feature = "_test_utils"))]
pub(crate) pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
pending_events_processor: AtomicBool,
pending_htlc_forwards_processor: AtomicBool,
pending_background_events: Mutex<Vec<BackgroundEvent>>,
total_consistency_lock: RwLock<()>,
funding_batch_states: Mutex<BTreeMap<Txid, Vec<(ChannelId, PublicKey, bool)>>>,
background_events_processed_since_startup: AtomicBool,
event_persist_notifier: Notifier,
needs_persist_flag: AtomicBool,
pending_broadcast_messages: Mutex<Vec<MessageSendEvent>>,
last_days_feerates: Mutex<VecDeque<(u32, u32)>>,
#[cfg(feature = "_test_utils")]
pub testing_dnssec_proof_offer_resolution_override: Mutex<HashMap<HumanReadableName, Offer>>,
#[cfg(test)]
pub(super) entropy_source: ES,
#[cfg(not(test))]
entropy_source: ES,
node_signer: NS,
#[cfg(test)]
pub(super) signer_provider: SP,
#[cfg(not(test))]
signer_provider: SP,
logger: L,
}
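/// Chain-related parameters used to construct a new [`ChannelManager`].
///
/// A construction sketch (illustrative; `best_hash` and `best_height` are assumed to come from
/// your chain source):
///
/// ```ignore
/// let params = ChainParameters {
///     network: Network::Bitcoin,
///     best_block: BestBlock::new(best_hash, best_height),
/// };
/// ```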
#[derive(Clone, Copy, PartialEq)]
pub struct ChainParameters {
pub network: Network,
pub best_block: BestBlock,
}
#[derive(Copy, Clone, PartialEq)]
#[must_use]
enum NotifyOption {
DoPersist,
SkipPersistHandleEvents,
SkipPersistNoEvents,
}
struct PersistenceNotifierGuard<'a, F: FnOnce() -> NotifyOption> {
event_persist_notifier: &'a Notifier,
needs_persist_flag: &'a AtomicBool,
should_persist: Option<F>,
_read_guard: RwLockReadGuard<'a, ()>,
}
impl<'a> PersistenceNotifierGuard<'a, fn() -> NotifyOption> {
fn notify_on_drop<C: AChannelManager>(
cm: &'a C,
) -> PersistenceNotifierGuard<'a, impl FnOnce() -> NotifyOption> {
Self::optionally_notify(cm, || -> NotifyOption { NotifyOption::DoPersist })
}
fn optionally_notify<F: FnOnce() -> NotifyOption, C: AChannelManager>(
cm: &'a C, persist_check: F,
) -> PersistenceNotifierGuard<'a, impl FnOnce() -> NotifyOption> {
let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
let force_notify = cm.get_cm().process_background_events();
PersistenceNotifierGuard {
event_persist_notifier: &cm.get_cm().event_persist_notifier,
needs_persist_flag: &cm.get_cm().needs_persist_flag,
should_persist: Some(move || {
let notify = persist_check();
match (notify, force_notify) {
(NotifyOption::DoPersist, _) => NotifyOption::DoPersist,
(_, NotifyOption::DoPersist) => NotifyOption::DoPersist,
(NotifyOption::SkipPersistHandleEvents, _) => {
NotifyOption::SkipPersistHandleEvents
},
(_, NotifyOption::SkipPersistHandleEvents) => {
NotifyOption::SkipPersistHandleEvents
},
_ => NotifyOption::SkipPersistNoEvents,
}
}),
_read_guard: read_guard,
}
}
fn optionally_notify_skipping_background_events<F: Fn() -> NotifyOption, C: AChannelManager>(
cm: &'a C, persist_check: F,
) -> PersistenceNotifierGuard<'a, F> {
let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
PersistenceNotifierGuard {
event_persist_notifier: &cm.get_cm().event_persist_notifier,
needs_persist_flag: &cm.get_cm().needs_persist_flag,
should_persist: Some(persist_check),
_read_guard: read_guard,
}
}
}
impl<'a, F: FnOnce() -> NotifyOption> Drop for PersistenceNotifierGuard<'a, F> {
fn drop(&mut self) {
let should_persist = match self.should_persist.take() {
Some(should_persist) => should_persist,
None => {
debug_assert!(false);
return;
},
};
match should_persist() {
NotifyOption::DoPersist => {
self.needs_persist_flag.store(true, Ordering::Release);
self.event_persist_notifier.notify()
},
NotifyOption::SkipPersistHandleEvents => self.event_persist_notifier.notify(),
NotifyOption::SkipPersistNoEvents => {},
}
}
}
/// The amount of time in blocks we require our counterparty to wait to claim their money back
/// after a unilateral close (i.e. the `to_self_delay` we ask for; ~1 day of blocks).
pub const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
// The maximum `to_self_delay` we will allow our counterparty to require of us (~2 weeks of blocks).
pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;
/// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
/// HTLC's CLTV (48 blocks, roughly eight hours at six blocks per hour).
pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6 * 8;
// A CLTV delta considered "far in the future" (~2 weeks of blocks).
pub(crate) const CLTV_FAR_FAR_AWAY: u32 = 14 * 24 * 6;
/// The minimum CLTV delta, relative to the current block height, which invoices generated for
/// payments to us must use as their `min_final_cltv_expiry_delta`.
pub const MIN_FINAL_CLTV_EXPIRY_DELTA: u16 = HTLC_FAIL_BACK_BUFFER as u16 + 3;
// Compile-time check that our minimum CLTV delta leaves time to fail back off-chain and, failing
// that, to confirm a claim on-chain across a reorg.
const _CHECK_CLTV_EXPIRY_SANITY: () = assert!(
MIN_CLTV_EXPIRY_DELTA as u32
>= 2 * LATENCY_GRACE_PERIOD_BLOCKS + 2 * MAX_BLOCKS_FOR_CONF + ANTI_REORG_DELAY
);
// The CLTV claim buffer we assume a reasonable counterparty uses (~6 hours of blocks).
const _ASSUMED_COUNTERPARTY_CLTV_CLAIM_BUFFER: u32 = 6 * 6;
const _CHECK_COUNTERPARTY_REALISTIC: () =
assert!(_ASSUMED_COUNTERPARTY_CLTV_CLAIM_BUFFER >= CLTV_CLAIM_BUFFER);
const _CHECK_CLTV_EXPIRY_OFFCHAIN: () = assert!(
MIN_CLTV_EXPIRY_DELTA as u32
>= 2 * LATENCY_GRACE_PERIOD_BLOCKS - 1 + _ASSUMED_COUNTERPARTY_CLTV_CLAIM_BUFFER
);
// The number of ticks of [`ChannelManager::timer_tick_occurred`] until we time out an incomplete
// MPP payment and fail back the parts we have received.
pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
// The number of ticks of [`ChannelManager::timer_tick_occurred`] a peer must remain disconnected
// before we announce its channels as disabled.
pub(crate) const DISABLE_GOSSIP_TICKS: u8 = 10;
// The number of ticks of [`ChannelManager::timer_tick_occurred`] a peer must remain connected
// before we re-announce its channels as enabled.
pub(crate) const ENABLE_GOSSIP_TICKS: u8 = 5;
// The maximum number of unfunded channels we will keep around per peer before rejecting new
// (inbound) channel open requests.
pub(super) const MAX_UNFUNDED_CHANS_PER_PEER: usize = 4;
// The maximum number of peers which may concurrently have pending unfunded inbound channels.
pub(super) const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50;
// The maximum size, in bytes, of a peer storage blob we will persist on behalf of a peer.
#[cfg(not(test))]
const MAX_PEER_STORAGE_SIZE: usize = 1024;
// The maximum number of connected peers we will track which have neither funded channels nor
// pending unfunded channels with us.
const MAX_NO_CHANNEL_PEERS: usize = 250;
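/// Used by [`ChannelManager::list_recent_payments`] to express the status of recent payments.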
#[derive(Debug, PartialEq)]
pub enum RecentPaymentDetails {
AwaitingInvoice {
payment_id: PaymentId,
},
Pending {
payment_id: PaymentId,
payment_hash: PaymentHash,
total_msat: u64,
},
Fulfilled {
payment_id: PaymentId,
payment_hash: Option<PaymentHash>,
},
Abandoned {
payment_id: PaymentId,
payment_hash: PaymentHash,
},
}
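/// Route hints used in constructing invoices for [phantom node payments].
///
/// [phantom node payments]: crate::sign::PhantomKeysManager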
#[derive(Clone)]
pub struct PhantomRouteHints {
pub channels: Vec<ChannelDetails>,
pub phantom_scid: u64,
pub real_node_pubkey: PublicKey,
}
#[rustfmt::skip]
macro_rules! handle_error {
($self: ident, $internal: expr, $counterparty_node_id: expr) => { {
debug_assert_ne!($self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
debug_assert_ne!($self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
match $internal {
Ok(msg) => Ok(msg),
Err(MsgHandleErrInternal { err, shutdown_finish, tx_abort, .. }) => {
let mut msg_event = None;
if let Some((shutdown_res, update_option)) = shutdown_finish {
let counterparty_node_id = shutdown_res.counterparty_node_id;
let channel_id = shutdown_res.channel_id;
let logger = WithContext::from(
&$self.logger, Some(counterparty_node_id), Some(channel_id), None
);
log_error!(logger, "Closing channel: {}", err.err);
$self.finish_close_channel(shutdown_res);
if let Some(update) = update_option {
let mut pending_broadcast_messages = $self.pending_broadcast_messages.lock().unwrap();
pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
} else {
log_error!($self.logger, "Got non-closing error: {}", err.err);
}
if let msgs::ErrorAction::IgnoreError = err.action {
if let Some(tx_abort) = tx_abort {
msg_event = Some(MessageSendEvent::SendTxAbort {
node_id: $counterparty_node_id,
msg: tx_abort,
});
}
} else {
msg_event = Some(MessageSendEvent::HandleError {
node_id: $counterparty_node_id,
action: err.action.clone()
});
}
if let Some(msg_event) = msg_event {
let per_peer_state = $self.per_peer_state.read().unwrap();
if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) {
let mut peer_state = peer_state_mutex.lock().unwrap();
if peer_state.is_connected {
peer_state.pending_msg_events.push(msg_event);
}
}
}
Err(err)
},
}
} };
}
#[rustfmt::skip]
macro_rules! locked_close_channel {
($self: ident, $chan_context: expr, UNFUNDED) => {{
$self.short_to_chan_info.write().unwrap().remove(&$chan_context.outbound_scid_alias());
let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$chan_context.outbound_scid_alias());
debug_assert!(alias_removed);
}};
($self: ident, $peer_state: expr, $funded_chan: expr, $shutdown_res_mut: expr, FUNDED) => {{
if let Some((_, funding_txo, _, update)) = $shutdown_res_mut.monitor_update.take() {
handle_new_monitor_update!($self, funding_txo, update, $peer_state,
$funded_chan.context, REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER);
}
let update_id = $funded_chan.context.get_latest_monitor_update_id();
if $funded_chan.funding.get_funding_tx_confirmation_height().is_some() || $funded_chan.context.minimum_depth(&$funded_chan.funding) == Some(0) || update_id > 1 {
let chan_id = $funded_chan.context.channel_id();
$peer_state.closed_channel_monitor_update_ids.insert(chan_id, update_id);
}
let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
if let Some(short_id) = $funded_chan.funding.get_short_channel_id() {
short_to_chan_info.remove(&short_id);
} else {
let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$funded_chan.context.outbound_scid_alias());
debug_assert!(alias_removed);
}
short_to_chan_info.remove(&$funded_chan.context.outbound_scid_alias());
for scid in $funded_chan.context.historical_scids() {
short_to_chan_info.remove(scid);
}
}}
}
#[rustfmt::skip]
macro_rules! convert_channel_err {
($self: ident, $peer_state: expr, $err: expr, $chan: expr, $close: expr, $locked_close: expr, $channel_id: expr, _internal) => { {
match $err {
ChannelError::Warn(msg) => {
(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), $channel_id))
},
ChannelError::WarnAndDisconnect(msg) => {
(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::WarnAndDisconnect(msg), $channel_id))
},
ChannelError::Ignore(msg) => {
(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id))
},
ChannelError::Abort(reason) => {
(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Abort(reason), $channel_id))
},
ChannelError::Close((msg, reason)) => {
let (mut shutdown_res, chan_update) = $close(reason);
let logger = WithChannelContext::from(&$self.logger, &$chan.context(), None);
log_error!(logger, "Closed channel {} due to close-required error: {}", $channel_id, msg);
$locked_close(&mut shutdown_res, $chan);
let err =
MsgHandleErrInternal::from_finish_shutdown(msg, $channel_id, shutdown_res, chan_update);
(true, err)
},
ChannelError::SendError(msg) => {
(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::SendError(msg), $channel_id))
},
}
} };
($self: ident, $peer_state: expr, $shutdown_result: expr, $funded_channel: expr, COOP_CLOSED) => { {
let chan_id = $funded_channel.context.channel_id();
let reason = ChannelError::Close(("Coop Closed".to_owned(), $shutdown_result.closure_reason.clone()));
let do_close = |_| {
(
$shutdown_result,
$self.get_channel_update_for_broadcast(&$funded_channel).ok(),
)
};
let mut locked_close = |shutdown_res_mut: &mut ShutdownResult, funded_channel: &mut FundedChannel<_>| {
locked_close_channel!($self, $peer_state, funded_channel, shutdown_res_mut, FUNDED);
};
let (close, mut err) =
convert_channel_err!($self, $peer_state, reason, $funded_channel, do_close, locked_close, chan_id, _internal);
err.dont_send_error_message();
debug_assert!(close);
err
} };
($self: ident, $peer_state: expr, $err: expr, $funded_channel: expr, FUNDED_CHANNEL) => { {
let chan_id = $funded_channel.context.channel_id();
let mut do_close = |reason| {
(
$funded_channel.force_shutdown(reason),
$self.get_channel_update_for_broadcast(&$funded_channel).ok(),
)
};
let mut locked_close = |shutdown_res_mut: &mut ShutdownResult, funded_channel: &mut FundedChannel<_>| {
locked_close_channel!($self, $peer_state, funded_channel, shutdown_res_mut, FUNDED);
};
convert_channel_err!($self, $peer_state, $err, $funded_channel, do_close, locked_close, chan_id, _internal)
} };
($self: ident, $peer_state: expr, $err: expr, $channel: expr, UNFUNDED_CHANNEL) => { {
let chan_id = $channel.context().channel_id();
let mut do_close = |reason| { ($channel.force_shutdown(reason), None) };
let locked_close = |_, chan: &mut Channel<_>| { locked_close_channel!($self, chan.context(), UNFUNDED); };
convert_channel_err!($self, $peer_state, $err, $channel, do_close, locked_close, chan_id, _internal)
} };
($self: ident, $peer_state: expr, $err: expr, $channel: expr) => {
match $channel.as_funded_mut() {
Some(funded_channel) => {
convert_channel_err!($self, $peer_state, $err, funded_channel, FUNDED_CHANNEL)
},
None => {
convert_channel_err!($self, $peer_state, $err, $channel, UNFUNDED_CHANNEL)
},
}
};
}
macro_rules! break_channel_entry {
($self: ident, $peer_state: expr, $res: expr, $entry: expr) => {
match $res {
Ok(res) => res,
Err(e) => {
let (drop, res) = convert_channel_err!($self, $peer_state, e, $entry.get_mut());
if drop {
$entry.remove_entry();
}
break Err(res);
},
}
};
}
macro_rules! try_channel_entry {
($self: ident, $peer_state: expr, $res: expr, $entry: expr) => {
match $res {
Ok(res) => res,
Err(e) => {
let (drop, res) = convert_channel_err!($self, $peer_state, e, $entry.get_mut());
if drop {
$entry.remove_entry();
}
return Err(res);
},
}
};
}
macro_rules! send_channel_ready {
($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{
if $channel.context.is_connected() {
$pending_msg_events.push(MessageSendEvent::SendChannelReady {
node_id: $channel.context.get_counterparty_node_id(),
msg: $channel_ready_msg,
});
}
let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
let outbound_alias_insert = short_to_chan_info.insert($channel.context.outbound_scid_alias(), ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()));
assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()),
"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
insert_short_channel_id!(short_to_chan_info, $channel);
}}
}
macro_rules! insert_short_channel_id {
($short_to_chan_info: ident, $channel: expr) => {{
if let Some(real_scid) = $channel.funding.get_short_channel_id() {
let scid_insert = $short_to_chan_info.insert(real_scid, ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()));
assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()),
"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
}
}}
}
macro_rules! emit_funding_tx_broadcast_safe_event {
($locked_events: expr, $channel: expr, $funding_txo: expr) => {
if !$channel.context.funding_tx_broadcast_safe_event_emitted() {
$locked_events.push_back((events::Event::FundingTxBroadcastSafe {
channel_id: $channel.context.channel_id(),
user_channel_id: $channel.context.get_user_id(),
funding_txo: $funding_txo,
counterparty_node_id: $channel.context.get_counterparty_node_id(),
former_temporary_channel_id: $channel.context.temporary_channel_id()
.expect("Unreachable: FundingTxBroadcastSafe event feature added to channel establishment process in LDK v0.0.124 where this should never be None."),
}, None));
$channel.context.set_funding_tx_broadcast_safe_event_emitted();
}
}
}
macro_rules! emit_channel_pending_event {
($locked_events: expr, $channel: expr) => {
if $channel.context.should_emit_channel_pending_event() {
let funding_txo = $channel.funding.get_funding_txo().unwrap();
let funding_redeem_script =
Some($channel.funding.channel_transaction_parameters.make_funding_redeemscript());
$locked_events.push_back((
events::Event::ChannelPending {
channel_id: $channel.context.channel_id(),
former_temporary_channel_id: $channel.context.temporary_channel_id(),
counterparty_node_id: $channel.context.get_counterparty_node_id(),
user_channel_id: $channel.context.get_user_id(),
funding_txo: funding_txo.into_bitcoin_outpoint(),
channel_type: Some($channel.funding.get_channel_type().clone()),
funding_redeem_script,
},
None,
));
$channel.context.set_channel_pending_event_emitted();
}
};
}
macro_rules! emit_initial_channel_ready_event {
($locked_events: expr, $channel: expr) => {
if $channel.context.should_emit_initial_channel_ready_event() {
debug_assert!($channel.context.channel_pending_event_emitted());
$locked_events.push_back((
events::Event::ChannelReady {
channel_id: $channel.context.channel_id(),
user_channel_id: $channel.context.get_user_id(),
counterparty_node_id: $channel.context.get_counterparty_node_id(),
funding_txo: $channel
.funding
.get_funding_txo()
.map(|outpoint| outpoint.into_bitcoin_outpoint()),
channel_type: $channel.funding.get_channel_type().clone(),
},
None,
));
$channel.context.set_initial_channel_ready_event_emitted();
}
};
}
macro_rules! handle_monitor_update_completion {
($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => {{
let channel_id = $chan.context.channel_id();
let outbound_scid_alias = $chan.context().outbound_scid_alias();
let counterparty_node_id = $chan.context.get_counterparty_node_id();
#[cfg(debug_assertions)]
{
let in_flight_updates =
$peer_state.in_flight_monitor_updates.get(&channel_id);
assert!(in_flight_updates.map(|(_, updates)| updates.is_empty()).unwrap_or(true));
assert!($chan.is_awaiting_monitor_update());
}
let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
let update_actions = $peer_state.monitor_update_blocked_actions
.remove(&channel_id).unwrap_or(Vec::new());
if $chan.blocked_monitor_updates_pending() != 0 {
mem::drop($peer_state_lock);
mem::drop($per_peer_state_lock);
log_debug!(logger, "Channel has blocked monitor updates, completing update actions but leaving channel blocked");
$self.handle_monitor_update_completion_actions(update_actions);
} else {
log_debug!(logger, "Channel is open and awaiting update, resuming it");
let mut updates = $chan.monitor_updating_restored(
&&logger,
&$self.node_signer,
$self.chain_hash,
&*$self.config.read().unwrap(),
$self.best_block.read().unwrap().height,
|htlc_id| {
$self.path_for_release_held_htlc(htlc_id, outbound_scid_alias, &channel_id, &counterparty_node_id)
},
);
let channel_update = if updates.channel_ready.is_some()
&& $chan.context.is_usable()
&& $peer_state.is_connected
{
if let Ok(msg) = $self.get_channel_update_for_unicast($chan) {
Some(MessageSendEvent::SendChannelUpdate { node_id: counterparty_node_id, msg })
} else {
None
}
} else {
None
};
let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption(
&mut $peer_state.pending_msg_events,
$chan,
updates.raa,
updates.commitment_update,
updates.commitment_order,
updates.accepted_htlcs,
updates.pending_update_adds,
updates.funding_broadcastable,
updates.channel_ready,
updates.announcement_sigs,
updates.tx_signatures,
None,
updates.channel_ready_order,
);
if let Some(upd) = channel_update {
$peer_state.pending_msg_events.push(upd);
}
let unbroadcasted_batch_funding_txid =
$chan.context.unbroadcasted_batch_funding_txid(&$chan.funding);
core::mem::drop($peer_state_lock);
core::mem::drop($per_peer_state_lock);
if let Some(txid) = unbroadcasted_batch_funding_txid {
let mut funding_batch_states = $self.funding_batch_states.lock().unwrap();
let mut batch_completed = false;
if let Some(batch_state) = funding_batch_states.get_mut(&txid) {
let channel_state = batch_state.iter_mut().find(|(chan_id, pubkey, _)| (
*chan_id == channel_id &&
*pubkey == counterparty_node_id
));
if let Some(channel_state) = channel_state {
channel_state.2 = true;
} else {
debug_assert!(false, "Missing channel batch state for channel which completed initial monitor update");
}
batch_completed = batch_state.iter().all(|(_, _, completed)| *completed);
} else {
debug_assert!(false, "Missing batch state for channel which completed initial monitor update");
}
if batch_completed {
let removed_batch_state = funding_batch_states.remove(&txid).into_iter().flatten();
let per_peer_state = $self.per_peer_state.read().unwrap();
let mut batch_funding_tx = None;
for (channel_id, counterparty_node_id, _) in removed_batch_state {
if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state = peer_state_mutex.lock().unwrap();
if let Some(funded_chan) = peer_state.channel_by_id
.get_mut(&channel_id)
.and_then(Channel::as_funded_mut)
{
batch_funding_tx = batch_funding_tx.or_else(|| funded_chan.context.unbroadcasted_funding(&funded_chan.funding));
funded_chan.set_batch_ready();
let mut pending_events = $self.pending_events.lock().unwrap();
emit_channel_pending_event!(pending_events, funded_chan);
}
}
}
if let Some(tx) = batch_funding_tx {
log_info!($self.logger, "Broadcasting batch funding transaction with txid {}", tx.compute_txid());
$self.tx_broadcaster.broadcast_transactions(&[&tx]);
}
}
}
$self.handle_monitor_update_completion_actions(update_actions);
if let Some(forwards) = htlc_forwards {
$self.forward_htlcs(&mut [forwards][..]);
}
if let Some(decode) = decode_update_add_htlcs {
$self.push_decode_update_add_htlcs(decode);
}
$self.finalize_claims(updates.finalized_claimed_htlcs);
for failure in updates.failed_htlcs.drain(..) {
let receiver = HTLCHandlingFailureType::Forward { node_id: Some(counterparty_node_id), channel_id };
$self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver, None);
}
}
}};
}
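/// Applies a new (or initial) `ChannelMonitorUpdate` and reacts to the resulting
/// `ChannelMonitorUpdateStatus`: `UnrecoverableError` panics after logging, `InProgress`
/// holds outbound messages until the update completes, and `Completed` runs the supplied
/// completion expression (ultimately `handle_monitor_update_completion!`). Updates generated
/// before startup background events have been processed are instead queued as
/// `BackgroundEvent::MonitorUpdateRegeneratedOnStartup`.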
macro_rules! handle_new_monitor_update {
($self: ident, $update_res: expr, $logger: expr, $channel_id: expr, _internal, $completed: expr) => { {
debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire));
match $update_res {
ChannelMonitorUpdateStatus::UnrecoverableError => {
let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
log_error!($logger, "{}", err_str);
panic!("{}", err_str);
},
ChannelMonitorUpdateStatus::InProgress => {
#[cfg(not(any(test, feature = "_externalize_tests")))]
if $self.monitor_update_type.swap(1, Ordering::Relaxed) == 2 {
panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart");
}
log_debug!($logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
$channel_id);
false
},
ChannelMonitorUpdateStatus::Completed => {
#[cfg(not(any(test, feature = "_externalize_tests")))]
if $self.monitor_update_type.swap(2, Ordering::Relaxed) == 1 {
panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart");
}
$completed;
true
},
}
} };
($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, INITIAL_MONITOR) => {
let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
handle_new_monitor_update!($self, $update_res, logger, $chan.context.channel_id(), _internal,
handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
};
(
$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $logger: expr,
$chan_id: expr, $counterparty_node_id: expr, $in_flight_updates: ident, $update_idx: ident,
_internal_outer, $completed: expr
) => { {
$in_flight_updates = &mut $peer_state.in_flight_monitor_updates.entry($chan_id)
.or_insert_with(|| ($funding_txo, Vec::new())).1;
$update_idx = $in_flight_updates.iter().position(|upd| upd == &$update)
.unwrap_or_else(|| {
$in_flight_updates.push($update);
$in_flight_updates.len() - 1
});
if $self.background_events_processed_since_startup.load(Ordering::Acquire) {
let update_res = $self.chain_monitor.update_channel($chan_id, &$in_flight_updates[$update_idx]);
handle_new_monitor_update!($self, update_res, $logger, $chan_id, _internal, $completed)
} else {
let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id: $counterparty_node_id,
funding_txo: $funding_txo,
channel_id: $chan_id,
update: $in_flight_updates[$update_idx].clone(),
};
$self.pending_background_events.lock().unwrap().push(event);
false
}
} };
(
$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $chan_context: expr,
REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER
) => { {
let logger = WithChannelContext::from(&$self.logger, &$chan_context, None);
let chan_id = $chan_context.channel_id();
let counterparty_node_id = $chan_context.get_counterparty_node_id();
let in_flight_updates;
let idx;
handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
counterparty_node_id, in_flight_updates, idx, _internal_outer,
{
let _ = in_flight_updates.remove(idx);
})
} };
(
$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
$per_peer_state_lock: expr, $counterparty_node_id: expr, $channel_id: expr, POST_CHANNEL_CLOSE
) => { {
let logger = WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None);
let in_flight_updates;
let idx;
handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger,
$channel_id, $counterparty_node_id, in_flight_updates, idx, _internal_outer,
{
let _ = in_flight_updates.remove(idx);
if in_flight_updates.is_empty() {
let update_actions = $peer_state.monitor_update_blocked_actions
.remove(&$channel_id).unwrap_or(Vec::new());
mem::drop($peer_state_lock);
mem::drop($per_peer_state_lock);
$self.handle_monitor_update_completion_actions(update_actions);
}
})
} };
(
$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
$per_peer_state_lock: expr, $chan: expr
) => { {
let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
let chan_id = $chan.context.channel_id();
let counterparty_node_id = $chan.context.get_counterparty_node_id();
let in_flight_updates;
let idx;
handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
counterparty_node_id, in_flight_updates, idx, _internal_outer,
{
let _ = in_flight_updates.remove(idx);
if in_flight_updates.is_empty() {
handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
}
})
} };
}
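/// Shared body of the event-processing entry points: drains the pending-events queue into
/// the caller-supplied handler, stops early (without losing unhandled events) if the handler
/// returns an error, collects post-event actions, and notifies the persistence waker
/// according to the resulting `NotifyOption`. The `pending_events_processor` flag serves as
/// a re-entrancy guard so only one processor runs at a time.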
#[rustfmt::skip]
macro_rules! process_events_body {
($self: expr, $event_to_handle: expr, $handle_event: expr) => {
let mut handling_failed = false;
let mut processed_all_events = false;
while !handling_failed && !processed_all_events {
if $self.pending_events_processor.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_err() {
return;
}
let mut result;
{
let _read_guard = $self.total_consistency_lock.read().unwrap();
result = $self.process_background_events();
if $self.process_pending_monitor_events() {
result = NotifyOption::DoPersist;
}
}
let pending_events = $self.pending_events.lock().unwrap().clone();
if !pending_events.is_empty() {
result = NotifyOption::DoPersist;
}
let mut post_event_actions = Vec::new();
let mut num_handled_events = 0;
for (event, action_opt) in pending_events {
log_trace!($self.logger, "Handling event {:?}...", event);
$event_to_handle = event;
let event_handling_result = $handle_event;
log_trace!($self.logger, "Done handling event, result: {:?}", event_handling_result);
match event_handling_result {
Ok(()) => {
if let Some(action) = action_opt {
post_event_actions.push(action);
}
num_handled_events += 1;
}
Err(_e) => {
handling_failed = true;
break;
}
}
}
{
let mut pending_events = $self.pending_events.lock().unwrap();
pending_events.drain(..num_handled_events);
processed_all_events = pending_events.is_empty();
$self.pending_events_processor.store(false, Ordering::Release);
}
if !post_event_actions.is_empty() {
$self.handle_post_event_actions(post_event_actions);
processed_all_events = false;
}
match result {
NotifyOption::DoPersist => {
$self.needs_persist_flag.store(true, Ordering::Release);
$self.event_persist_notifier.notify();
},
NotifyOption::SkipPersistHandleEvents =>
$self.event_persist_notifier.notify(),
NotifyOption::SkipPersistNoEvents => {},
}
}
}
}
impl<
M: Deref,
T: Deref,
ES: Deref,
NS: Deref,
SP: Deref,
F: Deref,
R: Deref,
MR: Deref,
L: Deref,
> ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
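	/// Constructs a new `ChannelManager` for the given chain parameters and user config,
	/// deriving the node id and inbound payment key from the provided `NodeSigner` and
	/// re-randomizing a fresh secp256k1 context with entropy from the `EntropySource`.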
#[rustfmt::skip]
pub fn new(
fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, message_router: MR, logger: L,
entropy_source: ES, node_signer: NS, signer_provider: SP, config: UserConfig,
params: ChainParameters, current_timestamp: u32,
) -> Self
where
L: Clone,
{
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
let expanded_inbound_key = node_signer.get_expanded_key();
let our_network_pubkey = node_signer.get_node_id(Recipient::Node).unwrap();
let flow = OffersMessageFlow::new(
ChainHash::using_genesis_block(params.network), params.best_block,
our_network_pubkey, current_timestamp, expanded_inbound_key,
node_signer.get_receive_auth_key(), secp_ctx.clone(), message_router, logger.clone(),
);
ChannelManager {
config: RwLock::new(config),
chain_hash: ChainHash::using_genesis_block(params.network),
fee_estimator: LowerBoundedFeeEstimator::new(fee_est),
chain_monitor,
tx_broadcaster,
router,
flow,
best_block: RwLock::new(params.best_block),
outbound_scid_aliases: Mutex::new(new_hash_set()),
pending_outbound_payments: OutboundPayments::new(new_hash_map()),
forward_htlcs: Mutex::new(new_hash_map()),
decode_update_add_htlcs: Mutex::new(new_hash_map()),
claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: new_hash_map(), pending_claiming_payments: new_hash_map() }),
pending_intercepted_htlcs: Mutex::new(new_hash_map()),
short_to_chan_info: FairRwLock::new(new_hash_map()),
our_network_pubkey,
secp_ctx,
inbound_payment_key: expanded_inbound_key,
fake_scid_rand_bytes: entropy_source.get_secure_random_bytes(),
probing_cookie_secret: entropy_source.get_secure_random_bytes(),
inbound_payment_id_secret: entropy_source.get_secure_random_bytes(),
highest_seen_timestamp: AtomicUsize::new(current_timestamp as usize),
per_peer_state: FairRwLock::new(new_hash_map()),
#[cfg(not(any(test, feature = "_externalize_tests")))]
monitor_update_type: AtomicUsize::new(0),
pending_events: Mutex::new(VecDeque::new()),
pending_events_processor: AtomicBool::new(false),
pending_htlc_forwards_processor: AtomicBool::new(false),
pending_background_events: Mutex::new(Vec::new()),
total_consistency_lock: RwLock::new(()),
background_events_processed_since_startup: AtomicBool::new(false),
event_persist_notifier: Notifier::new(),
needs_persist_flag: AtomicBool::new(false),
funding_batch_states: Mutex::new(BTreeMap::new()),
pending_broadcast_messages: Mutex::new(Vec::new()),
last_days_feerates: Mutex::new(VecDeque::new()),
entropy_source,
node_signer,
signer_provider,
logger,
#[cfg(feature = "_test_utils")]
testing_dnssec_proof_offer_resolution_override: Mutex::new(new_hash_map()),
}
}
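	/// Returns a copy of the `UserConfig` currently in effect.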
pub fn get_current_config(&self) -> UserConfig {
self.config.read().unwrap().clone()
}
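	/// Replaces the `UserConfig` used for future operations with `new_config`.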
pub fn set_current_config(&self, new_config: UserConfig) {
*self.config.write().unwrap() = new_config;
}
#[cfg(test)]
pub fn create_and_insert_outbound_scid_alias_for_test(&self) -> u64 {
self.create_and_insert_outbound_scid_alias()
}
fn create_and_insert_outbound_scid_alias(&self) -> u64 {
let height = self.best_block.read().unwrap().height;
let mut outbound_scid_alias = 0;
let mut i = 0;
loop {
if cfg!(fuzzing) {
outbound_scid_alias += 1;
} else {
outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(
height,
&self.chain_hash,
&self.fake_scid_rand_bytes,
&self.entropy_source,
);
}
if outbound_scid_alias != 0
&& self.outbound_scid_aliases.lock().unwrap().insert(outbound_scid_alias)
{
break;
}
i += 1;
if i > 1_000_000 {
panic!("Your RNG is busted or we ran out of possible outbound SCID aliases (which should never happen before we run out of memory to store channels");
}
}
outbound_scid_alias
}
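	/// Opens an outbound channel to a connected peer, queueing an `open_channel` message to
	/// be sent. Returns the temporary channel id used until funding is negotiated. The
	/// channel value must be at least 1000 satoshis and the peer must be connected.
	///
	/// A minimal usage sketch (assumes `channel_manager` is a running `ChannelManager` and
	/// `peer_pubkey` is a currently connected peer; not compiled as a doctest):
	///
	/// ```ignore
	/// let temp_chan_id = channel_manager.create_channel(
	/// 	peer_pubkey,
	/// 	1_000_000, // channel value, in satoshis (must be >= 1000)
	/// 	0,         // push_msat: amount to push to the counterparty
	/// 	42,        // user_channel_id: opaque u128 echoed back in events
	/// 	None,      // temporary_channel_id: let LDK pick one
	/// 	None,      // override_config: use the manager's current UserConfig
	/// )?;
	/// ```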
#[rustfmt::skip]
pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u128, temporary_channel_id: Option<ChannelId>, override_config: Option<UserConfig>) -> Result<ChannelId, APIError> {
if channel_value_satoshis < 1000 {
return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
}
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		debug_assert!(self.total_consistency_lock.try_write().is_err());
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(&their_network_key)
.ok_or_else(|| APIError::APIMisuseError{ err: format!("Not connected to node: {}", their_network_key) })?;
let mut peer_state = peer_state_mutex.lock().unwrap();
if !peer_state.is_connected {
return Err(APIError::APIMisuseError{ err: format!("Not connected to node: {}", their_network_key) });
}
if let Some(temporary_channel_id) = temporary_channel_id {
if peer_state.channel_by_id.contains_key(&temporary_channel_id) {
return Err(APIError::APIMisuseError{ err: format!("Channel with temporary channel ID {} already exists!", temporary_channel_id)});
}
}
let mut channel = {
let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
let their_features = &peer_state.latest_features;
let config = self.config.read().unwrap();
let config = if let Some(config) = &override_config {
config
} else {
&*config
};
match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key,
their_features, channel_value_satoshis, push_msat, user_channel_id, config,
self.best_block.read().unwrap().height, outbound_scid_alias, temporary_channel_id, &*self.logger)
{
Ok(res) => res,
Err(e) => {
self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
return Err(e);
},
}
};
let logger = WithChannelContext::from(&self.logger, &channel.context, None);
let res = channel.get_open_channel(self.chain_hash, &&logger);
let temporary_channel_id = channel.context.channel_id();
match peer_state.channel_by_id.entry(temporary_channel_id) {
hash_map::Entry::Occupied(_) => {
if cfg!(fuzzing) {
return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
} else {
panic!("RNG is bad???");
}
},
hash_map::Entry::Vacant(entry) => { entry.insert(Channel::from(channel)); }
}
if let Some(msg) = res {
peer_state.pending_msg_events.push(MessageSendEvent::SendOpenChannel {
node_id: their_network_key,
msg,
});
}
Ok(temporary_channel_id)
}
fn list_funded_channels_with_filter<
Fn: FnMut(&(&InitFeatures, &ChannelId, &Channel<SP>)) -> bool,
>(
&self, mut f: Fn,
) -> Vec<ChannelDetails> {
let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
{
let best_block_height = self.best_block.read().unwrap().height;
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let filtered_chan_by_id = peer_state
.channel_by_id
.iter()
.map(|(cid, c)| (&peer_state.latest_features, cid, c))
.filter(|(_, _, chan)| chan.is_funded())
.filter(|v| f(v));
res.extend(filtered_chan_by_id.map(|(_, _channel_id, channel)| {
ChannelDetails::from_channel(
channel,
best_block_height,
peer_state.latest_features.clone(),
&self.fee_estimator,
)
}));
}
}
res
}
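	/// Returns details for all channels with all peers, whether or not they are yet funded.
	/// For only the channels usable for sending payments, see `list_usable_channels`.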
pub fn list_channels(&self) -> Vec<ChannelDetails> {
let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
{
let best_block_height = self.best_block.read().unwrap().height;
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for (_, channel) in peer_state.channel_by_id.iter() {
let details = ChannelDetails::from_channel(
channel,
best_block_height,
peer_state.latest_features.clone(),
&self.fee_estimator,
);
res.push(details);
}
}
}
res
}
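	/// Returns details for funded channels that are currently live, i.e. whose peer is
	/// connected and which can be used to send payments.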
pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
self.list_funded_channels_with_filter(|&(_, _, ref channel)| channel.context().is_live())
}
pub fn list_channels_with_counterparty(
&self, counterparty_node_id: &PublicKey,
) -> Vec<ChannelDetails> {
let best_block_height = self.best_block.read().unwrap().height;
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let features = &peer_state.latest_features;
let channel_to_details = |channel| {
ChannelDetails::from_channel(
channel,
best_block_height,
features.clone(),
&self.fee_estimator,
)
};
let chan_by_id = peer_state.channel_by_id.iter();
return chan_by_id.map(|(_, chan)| chan).map(channel_to_details).collect();
}
vec![]
}
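	/// Returns a best-effort snapshot of outbound payments that are awaiting an invoice,
	/// pending, fulfilled, or abandoned, mapped from the internal `PendingOutboundPayment`
	/// states; legacy entries are omitted.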
#[rustfmt::skip]
pub fn list_recent_payments(&self) -> Vec<RecentPaymentDetails> {
self.pending_outbound_payments.pending_outbound_payments.lock().unwrap().iter()
.filter_map(|(payment_id, pending_outbound_payment)| match pending_outbound_payment {
PendingOutboundPayment::AwaitingInvoice { .. }
| PendingOutboundPayment::AwaitingOffer { .. }
| PendingOutboundPayment::InvoiceReceived { .. } =>
{
Some(RecentPaymentDetails::AwaitingInvoice { payment_id: *payment_id })
},
PendingOutboundPayment::StaticInvoiceReceived { .. } => {
Some(RecentPaymentDetails::AwaitingInvoice { payment_id: *payment_id })
},
PendingOutboundPayment::Retryable { payment_hash, total_msat, .. } => {
Some(RecentPaymentDetails::Pending {
payment_id: *payment_id,
payment_hash: *payment_hash,
total_msat: *total_msat,
})
},
PendingOutboundPayment::Abandoned { payment_hash, .. } => {
Some(RecentPaymentDetails::Abandoned { payment_id: *payment_id, payment_hash: *payment_hash })
},
PendingOutboundPayment::Fulfilled { payment_hash, .. } => {
Some(RecentPaymentDetails::Fulfilled { payment_id: *payment_id, payment_hash: *payment_hash })
},
PendingOutboundPayment::Legacy { .. } => None
})
.collect()
}
#[rustfmt::skip]
fn close_channel_internal(&self, chan_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new();
let mut shutdown_result = Ok(());
{
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}") })?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(*chan_id) {
hash_map::Entry::Occupied(mut chan_entry) => {
if !chan_entry.get().context().is_connected() {
return Err(APIError::ChannelUnavailable {
err: "Cannot begin shutdown while peer is disconnected, maybe force-close instead?".to_owned(),
});
}
if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
let funding_txo_opt = chan.funding.get_funding_txo();
let their_features = &peer_state.latest_features;
let (shutdown_msg, mut monitor_update_opt, htlcs) =
chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
failed_htlcs = htlcs;
peer_state.pending_msg_events.push(MessageSendEvent::SendShutdown {
node_id: *counterparty_node_id,
msg: shutdown_msg,
});
debug_assert!(monitor_update_opt.is_none() || !chan.is_shutdown(),
"We can't both complete shutdown and generate a monitor update");
if let Some(monitor_update) = monitor_update_opt.take() {
handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
peer_state_lock, peer_state, per_peer_state, chan);
}
} else {
let reason = ClosureReason::LocallyCoopClosedUnfundedChannel;
let err = ChannelError::Close((reason.to_string(), reason));
let mut chan = chan_entry.remove();
let (_, mut e) = convert_channel_err!(self, peer_state, err, &mut chan);
e.dont_send_error_message();
shutdown_result = Err(e);
}
},
hash_map::Entry::Vacant(_) => {
return Err(APIError::ChannelUnavailable {
err: format!(
"Channel with id {} not found for the passed counterparty node_id {}",
chan_id, counterparty_node_id,
)
});
},
}
}
for htlc_source in failed_htlcs.drain(..) {
let failure_reason = LocalHTLCFailureReason::ChannelClosed;
let reason = HTLCFailReason::from_failure_code(failure_reason);
let receiver = HTLCHandlingFailureType::Forward { node_id: Some(*counterparty_node_id), channel_id: *chan_id };
let (source, hash) = htlc_source;
self.fail_htlc_backwards_internal(&source, &hash, &reason, receiver, None);
}
let _ = handle_error!(self, shutdown_result, *counterparty_node_id);
Ok(())
}
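	/// Begins a cooperative close of the given channel using the default feerate and
	/// shutdown script. The peer must be connected; otherwise, force-closing is the only
	/// option.
	///
	/// A minimal usage sketch (assumes `channel_manager`, an open `channel_id`, and a
	/// connected `counterparty_node_id`; not compiled as a doctest):
	///
	/// ```ignore
	/// channel_manager.close_channel(&channel_id, &counterparty_node_id)?;
	/// ```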
pub fn close_channel(
&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey,
) -> Result<(), APIError> {
self.close_channel_internal(channel_id, counterparty_node_id, None, None)
}
pub fn close_channel_with_feerate_and_script(
&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey,
target_feerate_sats_per_1000_weight: Option<u32>, shutdown_script: Option<ShutdownScript>,
) -> Result<(), APIError> {
self.close_channel_internal(
channel_id,
counterparty_node_id,
target_feerate_sats_per_1000_weight,
shutdown_script,
)
}
#[rustfmt::skip]
fn apply_post_close_monitor_update(
&self, counterparty_node_id: PublicKey, channel_id: ChannelId, funding_txo: OutPoint,
monitor_update: ChannelMonitorUpdate,
) {
let per_peer_state = self.per_peer_state.read().unwrap();
let mut peer_state_lock = per_peer_state.get(&counterparty_node_id)
.expect("We must always have a peer entry for a peer with which we have channels that have ChannelMonitors")
.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(channel_id) {
hash_map::Entry::Occupied(mut chan_entry) => {
if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
handle_new_monitor_update!(self, funding_txo,
monitor_update, peer_state_lock, peer_state, per_peer_state, chan);
return;
} else {
debug_assert!(false, "We shouldn't have an update for a non-funded channel");
}
},
hash_map::Entry::Vacant(_) => {},
}
handle_new_monitor_update!(
self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
counterparty_node_id, channel_id, POST_CHANNEL_CLOSE
);
}
#[rustfmt::skip]
fn finish_close_channel(&self, mut shutdown_res: ShutdownResult) {
debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
#[cfg(debug_assertions)]
for (_, peer) in self.per_peer_state.read().unwrap().iter() {
debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
}
let logger = WithContext::from(
&self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id), None
);
log_debug!(logger, "Finishing closure of channel due to {} with {} HTLCs to fail",
shutdown_res.closure_reason, shutdown_res.dropped_outbound_htlcs.len());
for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
let failure_reason = LocalHTLCFailureReason::ChannelClosed;
let reason = HTLCFailReason::from_failure_code(failure_reason);
let receiver = HTLCHandlingFailureType::Forward { node_id: Some(counterparty_node_id), channel_id };
self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver, None);
}
if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
debug_assert!(false, "This should have been handled in `locked_close_channel`");
self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
}
if self.background_events_processed_since_startup.load(Ordering::Acquire) {
if shutdown_res.channel_funding_txo.is_some() {
self.channel_monitor_updated(&shutdown_res.channel_id, None, &shutdown_res.counterparty_node_id);
}
}
let mut shutdown_results: Vec<(Result<Infallible, _>, _)> = Vec::new();
if let Some(txid) = shutdown_res.unbroadcasted_batch_funding_txid {
let mut funding_batch_states = self.funding_batch_states.lock().unwrap();
let affected_channels = funding_batch_states.remove(&txid).into_iter().flatten();
let per_peer_state = self.per_peer_state.read().unwrap();
let mut has_uncompleted_channel = None;
for (channel_id, counterparty_node_id, state) in affected_channels {
if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state = peer_state_mutex.lock().unwrap();
if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) {
let reason = ClosureReason::FundingBatchClosure;
let err = ChannelError::Close((reason.to_string(), reason));
let (_, e) = convert_channel_err!(self, peer_state, err, &mut chan);
shutdown_results.push((Err(e), counterparty_node_id));
}
}
has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state));
}
debug_assert!(
has_uncompleted_channel.unwrap_or(true),
"Closing a batch where all channels have completed initial monitor update",
);
}
{
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push_back((events::Event::ChannelClosed {
channel_id: shutdown_res.channel_id,
user_channel_id: shutdown_res.user_channel_id,
reason: shutdown_res.closure_reason,
counterparty_node_id: Some(shutdown_res.counterparty_node_id),
channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis),
channel_funding_txo: shutdown_res.channel_funding_txo,
last_local_balance_msat: Some(shutdown_res.last_local_balance_msat),
}, None));
if let Some(splice_funding_failed) = shutdown_res.splice_funding_failed.take() {
pending_events.push_back((events::Event::SpliceFailed {
channel_id: shutdown_res.channel_id,
counterparty_node_id: shutdown_res.counterparty_node_id,
user_channel_id: shutdown_res.user_channel_id,
abandoned_funding_txo: splice_funding_failed.funding_txo,
channel_type: splice_funding_failed.channel_type,
contributed_inputs: splice_funding_failed.contributed_inputs,
contributed_outputs: splice_funding_failed.contributed_outputs,
}, None));
}
if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx {
let funding_info = if shutdown_res.is_manual_broadcast {
FundingInfo::OutPoint {
outpoint: shutdown_res.channel_funding_txo
.expect("We had an unbroadcasted funding tx, so should also have had a funding outpoint"),
}
} else {
FundingInfo::Tx{ transaction }
};
pending_events.push_back((events::Event::DiscardFunding {
channel_id: shutdown_res.channel_id, funding_info
}, None));
}
}
for (err, counterparty_node_id) in shutdown_results.drain(..) {
let _ = handle_error!(self, err, counterparty_node_id);
}
}
#[rustfmt::skip]
fn force_close_channel_with_peer(&self, channel_id: &ChannelId, peer_node_id: &PublicKey, reason: ClosureReason)
-> Result<(), APIError> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(peer_node_id)
.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id), None);
let is_from_counterparty = matches!(reason, ClosureReason::CounterpartyForceClosed { .. });
let message = match &reason {
ClosureReason::HolderForceClosed { message, .. } => message.clone(),
_ => reason.to_string(),
};
if let Some(mut chan) = peer_state.channel_by_id.remove(channel_id) {
log_error!(logger, "Force-closing channel {}", channel_id);
let err = ChannelError::Close((message, reason));
let (_, mut e) = convert_channel_err!(self, peer_state, err, &mut chan);
mem::drop(peer_state_lock);
mem::drop(per_peer_state);
if is_from_counterparty {
e.dont_send_error_message();
}
let _ = handle_error!(self, Err::<(), _>(e), *peer_node_id);
Ok(())
} else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() {
log_error!(logger, "Force-closing inbound channel request {}", &channel_id);
if !is_from_counterparty && peer_state.is_connected {
peer_state.pending_msg_events.push(
MessageSendEvent::HandleError {
node_id: *peer_node_id,
action: msgs::ErrorAction::SendErrorMessage {
msg: msgs::ErrorMessage { channel_id: *channel_id, data: message }
},
}
);
}
Ok(())
} else {
Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, peer_node_id) })
}
}
#[rustfmt::skip]
fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String)
-> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
log_debug!(self.logger,
"Force-closing channel, The error message sent to the peer : {}", error_message);
let reason = ClosureReason::HolderForceClosed {
broadcasted_latest_txn: Some(true),
message: error_message,
};
		self.force_close_channel_with_peer(channel_id, counterparty_node_id, reason)
}
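	/// Force-closes the given channel, broadcasting our latest commitment transaction and
	/// sending `error_message` to the peer.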
pub fn force_close_broadcasting_latest_txn(
&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String,
) -> Result<(), APIError> {
self.force_close_sending_error(channel_id, counterparty_node_id, error_message)
}
pub fn force_close_all_channels_broadcasting_latest_txn(&self, error_message: String) {
for chan in self.list_channels() {
let _ = self.force_close_broadcasting_latest_txn(
&chan.channel_id,
&chan.counterparty.node_id,
error_message.clone(),
);
}
}
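	/// Initiates a splice of the given funded channel per `contribution`. The counterparty
	/// must support both splicing and quiescence; on success an `stfu` message is queued to
	/// begin quiescence negotiation.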
#[rustfmt::skip]
pub fn splice_channel(
&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey,
contribution: SpliceContribution, funding_feerate_per_kw: u32, locktime: Option<u32>,
) -> Result<(), APIError> {
let mut res = Ok(());
PersistenceNotifierGuard::optionally_notify(self, || {
let result = self.internal_splice_channel(
channel_id, counterparty_node_id, contribution, funding_feerate_per_kw, locktime
);
res = result;
match res {
Ok(_) => NotifyOption::DoPersist,
Err(_) => NotifyOption::SkipPersistNoEvents,
}
});
res
}
fn internal_splice_channel(
&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey,
contribution: SpliceContribution, funding_feerate_per_kw: u32, locktime: Option<u32>,
) -> Result<(), APIError> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = match per_peer_state.get(counterparty_node_id).ok_or_else(|| {
APIError::ChannelUnavailable {
err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"),
}
}) {
Ok(p) => p,
Err(e) => return Err(e),
};
let mut peer_state = peer_state_mutex.lock().unwrap();
if !peer_state.latest_features.supports_splicing() {
return Err(APIError::ChannelUnavailable {
err: "Peer does not support splicing".to_owned(),
});
}
if !peer_state.latest_features.supports_quiescence() {
return Err(APIError::ChannelUnavailable {
err: "Peer does not support quiescence, a splicing prerequisite".to_owned(),
});
}
match peer_state.channel_by_id.entry(*channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
let locktime = locktime.unwrap_or_else(|| self.current_best_block().height);
if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
let msg_opt = chan.splice_channel(
contribution,
funding_feerate_per_kw,
locktime,
&&logger,
)?;
if let Some(msg) = msg_opt {
peer_state.pending_msg_events.push(MessageSendEvent::SendStfu {
node_id: *counterparty_node_id,
msg,
});
}
Ok(())
} else {
Err(APIError::ChannelUnavailable {
err: format!(
"Channel with id {} is not funded, cannot splice it",
channel_id
),
})
}
},
hash_map::Entry::Vacant(_) => Err(APIError::ChannelUnavailable {
err: format!(
"Channel with id {} not found for the passed counterparty node_id {}",
channel_id, counterparty_node_id,
),
}),
}
}
#[cfg(test)]
pub(crate) fn abandon_splice(
&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey,
) -> Result<(), APIError> {
let mut res = Ok(());
PersistenceNotifierGuard::optionally_notify(self, || {
let result = self.internal_abandon_splice(channel_id, counterparty_node_id);
res = result;
match res {
Ok(_) => NotifyOption::SkipPersistHandleEvents,
Err(_) => NotifyOption::SkipPersistNoEvents,
}
});
res
}
#[cfg(test)]
fn internal_abandon_splice(
&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey,
) -> Result<(), APIError> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = match per_peer_state.get(counterparty_node_id).ok_or_else(|| {
APIError::ChannelUnavailable {
err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"),
}
}) {
Ok(p) => p,
Err(e) => return Err(e),
};
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(*channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if !chan_phase_entry.get().context().is_connected() {
return Err(APIError::ChannelUnavailable {
err: "Cannot abandon splice while peer is disconnected".to_owned(),
});
}
if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() {
let (tx_abort, splice_funding_failed) = chan.abandon_splice()?;
peer_state.pending_msg_events.push(MessageSendEvent::SendTxAbort {
node_id: *counterparty_node_id,
msg: tx_abort,
});
if let Some(splice_funding_failed) = splice_funding_failed {
let pending_events = &mut self.pending_events.lock().unwrap();
pending_events.push_back((
events::Event::SpliceFailed {
channel_id: *channel_id,
counterparty_node_id: *counterparty_node_id,
user_channel_id: chan.context.get_user_id(),
abandoned_funding_txo: splice_funding_failed.funding_txo,
channel_type: splice_funding_failed.channel_type,
contributed_inputs: splice_funding_failed.contributed_inputs,
contributed_outputs: splice_funding_failed.contributed_outputs,
},
None,
));
}
Ok(())
} else {
Err(APIError::ChannelUnavailable {
err: format!(
"Channel with id {} is not funded, cannot abandon splice",
channel_id
),
})
}
},
hash_map::Entry::Vacant(_) => Err(APIError::ChannelUnavailable {
err: format!(
"Channel with id {} not found for the passed counterparty node_id {}",
channel_id, counterparty_node_id,
),
}),
}
}
#[rustfmt::skip]
fn can_forward_htlc_to_outgoing_channel(
&self, chan: &mut FundedChannel<SP>, msg: &msgs::UpdateAddHTLC, next_packet: &NextPacketDetails
) -> Result<(), LocalHTLCFailureReason> {
if !chan.context.should_announce()
&& !self.config.read().unwrap().accept_forwards_to_priv_channels
{
return Err(LocalHTLCFailureReason::PrivateChannelForward);
}
if let HopConnector::ShortChannelId(outgoing_scid) = next_packet.outgoing_connector {
if chan.funding.get_channel_type().supports_scid_privacy() && outgoing_scid != chan.context.outbound_scid_alias() {
return Err(LocalHTLCFailureReason::RealSCIDForward);
}
} else {
return Err(LocalHTLCFailureReason::InvalidTrampolineForward);
}
if !chan.context.is_live() {
if !chan.context.is_enabled() {
return Err(LocalHTLCFailureReason::ChannelDisabled);
} else {
return Err(LocalHTLCFailureReason::ChannelNotReady);
}
}
if next_packet.outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() {
return Err(LocalHTLCFailureReason::AmountBelowMinimum);
}
chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value)?;
Ok(())
}
fn do_funded_channel_callback<X, C: Fn(&mut FundedChannel<SP>) -> X>(
&self, scid: u64, callback: C,
) -> Option<X> {
let (counterparty_node_id, channel_id) =
match self.short_to_chan_info.read().unwrap().get(&scid).cloned() {
None => return None,
Some((cp_id, id)) => (cp_id, id),
};
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
if peer_state_mutex_opt.is_none() {
return None;
}
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.get_mut(&channel_id).and_then(Channel::as_funded_mut) {
None => None,
Some(chan) => Some(callback(chan)),
}
}
#[rustfmt::skip]
fn can_forward_htlc(
&self, msg: &msgs::UpdateAddHTLC, next_packet_details: &NextPacketDetails
) -> Result<(), LocalHTLCFailureReason> {
let outgoing_scid = match next_packet_details.outgoing_connector {
HopConnector::ShortChannelId(scid) => scid,
HopConnector::Trampoline(_) => {
return Err(LocalHTLCFailureReason::InvalidTrampolineForward);
}
};
match self.do_funded_channel_callback(outgoing_scid, |chan: &mut FundedChannel<SP>| {
self.can_forward_htlc_to_outgoing_channel(chan, msg, next_packet_details)
}) {
Some(Ok(())) => {},
Some(Err(e)) => return Err(e),
None => {
if (self.config.read().unwrap().accept_intercept_htlcs &&
fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)) ||
fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)
{} else {
return Err(LocalHTLCFailureReason::UnknownNextPeer);
}
}
}
let cur_height = self.best_block.read().unwrap().height + 1;
check_incoming_htlc_cltv(cur_height, next_packet_details.outgoing_cltv_value, msg.cltv_expiry)?;
Ok(())
}
#[rustfmt::skip]
fn htlc_failure_from_update_add_err(
&self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey,
reason: LocalHTLCFailureReason, is_intro_node_blinded_forward: bool,
shared_secret: &[u8; 32]
) -> HTLCFailureMsg {
let mut res = VecWriter(Vec::with_capacity(8 + 2));
if reason.is_temporary() {
if reason == LocalHTLCFailureReason::AmountBelowMinimum ||
reason == LocalHTLCFailureReason::FeeInsufficient {
msg.amount_msat.write(&mut res).expect("Writes cannot fail");
}
else if reason == LocalHTLCFailureReason::IncorrectCLTVExpiry {
msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
}
else if reason == LocalHTLCFailureReason::ChannelDisabled {
0u16.write(&mut res).expect("Writes cannot fail");
}
(0u16).write(&mut res).expect("Writes cannot fail");
}
log_info!(
WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash)),
"Failed to accept/forward incoming HTLC: {:?}", reason,
);
if msg.blinding_point.is_some() {
return HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
channel_id: msg.channel_id,
htlc_id: msg.htlc_id,
sha256_of_onion: [0; 32],
failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(),
});
}
let (reason, err_data) = if is_intro_node_blinded_forward {
(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32][..])
} else {
(reason, &res.0[..])
};
let failure = HTLCFailReason::reason(reason, err_data.to_vec())
.get_encrypted_failure_packet(shared_secret, &None);
HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
channel_id: msg.channel_id,
htlc_id: msg.htlc_id,
reason: failure.data,
attribution_data: failure.attribution_data,
})
}
#[rustfmt::skip]
fn construct_pending_htlc_fail_msg<'a>(
&self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey,
shared_secret: [u8; 32], inbound_err: InboundHTLCErr
) -> HTLCFailureMsg {
let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash));
log_info!(logger, "Failed to accept/forward incoming HTLC: {}", inbound_err.msg);
if msg.blinding_point.is_some() {
return HTLCFailureMsg::Malformed(
msgs::UpdateFailMalformedHTLC {
channel_id: msg.channel_id,
htlc_id: msg.htlc_id,
sha256_of_onion: [0; 32],
failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(),
}
)
}
let failure = HTLCFailReason::reason(inbound_err.reason, inbound_err.err_data.to_vec())
.get_encrypted_failure_packet(&shared_secret, &None);
return HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
channel_id: msg.channel_id,
htlc_id: msg.htlc_id,
reason: failure.data,
attribution_data: failure.attribution_data,
});
}
#[rustfmt::skip]
fn get_pending_htlc_info<'a>(
&self, msg: &msgs::UpdateAddHTLC, shared_secret: [u8; 32],
decoded_hop: onion_utils::Hop, allow_underpay: bool,
next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>,
) -> Result<PendingHTLCInfo, InboundHTLCErr> {
match decoded_hop {
onion_utils::Hop::Receive { .. } | onion_utils::Hop::BlindedReceive { .. } |
onion_utils::Hop::TrampolineReceive { .. } | onion_utils::Hop::TrampolineBlindedReceive { .. } => {
let current_height: u32 = self.best_block.read().unwrap().height;
create_recv_pending_htlc_info(decoded_hop, shared_secret, msg.payment_hash,
msg.amount_msat, msg.cltv_expiry, None, allow_underpay, msg.skimmed_fee_msat,
current_height)
},
onion_utils::Hop::Forward { .. } | onion_utils::Hop::BlindedForward { .. } => {
create_fwd_pending_htlc_info(msg, decoded_hop, shared_secret, next_packet_pubkey_opt)
},
onion_utils::Hop::TrampolineForward { .. } | onion_utils::Hop::TrampolineBlindedForward { .. } => {
create_fwd_pending_htlc_info(msg, decoded_hop, shared_secret, next_packet_pubkey_opt)
},
}
}
fn get_channel_update_for_broadcast(
&self, chan: &FundedChannel<SP>,
) -> Result<msgs::ChannelUpdate, LightningError> {
if !chan.context.should_announce() {
return Err(LightningError {
err: "Cannot broadcast a channel_update for a private channel".to_owned(),
action: msgs::ErrorAction::IgnoreError,
});
}
if chan.funding.get_short_channel_id().is_none() {
return Err(LightningError {
err: "Channel not yet established".to_owned(),
action: msgs::ErrorAction::IgnoreError,
});
}
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(
logger,
"Attempting to generate broadcast channel update for channel {}",
&chan.context.channel_id()
);
self.get_channel_update_for_unicast(chan)
}
#[rustfmt::skip]
fn get_channel_update_for_unicast(&self, chan: &FundedChannel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger, "Attempting to generate channel update for channel {}", chan.context.channel_id());
let short_channel_id = match chan.funding.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
Some(id) => id,
};
		log_trace!(logger, "Generating channel update for channel {}", chan.context.channel_id());
let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
let enabled = chan.context.is_enabled();
let unsigned = msgs::UnsignedChannelUpdate {
chain_hash: self.chain_hash,
short_channel_id,
timestamp: chan.context.get_update_time_counter(),
message_flags: 1, channel_flags: (!were_node_one) as u8 | ((!enabled as u8) << 1),
cltv_expiry_delta: chan.context.get_cltv_expiry_delta(),
htlc_minimum_msat: chan.context.get_counterparty_htlc_minimum_msat(),
htlc_maximum_msat: chan.get_announced_htlc_max_msat(),
fee_base_msat: chan.context.get_outbound_forwarding_fee_base_msat(),
fee_proportional_millionths: chan.context.get_fee_proportional_millionths(),
excess_data: Vec::new(),
};
let sig = self.node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelUpdate(&unsigned)).unwrap();
Ok(msgs::ChannelUpdate {
signature: sig,
contents: unsigned
})
}
#[cfg(any(test, feature = "_externalize_tests"))]
pub(crate) fn test_send_payment_along_path(
&self, path: &Path, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields,
total_value: u64, cur_height: u32, payment_id: PaymentId,
keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32],
) -> Result<(), APIError> {
let _lck = self.total_consistency_lock.read().unwrap();
self.send_payment_along_path(SendAlongPathArgs {
path,
payment_hash,
recipient_onion: &recipient_onion,
total_value,
cur_height,
payment_id,
keysend_preimage,
invoice_request: None,
bolt12_invoice: None,
session_priv_bytes,
hold_htlc_at_next_hop: false,
})
}
fn send_payment_along_path(&self, args: SendAlongPathArgs) -> Result<(), APIError> {
let SendAlongPathArgs {
path,
payment_hash,
recipient_onion,
total_value,
cur_height,
payment_id,
keysend_preimage,
invoice_request,
bolt12_invoice,
session_priv_bytes,
hold_htlc_at_next_hop,
} = args;
debug_assert!(self.total_consistency_lock.try_write().is_err());
let prng_seed = self.entropy_source.get_secure_random_bytes();
let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");
let (onion_packet, htlc_msat, htlc_cltv) = onion_utils::create_payment_onion(
&self.secp_ctx,
&path,
&session_priv,
total_value,
recipient_onion,
cur_height,
payment_hash,
keysend_preimage,
invoice_request,
prng_seed,
)
.map_err(|e| {
let first_hop_key = Some(path.hops.first().unwrap().pubkey);
let logger = WithContext::from(&self.logger, first_hop_key, None, Some(*payment_hash));
log_error!(logger, "Failed to build an onion for path for payment hash {payment_hash}");
e
})?;
let err: Result<(), _> = loop {
let first_chan_scid = &path.hops.first().unwrap().short_channel_id;
let first_chan = self.short_to_chan_info.read().unwrap().get(first_chan_scid).cloned();
let (counterparty_node_id, id) = match first_chan {
None => {
let first_hop_key = Some(path.hops.first().unwrap().pubkey);
let logger =
WithContext::from(&self.logger, first_hop_key, None, Some(*payment_hash));
log_error!(logger, "Failed to find first-hop for payment hash {payment_hash}");
return Err(APIError::ChannelUnavailable {
err: "No channel available with first hop!".to_owned(),
});
},
Some((cp_id, chan_id)) => (cp_id, chan_id),
};
let logger = WithContext::from(
&self.logger,
Some(counterparty_node_id),
Some(id),
Some(*payment_hash),
);
log_trace!(
logger,
"Attempting to send payment with payment hash {payment_hash} along path with next hop {first_chan_scid}"
);
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(&counterparty_node_id).ok_or_else(|| {
APIError::ChannelUnavailable {
err: "No peer matching the path's first hop found!".to_owned(),
}
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(id) {
match chan_entry.get_mut().as_funded_mut() {
Some(chan) => {
if !chan.context.is_live() {
return Err(APIError::ChannelUnavailable {
err: "Peer for first hop currently disconnected".to_owned(),
});
}
let funding_txo = chan.funding.get_funding_txo().unwrap();
let logger = WithChannelContext::from(
&self.logger,
&chan.context,
Some(*payment_hash),
);
let htlc_source = HTLCSource::OutboundRoute {
path: path.clone(),
session_priv: session_priv.clone(),
first_hop_htlc_msat: htlc_msat,
payment_id,
bolt12_invoice: bolt12_invoice.cloned(),
};
let send_res = chan.send_htlc_and_commit(
htlc_msat,
*payment_hash,
htlc_cltv,
htlc_source,
onion_packet,
None,
hold_htlc_at_next_hop,
&self.fee_estimator,
&&logger,
);
match break_channel_entry!(self, peer_state, send_res, chan_entry) {
Some(monitor_update) => {
let ok = handle_new_monitor_update!(
self,
funding_txo,
monitor_update,
peer_state_lock,
peer_state,
per_peer_state,
chan
);
if !ok {
return Err(APIError::MonitorUpdateInProgress);
}
},
None => {},
}
},
None => {
return Err(APIError::ChannelUnavailable {
err: "Channel to first hop is unfunded".to_owned(),
})
},
};
} else {
return Err(APIError::ChannelUnavailable {
err: "No channel available with first hop!".to_owned(),
});
}
return Ok(());
};
match handle_error!(self, err, path.hops.first().unwrap().pubkey) {
Ok(_) => unreachable!(),
Err(e) => Err(APIError::ChannelUnavailable { err: e.err }),
}
}
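	/// Sends a payment along the specific `route` given, with no retries. If the route
	/// carries no `route_params`, placeholder parameters are synthesized from the final hop
	/// so the payment can still be tracked.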
#[rustfmt::skip]
pub fn send_payment_with_route(
&self, mut route: Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields,
payment_id: PaymentId
) -> Result<(), RetryableSendFailure> {
let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let route_params = route.route_params.clone().unwrap_or_else(|| {
let (payee_node_id, cltv_delta) = route.paths.first()
.and_then(|path| path.hops.last().map(|hop| (hop.pubkey, hop.cltv_expiry_delta as u32)))
			.unwrap_or_else(|| (PublicKey::from_slice(&[2; 33]).unwrap(), MIN_FINAL_CLTV_EXPIRY_DELTA as u32));
let dummy_payment_params = PaymentParameters::from_node_id(payee_node_id, cltv_delta);
RouteParameters::from_payment_params_and_value(dummy_payment_params, route.get_total_amount())
});
if route.route_params.is_none() { route.route_params = Some(route_params.clone()); }
let router = FixedRouter::new(route);
let logger = WithContext::from(&self.logger, None, None, Some(payment_hash));
self.pending_outbound_payments
.send_payment(payment_hash, recipient_onion, payment_id, Retry::Attempts(0),
route_params, &&router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
&self.entropy_source, &self.node_signer, best_block_height,
&self.pending_events, |args| self.send_payment_along_path(args), &logger)
}
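	/// Routes and sends a payment for the given parameters, retrying per `retry_strategy`.
	///
	/// A minimal usage sketch (assumes `channel_manager`, a `payment_hash` and
	/// `payment_secret` taken from the recipient's invoice, and prebuilt `route_params`;
	/// not compiled as a doctest):
	///
	/// ```ignore
	/// channel_manager.send_payment(
	/// 	payment_hash,
	/// 	RecipientOnionFields::secret_only(payment_secret),
	/// 	PaymentId(payment_hash.0), // any unique id; reusing the hash bytes is common
	/// 	route_params,
	/// 	Retry::Attempts(3),
	/// )?;
	/// ```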
pub fn send_payment(
&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields,
payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry,
) -> Result<(), RetryableSendFailure> {
let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.send_payment(
payment_hash,
recipient_onion,
payment_id,
retry_strategy,
route_params,
&self.router,
self.list_usable_channels(),
|| self.compute_inflight_htlcs(),
&self.entropy_source,
&self.node_signer,
best_block_height,
&self.pending_events,
|args| self.send_payment_along_path(args),
&WithContext::from(&self.logger, None, None, Some(payment_hash)),
)
}
#[cfg(any(test, feature = "_externalize_tests"))]
pub(super) fn test_send_payment_internal(
&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields,
keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId,
recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>,
) -> Result<(), PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.test_send_payment_internal(
route,
payment_hash,
recipient_onion,
keysend_preimage,
payment_id,
recv_value_msat,
onion_session_privs,
&self.node_signer,
best_block_height,
|args| self.send_payment_along_path(args),
)
}
#[cfg(any(test, feature = "_externalize_tests"))]
pub(crate) fn test_add_new_pending_payment(
&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields,
payment_id: PaymentId, route: &Route,
) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height;
self.pending_outbound_payments.test_add_new_pending_payment(
payment_hash,
recipient_onion,
payment_id,
route,
None,
&self.entropy_source,
best_block_height,
)
}
#[cfg(test)]
pub(crate) fn test_modify_pending_payment<Fn>(&self, payment_id: &PaymentId, mut callback: Fn)
where
Fn: FnMut(&mut PendingOutboundPayment),
{
let mut outbounds =
self.pending_outbound_payments.pending_outbound_payments.lock().unwrap();
match outbounds.get_mut(payment_id) {
Some(outb) => callback(outb),
_ => panic!(),
}
}
#[cfg(test)]
pub(crate) fn test_set_payment_metadata(
&self, payment_id: PaymentId, new_payment_metadata: Option<Vec<u8>>,
) {
self.pending_outbound_payments.test_set_payment_metadata(payment_id, new_payment_metadata);
}
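	/// Pays a BOLT 11 invoice, routing and retrying per `retry_strategy`. `amount_msats`
	/// must be `Some` for zero-amount invoices and `None` otherwise.
	///
	/// A minimal usage sketch (assumes `channel_manager`, a parsed `invoice`, and that the
	/// default `RouteParametersConfig` suffices; not compiled as a doctest):
	///
	/// ```ignore
	/// channel_manager.pay_for_bolt11_invoice(
	/// 	&invoice,
	/// 	PaymentId([42; 32]), // must be unique per payment
	/// 	None,                // amount override; required for zero-amount invoices
	/// 	RouteParametersConfig::default(),
	/// 	Retry::Attempts(3),
	/// )?;
	/// ```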
pub fn pay_for_bolt11_invoice(
&self, invoice: &Bolt11Invoice, payment_id: PaymentId, amount_msats: Option<u64>,
route_params_config: RouteParametersConfig, retry_strategy: Retry,
) -> Result<(), Bolt11PaymentError> {
let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let payment_hash = PaymentHash(invoice.payment_hash().to_byte_array());
self.pending_outbound_payments.pay_for_bolt11_invoice(
invoice,
payment_id,
amount_msats,
route_params_config,
retry_strategy,
&self.router,
self.list_usable_channels(),
|| self.compute_inflight_htlcs(),
&self.entropy_source,
&self.node_signer,
best_block_height,
&self.pending_events,
|args| self.send_payment_along_path(args),
&WithContext::from(&self.logger, None, None, Some(payment_hash)),
)
}
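	/// Pays a BOLT 12 invoice after verifying it against the context of a prior outbound
	/// payment (or via payer metadata for refunds without paths); invoices that cannot be
	/// verified are rejected with `Bolt12PaymentError::UnexpectedInvoice`.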
pub fn send_payment_for_bolt12_invoice(
&self, invoice: &Bolt12Invoice, context: Option<&OffersContext>,
) -> Result<(), Bolt12PaymentError> {
match self.verify_bolt12_invoice(invoice, context) {
Ok(payment_id) => self.send_payment_for_verified_bolt12_invoice(invoice, payment_id),
Err(()) => Err(Bolt12PaymentError::UnexpectedInvoice),
}
}
fn verify_bolt12_invoice(
&self, invoice: &Bolt12Invoice, context: Option<&OffersContext>,
) -> Result<PaymentId, ()> {
let secp_ctx = &self.secp_ctx;
let expanded_key = &self.inbound_payment_key;
match context {
None if invoice.is_for_refund_without_paths() => {
invoice.verify_using_metadata(expanded_key, secp_ctx)
},
Some(&OffersContext::OutboundPayment { payment_id, nonce, .. }) => {
invoice.verify_using_payer_data(payment_id, nonce, expanded_key, secp_ctx)
},
_ => Err(()),
}
}
fn send_payment_for_verified_bolt12_invoice(
&self, invoice: &Bolt12Invoice, payment_id: PaymentId,
) -> Result<(), Bolt12PaymentError> {
let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let features = self.bolt12_invoice_features();
self.pending_outbound_payments.send_payment_for_bolt12_invoice(
invoice,
payment_id,
&self.router,
self.list_usable_channels(),
features,
|| self.compute_inflight_htlcs(),
&self.entropy_source,
&self.node_signer,
&self,
&self.secp_ctx,
best_block_height,
&self.pending_events,
|args| self.send_payment_along_path(args),
&WithContext::from(&self.logger, None, None, None),
)
}
fn check_refresh_async_receive_offer_cache(&self, timer_tick_occurred: bool) {
let peers = self.get_peers_for_blinded_path();
let channels = self.list_usable_channels();
let entropy = &*self.entropy_source;
let router = &*self.router;
let refresh_res = self.flow.check_refresh_async_receive_offer_cache(
peers,
channels,
entropy,
router,
timer_tick_occurred,
);
match refresh_res {
Err(()) => {
log_error!(
self.logger,
"Failed to create blinded paths when requesting async receive offer paths"
);
},
Ok(()) => {},
}
}
#[cfg(test)]
pub(crate) fn test_check_refresh_async_receive_offers(&self) {
self.check_refresh_async_receive_offer_cache(false);
}
pub fn static_invoice_persisted(&self, invoice_persisted_path: Responder) {
self.flow.static_invoice_persisted(invoice_persisted_path);
}
pub fn respond_to_static_invoice_request(
&self, invoice: StaticInvoice, responder: Responder, invoice_request: InvoiceRequest,
invoice_request_path: BlindedMessagePath,
) -> Result<(), Bolt12SemanticError> {
self.flow.enqueue_invoice_request_to_forward(
invoice_request,
invoice_request_path,
responder.clone(),
);
self.flow.enqueue_static_invoice(invoice, responder)
}
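/// Kicks off payment of a [`StaticInvoice`], whose recipient may be offline. The invoice
/// is first recorded against the pending outbound payment; we then either release the
/// HTLCs immediately to a next hop that supports holding them (see `hold_htlc_channels`)
/// or enqueue a `held_htlc_available` onion message and wait for the recipient to come
/// online, abandoning the payment if no reply path can be built.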
fn initiate_async_payment(
&self, invoice: &StaticInvoice, payment_id: PaymentId,
) -> Result<(), Bolt12PaymentError> {
let mut res = Ok(());
PersistenceNotifierGuard::optionally_notify(self, || {
let best_block_height = self.best_block.read().unwrap().height;
let features = self.bolt12_invoice_features();
let outbound_pmts_res = self.pending_outbound_payments.static_invoice_received(
invoice,
payment_id,
features,
best_block_height,
self.duration_since_epoch(),
&*self.entropy_source,
&self.pending_events,
);
match outbound_pmts_res {
Ok(()) => {},
Err(Bolt12PaymentError::UnexpectedInvoice)
| Err(Bolt12PaymentError::DuplicateInvoice) => {
res = outbound_pmts_res.map(|_| ());
return NotifyOption::SkipPersistNoEvents;
},
Err(e) => {
res = Err(e);
return NotifyOption::DoPersist;
},
};
if let Ok(channels) = self.hold_htlc_channels() {
if let Err(e) =
self.send_payment_for_static_invoice_no_persist(payment_id, channels, true)
{
log_trace!(
self.logger,
"Failed to send held HTLC with payment id {}: {:?}",
payment_id,
e
);
}
} else {
let reply_path = HeldHtlcReplyPath::ToUs {
payment_id,
peers: self.get_peers_for_blinded_path(),
};
let enqueue_held_htlc_available_res =
self.flow.enqueue_held_htlc_available(invoice, reply_path);
if enqueue_held_htlc_available_res.is_err() {
self.abandon_payment_with_reason(
payment_id,
PaymentFailureReason::BlindedPathCreationFailed,
);
res = Err(Bolt12PaymentError::BlindedPathCreationFailed);
return NotifyOption::DoPersist;
};
}
NotifyOption::DoPersist
});
res
}
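/// Returns the funded channels over which outbound HTLCs can be held at the next hop,
/// i.e. live channels whose counterparty advertises HTLC-hold support. Errs if the
/// `hold_outbound_htlcs_at_next_hop` config flag is unset or no such channel exists.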
fn hold_htlc_channels(&self) -> Result<Vec<ChannelDetails>, ()> {
let should_send_async = self.config.read().unwrap().hold_outbound_htlcs_at_next_hop;
if !should_send_async {
return Err(());
}
let hold_htlc_channels =
self.list_funded_channels_with_filter(|&(init_features, _, ref channel)| {
init_features.supports_htlc_hold() && channel.context().is_live()
});
if hold_htlc_channels.is_empty() {
Err(())
} else {
Ok(hold_htlc_channels)
}
}
fn send_payment_for_static_invoice(
&self, payment_id: PaymentId,
) -> Result<(), Bolt12PaymentError> {
let mut res = Ok(());
let first_hops = self.list_usable_channels();
PersistenceNotifierGuard::optionally_notify(self, || {
let outbound_pmts_res =
self.send_payment_for_static_invoice_no_persist(payment_id, first_hops, false);
match outbound_pmts_res {
Err(Bolt12PaymentError::UnexpectedInvoice)
| Err(Bolt12PaymentError::DuplicateInvoice) => {
res = outbound_pmts_res.map(|_| ());
NotifyOption::SkipPersistNoEvents
},
other_res => {
res = other_res;
NotifyOption::DoPersist
},
}
});
res
}
fn send_payment_for_static_invoice_no_persist(
&self, payment_id: PaymentId, first_hops: Vec<ChannelDetails>, hold_htlcs_at_next_hop: bool,
) -> Result<(), Bolt12PaymentError> {
let best_block_height = self.best_block.read().unwrap().height;
self.pending_outbound_payments.send_payment_for_static_invoice(
payment_id,
hold_htlcs_at_next_hop,
&self.router,
first_hops,
|| self.compute_inflight_htlcs(),
&self.entropy_source,
&self.node_signer,
&self,
&self.secp_ctx,
best_block_height,
&self.pending_events,
|args| self.send_payment_along_path(args),
&WithContext::from(&self.logger, None, None, None),
)
}
fn path_for_release_held_htlc(
&self, htlc_id: u64, prev_outbound_scid_alias: u64, channel_id: &ChannelId,
counterparty_node_id: &PublicKey,
) -> BlindedMessagePath {
let intercept_id =
InterceptId::from_htlc_id_and_chan_id(htlc_id, channel_id, counterparty_node_id);
self.flow.path_for_release_held_htlc(
intercept_id,
prev_outbound_scid_alias,
htlc_id,
&*self.entropy_source,
)
}
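/// Signals that no further retries should be attempted for the given payment. Any
/// in-flight HTLCs still resolve normally; once they do, an [`Event::PaymentFailed`] is
/// generated for the payment.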
pub fn abandon_payment(&self, payment_id: PaymentId) {
self.abandon_payment_with_reason(payment_id, PaymentFailureReason::UserAbandoned)
}
fn abandon_payment_with_reason(&self, payment_id: PaymentId, reason: PaymentFailureReason) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.abandon_payment(payment_id, reason, &self.pending_events);
}
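/// Sends a spontaneous ("keysend") payment, which does not require an invoice from the
/// recipient. If `payment_preimage` is `None`, a preimage is chosen internally; the
/// resulting payment hash is returned either way.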
pub fn send_spontaneous_payment(
&self, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields,
payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry,
) -> Result<PaymentHash, RetryableSendFailure> {
let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let payment_hash = payment_preimage.map(|preimage| preimage.into());
self.pending_outbound_payments.send_spontaneous_payment(
payment_preimage,
recipient_onion,
payment_id,
retry_strategy,
route_params,
&self.router,
self.list_usable_channels(),
|| self.compute_inflight_htlcs(),
&self.entropy_source,
&self.node_signer,
best_block_height,
&self.pending_events,
|args| self.send_payment_along_path(args),
&WithContext::from(&self.logger, None, None, payment_hash),
)
}
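/// Sends a payment probe along the given [`Path`]. The probe's payment hash is derived
/// from a fresh payment id and our probing cookie secret, letting us later recognize the
/// resulting success or failure as a probe rather than a real payment.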
pub fn send_probe(&self, path: Path) -> Result<(PaymentHash, PaymentId), ProbeSendFailure> {
let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.send_probe(
path,
self.probing_cookie_secret,
&self.entropy_source,
&self.node_signer,
best_block_height,
|args| self.send_payment_along_path(args),
)
}
#[cfg(test)]
pub(crate) fn payment_is_probe(
&self, payment_hash: &PaymentHash, payment_id: &PaymentId,
) -> bool {
outbound_payment::payment_is_probe(payment_hash, payment_id, self.probing_cookie_secret)
}
pub fn send_spontaneous_preflight_probes(
&self, node_id: PublicKey, amount_msat: u64, final_cltv_expiry_delta: u32,
liquidity_limit_multiplier: Option<u64>,
) -> Result<Vec<(PaymentHash, PaymentId)>, ProbeSendFailure> {
let payment_params = PaymentParameters::from_node_id(node_id, final_cltv_expiry_delta);
let route_params =
RouteParameters::from_payment_params_and_value(payment_params, amount_msat);
self.send_preflight_probes(route_params, liquidity_limit_multiplier)
}
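/// Sends pre-flight probes over the routes that would be used to pay the given
/// [`RouteParameters`], gauging their liquidity before committing a real payment.
///
/// Probes to likely-unannounced last hops are shortened by one hop, paths with fewer than
/// two hops are skipped, and a first hop is skipped whenever the probe would use more than
/// `1 / liquidity_limit_multiplier` of its outbound limit (default multiplier: 3). For
/// example, a 100_000 msat probe over a channel with a 250_000 msat
/// `next_outbound_htlc_limit_msat` is skipped, since 250_000 < 3 * 100_000.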
pub fn send_preflight_probes(
&self, route_params: RouteParameters, liquidity_limit_multiplier: Option<u64>,
) -> Result<Vec<(PaymentHash, PaymentId)>, ProbeSendFailure> {
let liquidity_limit_multiplier = liquidity_limit_multiplier.unwrap_or(3);
let payer = self.get_our_node_id();
let usable_channels = self.list_usable_channels();
let first_hops = usable_channels.iter().collect::<Vec<_>>();
let inflight_htlcs = self.compute_inflight_htlcs();
let route = self
.router
.find_route(&payer, &route_params, Some(&first_hops), inflight_htlcs)
.map_err(|e| {
log_error!(self.logger, "Failed to find path for payment probe: {:?}", e);
ProbeSendFailure::RouteNotFound
})?;
let mut used_liquidity_map = hash_map_with_capacity(first_hops.len());
let mut res = Vec::new();
for mut path in route.paths {
while let Some(last_path_hop) = path.hops.last() {
if last_path_hop.maybe_announced_channel {
break;
} else {
log_debug!(
self.logger,
"Avoided sending payment probe all the way to last hop {} as it is likely unannounced.",
last_path_hop.short_channel_id
);
let final_value_msat = path.final_value_msat();
path.hops.pop();
if let Some(new_last) = path.hops.last_mut() {
new_last.fee_msat += final_value_msat;
}
}
}
if path.hops.len() < 2 {

log_debug!(
self.logger,
"Skipped sending payment probe over path with less than two hops."
);
continue;
}
if let Some(first_path_hop) = path.hops.first() {
if let Some(first_hop) = first_hops.iter().find(|h| {
h.get_outbound_payment_scid() == Some(first_path_hop.short_channel_id)
}) {
let path_value = path.final_value_msat() + path.fee_msat();
let used_liquidity =
used_liquidity_map.entry(first_path_hop.short_channel_id).or_insert(0);
if first_hop.next_outbound_htlc_limit_msat
< (*used_liquidity + path_value) * liquidity_limit_multiplier
{
log_debug!(self.logger, "Skipped sending payment probe to avoid putting channel {} under the liquidity limit.", first_path_hop.short_channel_id);
continue;
} else {
*used_liquidity += path_value;
}
}
}
res.push(self.send_probe(path).map_err(|e| {
log_error!(self.logger, "Failed to send pre-flight probe: {:?}", e);
e
})?);
}
Ok(res)
}
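/// Converts an unfunded, outbound V1 channel into a funded one using the funding outpoint
/// selected by `find_funding_output`, queueing a `funding_created` message to the
/// counterparty. On any failure the channel is abandoned and the error is surfaced to the
/// caller via `abandon_chan!`.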
#[rustfmt::skip]
fn funding_transaction_generated_intern<FundingOutput: FnMut(&OutboundV1Channel<SP>) -> Result<OutPoint, &'static str>>(
&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding_transaction: Transaction, is_batch_funding: bool,
mut find_funding_output: FundingOutput, is_manual_broadcast: bool,
) -> Result<(), APIError> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}") })?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
macro_rules! abandon_chan { ($err: expr, $api_err: expr, $chan: expr) => { {
let counterparty;
let err = if let ChannelError::Close((msg, reason)) = $err {
let channel_id = $chan.context.channel_id();
counterparty = $chan.context.get_counterparty_node_id();
let shutdown_res = $chan.abandon_unfunded_chan(reason);
MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None)
} else { unreachable!(); };
mem::drop(peer_state_lock);
mem::drop(per_peer_state);
let _: Result<(), _> = handle_error!(self, Err(err), counterparty);
Err($api_err)
} } }
let mut chan = match peer_state.channel_by_id.entry(temporary_channel_id) {
hash_map::Entry::Occupied(chan) => {
if !chan.get().ready_to_fund() {
return Err(APIError::APIMisuseError {
err: format!("Channel {temporary_channel_id} with counterparty {counterparty_node_id} is not an unfunded, outbound channel ready to fund"),
});
}
match chan.remove().into_unfunded_outbound_v1() {
Ok(chan) => chan,
Err(chan) => {
debug_assert!(false, "ready_to_fund guarantees into_unfunded_outbound_v1 will succeed");
peer_state.channel_by_id.insert(temporary_channel_id, chan);
return Err(APIError::APIMisuseError {
err: "Invalid state, please report this bug".to_owned(),
});
},
}
},
hash_map::Entry::Vacant(_) => {
return Err(APIError::ChannelUnavailable {
err: format!("Channel {temporary_channel_id} with counterparty {counterparty_node_id} not found"),
});
},
};
let funding_txo = match find_funding_output(&chan) {
Ok(found_funding_txo) => found_funding_txo,
Err(err) => {
let chan_err = ChannelError::close(err.to_owned());
let api_err = APIError::APIMisuseError { err: err.to_owned() };
return abandon_chan!(chan_err, api_err, chan);
},
};
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger);
let (mut chan, msg_opt) = match funding_res {
Ok(funding_msg) => (chan, funding_msg),
Err((mut chan, chan_err)) => {
let api_err = APIError::ChannelUnavailable { err: "Signer refused to sign the initial commitment transaction".to_owned() };
return abandon_chan!(chan_err, api_err, chan);
}
};
match peer_state.channel_by_id.entry(chan.context.channel_id()) {
hash_map::Entry::Occupied(_) => {
let err = format!(
"An existing channel using ID {} is open with peer {}",
chan.context.channel_id(), chan.context.get_counterparty_node_id(),
);
let chan_err = ChannelError::close(err.to_owned());
let api_err = APIError::APIMisuseError { err: err.to_owned() };
chan.unset_funding_info();
return abandon_chan!(chan_err, api_err, chan);
},
hash_map::Entry::Vacant(e) => {
if let Some(msg) = msg_opt {
peer_state.pending_msg_events.push(MessageSendEvent::SendFundingCreated {
node_id: chan.context.get_counterparty_node_id(),
msg,
});
}
if is_manual_broadcast {
chan.context.set_manual_broadcast();
}
e.insert(Channel::from(chan));
Ok(())
}
}
}
#[cfg(any(test, feature = "_externalize_tests"))]
pub(crate) fn funding_transaction_generated_unchecked(
&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey,
funding_transaction: Transaction, output_index: u16,
) -> Result<(), APIError> {
let txid = funding_transaction.compute_txid();
self.funding_transaction_generated_intern(
temporary_channel_id,
counterparty_node_id,
funding_transaction,
false,
|_| Ok(OutPoint { txid, index: output_index }),
false,
)
}
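/// Funds a single channel from the given transaction. Thin wrapper around
/// [`Self::batch_funding_transaction_generated`] with a one-element batch.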
pub fn funding_transaction_generated(
&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey,
funding_transaction: Transaction,
) -> Result<(), APIError> {
let temporary_chan = &[(&temporary_channel_id, &counterparty_node_id)];
self.batch_funding_transaction_generated(temporary_chan, funding_transaction)
}
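/// Funds a channel from an outpoint alone, without LDK ever seeing the funding
/// transaction. No validation of the output's script or value is possible, so a wrong
/// outpoint can leave the channel unusable and risk loss of any committed funds; hence
/// "unsafe" and "manual" in the name.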
pub fn unsafe_manual_funding_transaction_generated(
&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding: OutPoint,
) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let temporary_chans = &[(&temporary_channel_id, &counterparty_node_id)];
let funding_type = FundingType::Unchecked(funding);
self.batch_funding_transaction_generated_intern(temporary_chans, funding_type)
}
pub fn funding_transaction_generated_manual_broadcast(
&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey,
funding_transaction: Transaction,
) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.batch_funding_transaction_generated_intern(
&[(&temporary_channel_id, &counterparty_node_id)],
FundingType::CheckedManualBroadcast(funding_transaction),
)
}
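/// Funds a batch of channels from a single transaction, which must be fully signed, spend
/// only SegWit inputs, have a final locktime, and contain exactly one output per channel
/// matching that channel's expected script and value. If any channel in the batch fails,
/// all of them are abandoned.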
pub fn batch_funding_transaction_generated(
&self, temporary_channels: &[(&ChannelId, &PublicKey)], funding_transaction: Transaction,
) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let funding_type = FundingType::Checked(funding_transaction);
self.batch_funding_transaction_generated_intern(temporary_channels, funding_type)
}
#[rustfmt::skip]
fn batch_funding_transaction_generated_intern(&self, temporary_channels: &[(&ChannelId, &PublicKey)], funding: FundingType) -> Result<(), APIError> {
let mut result = Ok(());
if let FundingType::Checked(funding_transaction) |
FundingType::CheckedManualBroadcast(funding_transaction) = &funding
{
if !funding_transaction.is_coinbase() {
for inp in funding_transaction.input.iter() {
if inp.witness.is_empty() {
result = result.and(Err(APIError::APIMisuseError {
err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned()
}));
}
}
}
if funding_transaction.output.len() > u16::MAX as usize {
result = result.and(Err(APIError::APIMisuseError {
err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
}));
}
let height = self.best_block.read().unwrap().height;
if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) &&
funding_transaction.lock_time.is_block_height() &&
funding_transaction.lock_time.to_consensus_u32() > height + 1
{
result = result.and(Err(APIError::APIMisuseError {
err: "Funding transaction absolute timelock is non-final".to_owned()
}));
}
}
let txid = funding.txid();
let is_batch_funding = temporary_channels.len() > 1;
let mut funding_batch_states = if is_batch_funding {
Some(self.funding_batch_states.lock().unwrap())
} else {
None
};
let mut funding_batch_state = funding_batch_states.as_mut().and_then(|states| {
match states.entry(txid) {
btree_map::Entry::Occupied(_) => {
result = result.clone().and(Err(APIError::APIMisuseError {
err: "Batch funding transaction with the same txid already exists".to_owned()
}));
None
},
btree_map::Entry::Vacant(vacant) => Some(vacant.insert(Vec::new())),
}
});
let is_manual_broadcast = funding.is_manual_broadcast();
for &(temporary_channel_id, counterparty_node_id) in temporary_channels {
result = result.and_then(|_| self.funding_transaction_generated_intern(
*temporary_channel_id,
*counterparty_node_id,
funding.transaction_or_dummy(),
is_batch_funding,
|chan| {
let mut output_index = None;
let expected_spk = chan.funding.get_funding_redeemscript().to_p2wsh();
let outpoint = match &funding {
FundingType::Checked(tx) | FundingType::CheckedManualBroadcast(tx) => {
for (idx, outp) in tx.output.iter().enumerate() {
if outp.script_pubkey == expected_spk && outp.value.to_sat() == chan.funding.get_value_satoshis() {
if output_index.is_some() {
return Err("Multiple outputs matched the expected script and value");
}
output_index = Some(idx as u16);
}
}
if output_index.is_none() {
return Err("No output matched the script_pubkey and value in the FundingGenerationReady event");
}
OutPoint { txid, index: output_index.unwrap() }
},
FundingType::Unchecked(outpoint) => outpoint.clone(),
};
if let Some(funding_batch_state) = funding_batch_state.as_mut() {
funding_batch_state.push((ChannelId::v1_from_funding_outpoint(outpoint), *counterparty_node_id, false));
}
Ok(outpoint)
},
is_manual_broadcast)
);
}
if let Err(ref e) = result {
let e = format!("Error in transaction funding: {:?}", e);
let mut channels_to_remove = Vec::new();
channels_to_remove.extend(funding_batch_states.as_mut()
.and_then(|states| states.remove(&txid))
.into_iter().flatten()
.map(|(chan_id, node_id, _state)| (chan_id, node_id))
);
channels_to_remove.extend(temporary_channels.iter()
.map(|(&chan_id, &node_id)| (chan_id, node_id))
);
let mut shutdown_results: Vec<(Result<Infallible, _>, _)> = Vec::new();
{
let per_peer_state = self.per_peer_state.read().unwrap();
for (channel_id, counterparty_node_id) in channels_to_remove {
per_peer_state.get(&counterparty_node_id)
.map(|peer_state_mutex| peer_state_mutex.lock().unwrap())
.and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id).map(|chan| (chan, peer_state)))
.map(|(mut chan, mut peer_state)| {
let reason = ClosureReason::ProcessingError { err: e.clone() };
let err = ChannelError::Close((e.clone(), reason));
let (_, e) =
convert_channel_err!(self, peer_state, err, &mut chan);
shutdown_results.push((Err(e), counterparty_node_id));
});
}
}
mem::drop(funding_batch_states);
for (err, counterparty_node_id) in shutdown_results {
let _ = handle_error!(self, err, counterparty_node_id);
}
}
result
}
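/// Handles the transaction signatures for an interactively funded channel (dual-funded
/// open or splice). Extracts the non-empty input witnesses from `transaction`, hands them
/// to the channel, and, as applicable, broadcasts the final funding transaction, emits
/// [`Event::SplicePending`], and queues `tx_signatures`/`splice_locked` for the peer.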
pub fn funding_transaction_signed(
&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, transaction: Transaction,
) -> Result<(), APIError> {
let mut result = Ok(());
PersistenceNotifierGuard::optionally_notify(self, || {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
if peer_state_mutex_opt.is_none() {
result = Err(APIError::ChannelUnavailable {
err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}")
});
return NotifyOption::SkipPersistNoEvents;
}
let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
match peer_state.channel_by_id.get_mut(channel_id) {
Some(channel) => match channel.as_funded_mut() {
Some(chan) => {
let txid = transaction.compute_txid();
let witnesses: Vec<_> = transaction
.input
.into_iter()
.map(|input| input.witness)
.filter(|witness| !witness.is_empty())
.collect();
let best_block_height = self.best_block.read().unwrap().height;
match chan.funding_transaction_signed(
txid,
witnesses,
best_block_height,
&self.logger,
) {
Ok(FundingTxSigned {
tx_signatures: Some(tx_signatures),
funding_tx,
splice_negotiated,
splice_locked,
}) => {
if let Some(funding_tx) = funding_tx {
self.broadcast_interactive_funding(
chan,
&funding_tx,
&self.logger,
);
}
if let Some(splice_negotiated) = splice_negotiated {
self.pending_events.lock().unwrap().push_back((
events::Event::SplicePending {
channel_id: *channel_id,
counterparty_node_id: *counterparty_node_id,
user_channel_id: chan.context.get_user_id(),
new_funding_txo: splice_negotiated.funding_txo,
channel_type: splice_negotiated.channel_type,
new_funding_redeem_script: splice_negotiated
.funding_redeem_script,
},
None,
));
}
peer_state.pending_msg_events.push(
MessageSendEvent::SendTxSignatures {
node_id: *counterparty_node_id,
msg: tx_signatures,
},
);
if let Some(splice_locked) = splice_locked {
peer_state.pending_msg_events.push(
MessageSendEvent::SendSpliceLocked {
node_id: *counterparty_node_id,
msg: splice_locked,
},
);
}
return NotifyOption::DoPersist;
},
Err(err) => {
result = Err(err);
return NotifyOption::SkipPersistNoEvents;
},
Ok(FundingTxSigned {
tx_signatures: None,
funding_tx,
splice_negotiated,
splice_locked,
}) => {
debug_assert!(funding_tx.is_none());
debug_assert!(splice_negotiated.is_none());
debug_assert!(splice_locked.is_none());
return NotifyOption::SkipPersistNoEvents;
},
}
},
None => {
result = Err(APIError::APIMisuseError {
err: format!(
"Channel with id {} not expecting funding signatures",
channel_id
),
});
return NotifyOption::SkipPersistNoEvents;
},
},
None => {
result = Err(APIError::ChannelUnavailable {
err: format!(
"Channel with id {} not found for the passed counterparty node_id {}",
channel_id, counterparty_node_id
),
});
return NotifyOption::SkipPersistNoEvents;
},
}
});
result
}
fn broadcast_interactive_funding(
&self, channel: &mut FundedChannel<SP>, funding_tx: &Transaction, logger: &L,
) {
let logger = WithChannelContext::from(logger, channel.context(), None);
log_info!(
logger,
"Broadcasting signed interactive funding transaction {}",
funding_tx.compute_txid()
);
self.tx_broadcaster.broadcast_transactions(&[funding_tx]);
{
let mut pending_events = self.pending_events.lock().unwrap();
emit_channel_pending_event!(pending_events, channel);
}
}
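/// Applies `config_update` atomically to the given channels: the update is rejected
/// outright if any channel id is unknown or if a `cltv_expiry_delta` below
/// `MIN_CLTV_EXPIRY_DELTA` is requested. Channels whose effective config actually changed
/// get a fresh `channel_update` broadcast (or sent directly for unannounced channels).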
#[rustfmt::skip]
pub fn update_partial_channel_config(
&self, counterparty_node_id: &PublicKey, channel_ids: &[ChannelId], config_update: &ChannelConfigUpdate,
) -> Result<(), APIError> {
if config_update.cltv_expiry_delta.map(|delta| delta < MIN_CLTV_EXPIRY_DELTA).unwrap_or(false) {
return Err(APIError::APIMisuseError {
err: format!("The chosen CLTV expiry delta is below the minimum of {}", MIN_CLTV_EXPIRY_DELTA),
});
}
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}") })?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for channel_id in channel_ids {
if !peer_state.has_channel(channel_id) {
return Err(APIError::ChannelUnavailable {
err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, counterparty_node_id),
});
};
}
for channel_id in channel_ids {
if let Some(channel) = peer_state.channel_by_id.get_mut(channel_id) {
let mut config = channel.context().config();
config.apply(config_update);
if !channel.context_mut().update_config(&config) {
continue;
}
if let Some(channel) = channel.as_funded() {
if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { msg });
} else if peer_state.is_connected {
if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
peer_state.pending_msg_events.push(MessageSendEvent::SendChannelUpdate {
node_id: channel.context.get_counterparty_node_id(),
msg,
});
}
}
}
continue;
} else {
debug_assert!(false);
return Err(APIError::ChannelUnavailable {
err: format!(
"Channel with ID {} for passed counterparty_node_id {} disappeared after we confirmed its existence - this should not be reachable!",
channel_id, counterparty_node_id),
});
};
}
Ok(())
}
pub fn update_channel_config(
&self, counterparty_node_id: &PublicKey, channel_ids: &[ChannelId], config: &ChannelConfig,
) -> Result<(), APIError> {
self.update_partial_channel_config(counterparty_node_id, channel_ids, &(*config).into())
}
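/// Forwards an HTLC that was previously intercepted (surfaced via an HTLC-intercept
/// event) over the given next-hop channel. Forwarding less than the onion's amount is
/// allowed; the difference is recorded as a skimmed fee.
///
/// A minimal sketch of driving this from an event handler (field and variable names are
/// illustrative):
///
/// ```ignore
/// // `intercept_id` and `expected_outbound_amount_msat` come from the intercept event;
/// // `chan_id`/`peer` identify the channel we chose for the forward.
/// channel_manager.forward_intercepted_htlc(
///     intercept_id,
///     &chan_id,
///     peer,
///     expected_outbound_amount_msat - our_extra_fee_msat,
/// )?;
/// ```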
pub fn forward_intercepted_htlc(
&self, intercept_id: InterceptId, next_hop_channel_id: &ChannelId, next_node_id: PublicKey,
amt_to_forward_msat: u64,
) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let outbound_scid_alias = {
let peer_state_lock = self.per_peer_state.read().unwrap();
let peer_state_mutex =
peer_state_lock.get(&next_node_id).ok_or_else(|| APIError::ChannelUnavailable {
err: format!(
"Can't find a peer matching the passed counterparty node_id {next_node_id}"
),
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.get(next_hop_channel_id) {
Some(chan) => {
if let Some(funded_chan) = chan.as_funded() {
if !funded_chan.context.is_usable() {
return Err(APIError::ChannelUnavailable {
err: format!(
"Channel with id {next_hop_channel_id} not fully established"
),
});
}
funded_chan.context.outbound_scid_alias()
} else {
return Err(APIError::ChannelUnavailable {
err: format!(
"Channel with id {next_hop_channel_id} for the passed counterparty node_id {next_node_id} is still opening."
)
});
}
},
None => {
let error = format!(
"Channel with id {next_hop_channel_id} not found for the passed counterparty node_id {next_node_id}"
);
let logger = WithContext::from(
&self.logger,
Some(next_node_id),
Some(*next_hop_channel_id),
None,
);
log_error!(logger, "{error} when attempting to forward intercepted HTLC");
return Err(APIError::ChannelUnavailable { err: error });
},
}
};
let payment = self
.pending_intercepted_htlcs
.lock()
.unwrap()
.remove(&intercept_id)
.ok_or_else(|| APIError::APIMisuseError {
err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)),
})?;
let routing = match payment.forward_info.routing {
PendingHTLCRouting::Forward {
onion_packet,
blinded,
incoming_cltv_expiry,
hold_htlc,
..
} => {
debug_assert!(hold_htlc.is_none(), "Held intercept HTLCs should not be surfaced in an event until the recipient comes online");
PendingHTLCRouting::Forward {
onion_packet,
blinded,
incoming_cltv_expiry,
hold_htlc,
short_channel_id: outbound_scid_alias,
}
},
_ => unreachable!(), // only `PendingHTLCRouting::Forward`s can be intercepted
};
let skimmed_fee_msat =
payment.forward_info.outgoing_amt_msat.saturating_sub(amt_to_forward_msat);
let pending_htlc_info = PendingHTLCInfo {
skimmed_fee_msat: if skimmed_fee_msat == 0 { None } else { Some(skimmed_fee_msat) },
outgoing_amt_msat: amt_to_forward_msat,
routing,
..payment.forward_info
};
let mut per_source_pending_forward = [(
payment.prev_outbound_scid_alias,
payment.prev_counterparty_node_id,
payment.prev_funding_outpoint,
payment.prev_channel_id,
payment.prev_user_channel_id,
vec![(pending_htlc_info, payment.prev_htlc_id)],
)];
self.forward_htlcs(&mut per_source_pending_forward);
Ok(())
}
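/// Fails back an intercepted HTLC that we have decided not to forward, using
/// `UnknownNextPeer` as the failure reason, which is indistinguishable to the sender from
/// the requested channel simply not existing.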
#[rustfmt::skip]
pub fn fail_intercepted_htlc(&self, intercept_id: InterceptId) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id)
.ok_or_else(|| APIError::APIMisuseError {
err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0))
})?;
if let PendingHTLCRouting::Forward { short_channel_id, .. } = payment.forward_info.routing {
let htlc_source = HTLCSource::PreviousHopData(payment.htlc_previous_hop_data());
let reason = HTLCFailReason::from_failure_code(LocalHTLCFailureReason::UnknownNextPeer);
let destination = HTLCHandlingFailureType::InvalidForward { requested_forward_scid: short_channel_id };
let hash = payment.forward_info.payment_hash;
self.fail_htlc_backwards_internal(&htlc_source, &hash, &reason, destination, None);
} else { unreachable!() }
Ok(())
}
#[cfg(any(test, feature = "_test_utils"))]
pub fn test_process_pending_update_add_htlcs(&self) -> bool {
self.process_pending_update_add_htlcs()
}
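/// Drains `decode_update_add_htlcs` and, for each incoming channel, decodes the HTLC
/// onions and runs the acceptance and forwarding checks. Decodable, acceptable HTLCs are
/// queued as forwards; everything else is failed back immediately with an
/// `HTLCHandlingFailed` event. Returns whether anything was processed (and thus whether a
/// persist is needed).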
fn process_pending_update_add_htlcs(&self) -> bool {
let mut should_persist = false;
let mut decode_update_add_htlcs = new_hash_map();
mem::swap(&mut decode_update_add_htlcs, &mut self.decode_update_add_htlcs.lock().unwrap());
let get_htlc_failure_type = |outgoing_scid_opt: Option<u64>, payment_hash: PaymentHash| {
if let Some(outgoing_scid) = outgoing_scid_opt {
match self.short_to_chan_info.read().unwrap().get(&outgoing_scid) {
Some((outgoing_counterparty_node_id, outgoing_channel_id)) => {
HTLCHandlingFailureType::Forward {
node_id: Some(*outgoing_counterparty_node_id),
channel_id: *outgoing_channel_id,
}
},
None => HTLCHandlingFailureType::InvalidForward {
requested_forward_scid: outgoing_scid,
},
}
} else {
HTLCHandlingFailureType::Receive { payment_hash }
}
};
'outer_loop: for (incoming_scid_alias, update_add_htlcs) in decode_update_add_htlcs {
should_persist = true;
let incoming_channel_details_opt = self.do_funded_channel_callback(
incoming_scid_alias,
|chan: &mut FundedChannel<SP>| {
let counterparty_node_id = chan.context.get_counterparty_node_id();
let channel_id = chan.context.channel_id();
let funding_txo = chan.funding.get_funding_txo().unwrap();
let user_channel_id = chan.context.get_user_id();
let accept_underpaying_htlcs = chan.context.config().accept_underpaying_htlcs;
(
counterparty_node_id,
channel_id,
funding_txo,
user_channel_id,
accept_underpaying_htlcs,
)
},
);
let (
incoming_counterparty_node_id,
incoming_channel_id,
incoming_funding_txo,
incoming_user_channel_id,
incoming_accept_underpaying_htlcs,
) = if let Some(incoming_channel_details) = incoming_channel_details_opt {
incoming_channel_details
} else {
continue;
};
let mut htlc_forwards = Vec::new();
let mut htlc_fails = Vec::new();
for update_add_htlc in &update_add_htlcs {
let (next_hop, next_packet_details_opt) =
match decode_incoming_update_add_htlc_onion(
&update_add_htlc,
&*self.node_signer,
&*self.logger,
&self.secp_ctx,
) {
Ok(decoded_onion) => decoded_onion,
Err((htlc_fail, reason)) => {
let failure_type = HTLCHandlingFailureType::InvalidOnion;
htlc_fails.push((htlc_fail, failure_type, reason.into()));
continue;
},
};
let is_intro_node_blinded_forward = next_hop.is_intro_node_blinded_forward();
let outgoing_scid_opt =
next_packet_details_opt.as_ref().and_then(|d| match d.outgoing_connector {
HopConnector::ShortChannelId(scid) => Some(scid),
HopConnector::Trampoline(_) => None,
});
let shared_secret = next_hop.shared_secret().secret_bytes();
if update_add_htlc.hold_htlc.is_some()
&& !BaseMessageHandler::provided_node_features(self).supports_htlc_hold()
{
let reason = LocalHTLCFailureReason::TemporaryNodeFailure;
let htlc_fail = self.htlc_failure_from_update_add_err(
&update_add_htlc,
&incoming_counterparty_node_id,
reason,
is_intro_node_blinded_forward,
&shared_secret,
);
let failure_type =
get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash);
htlc_fails.push((htlc_fail, failure_type, reason.into()));
continue;
}
match self.do_funded_channel_callback(
incoming_scid_alias,
|chan: &mut FundedChannel<SP>| {
let logger = WithChannelContext::from(
&self.logger,
&chan.context,
Some(update_add_htlc.payment_hash),
);
chan.can_accept_incoming_htlc(&self.fee_estimator, &logger)
},
) {
Some(Ok(_)) => {},
Some(Err(reason)) => {
let htlc_fail = self.htlc_failure_from_update_add_err(
&update_add_htlc,
&incoming_counterparty_node_id,
reason,
is_intro_node_blinded_forward,
&shared_secret,
);
let failure_type =
get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash);
htlc_fails.push((htlc_fail, failure_type, reason.into()));
continue;
},
None => continue 'outer_loop,
}
if let Some(next_packet_details) = next_packet_details_opt.as_ref() {
if let Err(reason) =
self.can_forward_htlc(&update_add_htlc, next_packet_details)
{
let htlc_fail = self.htlc_failure_from_update_add_err(
&update_add_htlc,
&incoming_counterparty_node_id,
reason,
is_intro_node_blinded_forward,
&shared_secret,
);
let failure_type =
get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash);
htlc_fails.push((htlc_fail, failure_type, reason.into()));
continue;
}
}
match self.get_pending_htlc_info(
&update_add_htlc,
shared_secret,
next_hop,
incoming_accept_underpaying_htlcs,
next_packet_details_opt.map(|d| d.next_packet_pubkey),
) {
Ok(info) => htlc_forwards.push((info, update_add_htlc.htlc_id)),
Err(inbound_err) => {
let failure_type =
get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash);
let htlc_failure = inbound_err.reason.into();
let htlc_fail = self.construct_pending_htlc_fail_msg(
&update_add_htlc,
&incoming_counterparty_node_id,
shared_secret,
inbound_err,
);
htlc_fails.push((htlc_fail, failure_type, htlc_failure));
},
}
}
let pending_forwards = (
incoming_scid_alias,
incoming_counterparty_node_id,
incoming_funding_txo,
incoming_channel_id,
incoming_user_channel_id,
htlc_forwards.drain(..).collect(),
);
self.forward_htlcs(&mut [pending_forwards]);
for (htlc_fail, failure_type, failure_reason) in htlc_fails.drain(..) {
let failure = match htlc_fail {
HTLCFailureMsg::Relay(fail_htlc) => HTLCForwardInfo::FailHTLC {
htlc_id: fail_htlc.htlc_id,
err_packet: fail_htlc.into(),
},
HTLCFailureMsg::Malformed(fail_malformed_htlc) => {
HTLCForwardInfo::FailMalformedHTLC {
htlc_id: fail_malformed_htlc.htlc_id,
sha256_of_onion: fail_malformed_htlc.sha256_of_onion,
failure_code: fail_malformed_htlc.failure_code.into(),
}
},
};
self.forward_htlcs
.lock()
.unwrap()
.entry(incoming_scid_alias)
.or_default()
.push(failure);
self.pending_events.lock().unwrap().push_back((
events::Event::HTLCHandlingFailed {
prev_channel_id: incoming_channel_id,
failure_type,
failure_reason: Some(failure_reason),
},
None,
));
}
}
should_persist
}
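/// Returns whether [`Self::process_pending_htlc_forwards`] has work to do: queued
/// forwards, undecoded `update_add_htlc`s, or outbound payments needing abandon/retry.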
pub fn needs_pending_htlc_processing(&self) -> bool {
if !self.forward_htlcs.lock().unwrap().is_empty() {
return true;
}
if !self.decode_update_add_htlcs.lock().unwrap().is_empty() {
return true;
}
if self.pending_outbound_payments.needs_abandon_or_retry() {
return true;
}
false
}
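/// Processes all pending HTLC forwards and receives. A compare-and-swap on
/// `pending_htlc_forwards_processor` ensures only one thread runs the processing at a
/// time; concurrent callers return immediately.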
pub fn process_pending_htlc_forwards(&self) {
if self
.pending_htlc_forwards_processor
.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
.is_err()
{
return;
}
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
self.internal_process_pending_htlc_forwards()
});
self.pending_htlc_forwards_processor.store(false, Ordering::Release);
}
fn internal_process_pending_htlc_forwards(&self) -> NotifyOption {
let mut should_persist = NotifyOption::SkipPersistNoEvents;
if self.process_pending_update_add_htlcs() {
should_persist = NotifyOption::DoPersist;
}
let mut new_events = VecDeque::new();
let mut failed_forwards = Vec::new();
let mut phantom_receives: Vec<PerSourcePendingForward> = Vec::new();
let mut forward_htlcs = new_hash_map();
mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
for (short_chan_id, mut pending_forwards) in forward_htlcs {
should_persist = NotifyOption::DoPersist;
if short_chan_id != 0 {
self.process_forward_htlcs(
short_chan_id,
&mut pending_forwards,
&mut failed_forwards,
&mut phantom_receives,
);
} else {
self.process_receive_htlcs(
&mut pending_forwards,
&mut new_events,
&mut failed_forwards,
);
}
}
let best_block_height = self.best_block.read().unwrap().height;
let needs_persist = self.pending_outbound_payments.check_retry_payments(
&self.router,
|| self.list_usable_channels(),
|| self.compute_inflight_htlcs(),
&self.entropy_source,
&self.node_signer,
best_block_height,
&self.pending_events,
|args| self.send_payment_along_path(args),
&WithContext::from(&self.logger, None, None, None),
);
if needs_persist {
should_persist = NotifyOption::DoPersist;
}
for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
self.fail_htlc_backwards_internal(
&htlc_source,
&payment_hash,
&failure_reason,
destination,
None,
);
}
self.forward_htlcs(&mut phantom_receives);
if self.check_free_holding_cells() {
should_persist = NotifyOption::DoPersist;
}
if new_events.is_empty() {
return should_persist;
}
let mut events = self.pending_events.lock().unwrap();
events.append(&mut new_events);
should_persist = NotifyOption::DoPersist;
should_persist
}
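/// Handles HTLCs destined for an outgoing SCID with no live channel behind it. If the
/// SCID is one of our fake phantom-node SCIDs, the next onion layer is decoded with the
/// phantom key and the HTLC is re-queued as a receive; otherwise it is failed back with
/// `UnknownNextPeer`.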
fn forwarding_channel_not_found(
&self, forward_infos: impl Iterator<Item = HTLCForwardInfo>, short_chan_id: u64,
forwarding_counterparty: Option<PublicKey>, failed_forwards: &mut Vec<FailedHTLCForward>,
phantom_receives: &mut Vec<PerSourcePendingForward>,
) {
for forward_info in forward_infos {
match forward_info {
HTLCForwardInfo::AddHTLC(payment) => {
let PendingAddHTLCInfo {
prev_outbound_scid_alias,
prev_htlc_id,
prev_channel_id,
prev_funding_outpoint,
prev_user_channel_id,
prev_counterparty_node_id,
forward_info:
PendingHTLCInfo {
ref routing,
incoming_shared_secret,
payment_hash,
outgoing_amt_msat,
outgoing_cltv_value,
..
},
} = payment;
let logger = WithContext::from(
&self.logger,
forwarding_counterparty,
Some(prev_channel_id),
Some(payment_hash),
);
let mut failure_handler =
|msg, reason, err_data, phantom_ss, next_hop_unknown| {
log_info!(logger, "Failed to accept/forward incoming HTLC: {}", msg);
let mut prev_hop = payment.htlc_previous_hop_data();
prev_hop.phantom_shared_secret = phantom_ss;
let failure_type = if next_hop_unknown {
HTLCHandlingFailureType::InvalidForward {
requested_forward_scid: short_chan_id,
}
} else {
HTLCHandlingFailureType::Receive { payment_hash }
};
failed_forwards.push((
HTLCSource::PreviousHopData(prev_hop),
payment_hash,
HTLCFailReason::reason(reason, err_data),
failure_type,
));
};
if let PendingHTLCRouting::Forward { ref onion_packet, .. } = routing {
let phantom_pubkey_res =
self.node_signer.get_node_id(Recipient::PhantomNode);
if phantom_pubkey_res.is_ok()
&& fake_scid::is_valid_phantom(
&self.fake_scid_rand_bytes,
short_chan_id,
&self.chain_hash,
) {
let decode_res = onion_utils::decode_next_payment_hop(
Recipient::PhantomNode,
&onion_packet.public_key.unwrap(),
&onion_packet.hop_data,
onion_packet.hmac,
payment_hash,
None,
&*self.node_signer,
);
let next_hop = match decode_res {
Ok(res) => res,
Err(onion_utils::OnionDecodeErr::Malformed { err_msg, reason }) => {
let sha256_of_onion =
Sha256::hash(&onion_packet.hop_data).to_byte_array();
failure_handler(
err_msg,
reason,
sha256_of_onion.to_vec(),
None,
false,
);
continue;
},
Err(onion_utils::OnionDecodeErr::Relay {
err_msg,
reason,
shared_secret,
..
}) => {
let phantom_shared_secret = shared_secret.secret_bytes();
failure_handler(
err_msg,
reason,
Vec::new(),
Some(phantom_shared_secret),
false,
);
continue;
},
};
let phantom_shared_secret = next_hop.shared_secret().secret_bytes();
let current_height: u32 = self.best_block.read().unwrap().height;
let create_res = create_recv_pending_htlc_info(
next_hop,
incoming_shared_secret,
payment_hash,
outgoing_amt_msat,
outgoing_cltv_value,
Some(phantom_shared_secret),
false,
None,
current_height,
);
match create_res {
Ok(info) => phantom_receives.push((
prev_outbound_scid_alias,
prev_counterparty_node_id,
prev_funding_outpoint,
prev_channel_id,
prev_user_channel_id,
vec![(info, prev_htlc_id)],
)),
Err(InboundHTLCErr { reason, err_data, msg }) => {
failure_handler(
msg,
reason,
err_data,
Some(phantom_shared_secret),
false,
);
continue;
},
}
} else {
let msg = format!(
"Unknown short channel id {} for forward HTLC",
short_chan_id
);
failure_handler(
&msg,
LocalHTLCFailureReason::UnknownNextPeer,
Vec::new(),
None,
true,
);
continue;
}
} else {
let msg =
format!("Unknown short channel id {} for forward HTLC", short_chan_id);
failure_handler(
&msg,
LocalHTLCFailureReason::UnknownNextPeer,
Vec::new(),
None,
true,
);
continue;
}
},
HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. } => {
},
}
}
}
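/// Forwards the queued HTLCs for `short_chan_id` to the counterparty that owns it. For
/// each forward we prefer, among the peer's usable funded channels that can carry the
/// amount, the one with the smallest outbound HTLC limit (keeping larger channels free for
/// larger forwards), falling back to the originally specified channel.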
fn process_forward_htlcs(
&self, short_chan_id: u64, pending_forwards: &mut Vec<HTLCForwardInfo>,
failed_forwards: &mut Vec<FailedHTLCForward>,
phantom_receives: &mut Vec<PerSourcePendingForward>,
) {
let mut forwarding_counterparty = None;
let chan_info_opt = self.short_to_chan_info.read().unwrap().get(&short_chan_id).cloned();
let (counterparty_node_id, forward_chan_id) = match chan_info_opt {
Some((cp_id, chan_id)) => (cp_id, chan_id),
None => {
self.forwarding_channel_not_found(
pending_forwards.drain(..),
short_chan_id,
forwarding_counterparty,
failed_forwards,
phantom_receives,
);
return;
},
};
forwarding_counterparty = Some(counterparty_node_id);
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
if peer_state_mutex_opt.is_none() {
self.forwarding_channel_not_found(
pending_forwards.drain(..),
short_chan_id,
forwarding_counterparty,
failed_forwards,
phantom_receives,
);
return;
}
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
let mut draining_pending_forwards = pending_forwards.drain(..);
while let Some(forward_info) = draining_pending_forwards.next() {
let queue_fail_htlc_res = match forward_info {
HTLCForwardInfo::AddHTLC(ref payment) => {
let htlc_source = HTLCSource::PreviousHopData(payment.htlc_previous_hop_data());
let PendingAddHTLCInfo {
prev_outbound_scid_alias,
forward_info:
PendingHTLCInfo {
payment_hash,
outgoing_amt_msat,
outgoing_cltv_value,
routing,
skimmed_fee_msat,
..
},
..
} = payment;
let (onion_packet, blinded) = match routing {
PendingHTLCRouting::Forward { ref onion_packet, blinded, .. } => {
(onion_packet, blinded)
},
_ => {
panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
},
};
let next_blinding_point = blinded.and_then(|b| {
b.next_blinding_override.or_else(|| {
let encrypted_tlvs_ss = self
.node_signer
.ecdh(Recipient::Node, &b.inbound_blinding_point, None)
.unwrap()
.secret_bytes();
onion_utils::next_hop_pubkey(
&self.secp_ctx,
b.inbound_blinding_point,
&encrypted_tlvs_ss,
)
.ok()
})
});
let maybe_optimal_channel = peer_state
.channel_by_id
.values_mut()
.filter_map(Channel::as_funded_mut)
.filter_map(|chan| {
let balances = chan.get_available_balances(&self.fee_estimator);
let is_in_range = (balances.next_outbound_htlc_minimum_msat
..=balances.next_outbound_htlc_limit_msat)
.contains(&outgoing_amt_msat);
if is_in_range && chan.context.is_usable() {
Some((chan, balances))
} else {
None
}
})
.min_by_key(|(_, balances)| balances.next_outbound_htlc_limit_msat)
.map(|(c, _)| c);
let optimal_channel = match maybe_optimal_channel {
Some(chan) => chan,
None => {
if let Some(chan) = peer_state
.channel_by_id
.get_mut(&forward_chan_id)
.and_then(Channel::as_funded_mut)
{
chan
} else {
let fwd_iter =
core::iter::once(forward_info).chain(draining_pending_forwards);
self.forwarding_channel_not_found(
fwd_iter,
short_chan_id,
forwarding_counterparty,
failed_forwards,
phantom_receives,
);
break;
}
},
};
let logger = WithChannelContext::from(
&self.logger,
&optimal_channel.context,
Some(*payment_hash),
);
let channel_description =
if optimal_channel.funding.get_short_channel_id() == Some(short_chan_id) {
"specified"
} else {
"alternate"
};
log_trace!(logger, "Forwarding HTLC from SCID {} with payment_hash {} and next hop SCID {} over {} channel {} with corresponding peer {}",
prev_outbound_scid_alias, &payment_hash, short_chan_id, channel_description, optimal_channel.context.channel_id(), &counterparty_node_id);
if let Err((reason, msg)) = optimal_channel.queue_add_htlc(
*outgoing_amt_msat,
*payment_hash,
*outgoing_cltv_value,
htlc_source.clone(),
onion_packet.clone(),
*skimmed_fee_msat,
next_blinding_point,
&self.fee_estimator,
&&logger,
) {
log_trace!(
logger,
"Failed to forward HTLC with payment_hash {} to peer {}: {}",
&payment_hash,
&counterparty_node_id,
msg
);
if let Some(chan) = peer_state
.channel_by_id
.get_mut(&forward_chan_id)
.and_then(Channel::as_funded_mut)
{
let data = self.get_htlc_inbound_temp_fail_data(reason);
let failure_type = HTLCHandlingFailureType::Forward {
node_id: Some(chan.context.get_counterparty_node_id()),
channel_id: forward_chan_id,
};
failed_forwards.push((
htlc_source,
*payment_hash,
HTLCFailReason::reason(reason, data),
failure_type,
));
} else {
self.forwarding_channel_not_found(
core::iter::once(forward_info).chain(draining_pending_forwards),
short_chan_id,
forwarding_counterparty,
failed_forwards,
phantom_receives,
);
break;
}
}
None
},
HTLCForwardInfo::FailHTLC { htlc_id, ref err_packet } => {
if let Some(chan) = peer_state
.channel_by_id
.get_mut(&forward_chan_id)
.and_then(Channel::as_funded_mut)
{
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
Some((chan.queue_fail_htlc(htlc_id, err_packet.clone(), &&logger), htlc_id))
} else {
self.forwarding_channel_not_found(
core::iter::once(forward_info).chain(draining_pending_forwards),
short_chan_id,
forwarding_counterparty,
failed_forwards,
phantom_receives,
);
break;
}
},
HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
if let Some(chan) = peer_state
.channel_by_id
.get_mut(&forward_chan_id)
.and_then(Channel::as_funded_mut)
{
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
let res = chan.queue_fail_malformed_htlc(
htlc_id,
failure_code,
sha256_of_onion,
&&logger,
);
Some((res, htlc_id))
} else {
self.forwarding_channel_not_found(
core::iter::once(forward_info).chain(draining_pending_forwards),
short_chan_id,
forwarding_counterparty,
failed_forwards,
phantom_receives,
);
break;
}
},
};
if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
if let Err(e) = queue_fail_htlc_res {
if let ChannelError::Ignore(msg) = e {
if let Some(chan) = peer_state
.channel_by_id
.get_mut(&forward_chan_id)
.and_then(Channel::as_funded_mut)
{
let logger =
WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(
logger,
"Failed to fail HTLC with ID {} backwards to short_id {}: {}",
htlc_id,
short_chan_id,
msg
);
}
} else {
panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
}
}
}
}
}
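/// Handles HTLCs whose final destination is us. Each HTLC is matched by payment hash into
/// a claimable payment, the payment secret and CLTV expiry are verified where we generated
/// them, and once the sender-intended `total_msat` is reached across all MPP parts an
/// [`Event::PaymentClaimable`] is generated.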
fn process_receive_htlcs(
&self, pending_forwards: &mut Vec<HTLCForwardInfo>,
new_events: &mut VecDeque<(Event, Option<EventCompletionAction>)>,
failed_forwards: &mut Vec<FailedHTLCForward>,
) {
'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
match forward_info {
HTLCForwardInfo::AddHTLC(payment) => {
let prev_hop = payment.htlc_previous_hop_data();
let PendingAddHTLCInfo {
prev_channel_id,
prev_funding_outpoint,
forward_info:
PendingHTLCInfo {
routing,
payment_hash,
incoming_amt_msat,
outgoing_amt_msat,
skimmed_fee_msat,
..
},
..
} = payment;
let blinded_failure = routing.blinded_failure();
let (
cltv_expiry,
onion_payload,
payment_data,
payment_context,
phantom_shared_secret,
mut onion_fields,
has_recipient_created_payment_secret,
invoice_request_opt,
) = match routing {
PendingHTLCRouting::Receive {
payment_data,
payment_metadata,
payment_context,
incoming_cltv_expiry,
phantom_shared_secret,
custom_tlvs,
requires_blinded_error: _,
} => {
let _legacy_hop_data = Some(payment_data.clone());
let onion_fields = RecipientOnionFields {
payment_secret: Some(payment_data.payment_secret),
payment_metadata,
custom_tlvs,
};
(
incoming_cltv_expiry,
OnionPayload::Invoice { _legacy_hop_data },
Some(payment_data),
payment_context,
phantom_shared_secret,
onion_fields,
true,
None,
)
},
PendingHTLCRouting::ReceiveKeysend {
payment_data,
payment_preimage,
payment_metadata,
incoming_cltv_expiry,
custom_tlvs,
requires_blinded_error: _,
has_recipient_created_payment_secret,
payment_context,
invoice_request,
} => {
let onion_fields = RecipientOnionFields {
payment_secret: payment_data
.as_ref()
.map(|data| data.payment_secret),
payment_metadata,
custom_tlvs,
};
(
incoming_cltv_expiry,
OnionPayload::Spontaneous(payment_preimage),
payment_data,
payment_context,
None,
onion_fields,
has_recipient_created_payment_secret,
invoice_request,
)
},
_ => {
panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
},
};
let claimable_htlc = ClaimableHTLC {
prev_hop,
value: incoming_amt_msat.unwrap_or(outgoing_amt_msat),
sender_intended_value: outgoing_amt_msat,
timer_ticks: 0,
total_value_received: None,
total_msat: if let Some(data) = &payment_data {
data.total_msat
} else {
outgoing_amt_msat
},
cltv_expiry,
onion_payload,
counterparty_skimmed_fee_msat: skimmed_fee_msat,
};
let mut committed_to_claimable = false;
macro_rules! fail_htlc {
($htlc: expr, $payment_hash: expr) => {
debug_assert!(!committed_to_claimable);
let err_data = invalid_payment_err_data(
$htlc.value,
self.best_block.read().unwrap().height,
);
let counterparty_node_id = $htlc.prev_hop.counterparty_node_id;
let incoming_packet_shared_secret =
$htlc.prev_hop.incoming_packet_shared_secret;
let prev_outbound_scid_alias = $htlc.prev_hop.prev_outbound_scid_alias;
failed_forwards.push((
HTLCSource::PreviousHopData(HTLCPreviousHopData {
prev_outbound_scid_alias,
user_channel_id: $htlc.prev_hop.user_channel_id,
counterparty_node_id,
channel_id: prev_channel_id,
outpoint: prev_funding_outpoint,
htlc_id: $htlc.prev_hop.htlc_id,
incoming_packet_shared_secret,
phantom_shared_secret,
blinded_failure,
cltv_expiry: Some(cltv_expiry),
}),
payment_hash,
HTLCFailReason::reason(
LocalHTLCFailureReason::IncorrectPaymentDetails,
err_data,
),
HTLCHandlingFailureType::Receive { payment_hash: $payment_hash },
));
continue 'next_forwardable_htlc;
};
}
let phantom_shared_secret = claimable_htlc.prev_hop.phantom_shared_secret;
let mut receiver_node_id = self.our_network_pubkey;
if phantom_shared_secret.is_some() {
receiver_node_id = self
.node_signer
.get_node_id(Recipient::PhantomNode)
.expect("Failed to get node_id for phantom node recipient");
}
macro_rules! check_total_value {
($purpose: expr) => {{
let mut payment_claimable_generated = false;
let is_keysend = $purpose.is_keysend();
let mut claimable_payments = self.claimable_payments.lock().unwrap();
if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
fail_htlc!(claimable_htlc, payment_hash);
}
let ref mut claimable_payment = claimable_payments.claimable_payments
.entry(payment_hash)
.or_insert_with(|| {
committed_to_claimable = true;
ClaimablePayment {
purpose: $purpose.clone(), htlcs: Vec::new(), onion_fields: None,
}
});
if $purpose != claimable_payment.purpose {
let log_keysend = |keysend| if keysend { "keysend" } else { "non-keysend" };
log_trace!(self.logger, "Failing new {} HTLC with payment_hash {} as we already had an existing {} HTLC with the same payment hash", log_keysend(is_keysend), &payment_hash, log_keysend(!is_keysend));
fail_htlc!(claimable_htlc, payment_hash);
}
if let Some(earlier_fields) = &mut claimable_payment.onion_fields {
if earlier_fields.check_merge(&mut onion_fields).is_err() {
fail_htlc!(claimable_htlc, payment_hash);
}
} else {
claimable_payment.onion_fields = Some(onion_fields);
}
let mut total_value = claimable_htlc.sender_intended_value;
let mut earliest_expiry = claimable_htlc.cltv_expiry;
for htlc in claimable_payment.htlcs.iter() {
total_value += htlc.sender_intended_value;
earliest_expiry = cmp::min(earliest_expiry, htlc.cltv_expiry);
if htlc.total_msat != claimable_htlc.total_msat {
log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
&payment_hash, claimable_htlc.total_msat, htlc.total_msat);
total_value = msgs::MAX_VALUE_MSAT;
}
if total_value >= msgs::MAX_VALUE_MSAT { break; }
}
if total_value >= msgs::MAX_VALUE_MSAT {
fail_htlc!(claimable_htlc, payment_hash);
} else if total_value - claimable_htlc.sender_intended_value >= claimable_htlc.total_msat {
log_trace!(self.logger, "Failing HTLC with payment_hash {} as payment is already claimable",
&payment_hash);
fail_htlc!(claimable_htlc, payment_hash);
} else if total_value >= claimable_htlc.total_msat {
#[allow(unused_assignments)] {
committed_to_claimable = true;
}
claimable_payment.htlcs.push(claimable_htlc);
let amount_msat =
claimable_payment.htlcs.iter().map(|htlc| htlc.value).sum();
claimable_payment.htlcs.iter_mut()
.for_each(|htlc| htlc.total_value_received = Some(amount_msat));
let counterparty_skimmed_fee_msat = claimable_payment.htlcs.iter()
.map(|htlc| htlc.counterparty_skimmed_fee_msat.unwrap_or(0)).sum();
debug_assert!(total_value.saturating_sub(amount_msat) <=
counterparty_skimmed_fee_msat);
claimable_payment.htlcs.sort();
let payment_id =
claimable_payment.inbound_payment_id(&self.inbound_payment_id_secret);
new_events.push_back((events::Event::PaymentClaimable {
receiver_node_id: Some(receiver_node_id),
payment_hash,
purpose: $purpose,
amount_msat,
counterparty_skimmed_fee_msat,
receiving_channel_ids: claimable_payment.receiving_channel_ids(),
claim_deadline: Some(earliest_expiry - HTLC_FAIL_BACK_BUFFER),
onion_fields: claimable_payment.onion_fields.clone(),
payment_id: Some(payment_id),
}, None));
payment_claimable_generated = true;
} else {
claimable_payment.htlcs.push(claimable_htlc);
#[allow(unused_assignments)] {
committed_to_claimable = true;
}
}
payment_claimable_generated
}}
}
let payment_preimage = if has_recipient_created_payment_secret {
if let Some(ref payment_data) = payment_data {
let verify_res = inbound_payment::verify(
payment_hash,
&payment_data,
self.highest_seen_timestamp.load(Ordering::Acquire) as u64,
&self.inbound_payment_key,
&self.logger,
);
let (payment_preimage, min_final_cltv_expiry_delta) = match verify_res {
Ok(result) => result,
Err(()) => {
log_trace!(self.logger, "Failing new HTLC with payment_hash {} as payment verification failed", &payment_hash);
fail_htlc!(claimable_htlc, payment_hash);
},
};
if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta {
let expected_min_expiry_height = (self.current_best_block().height
+ min_final_cltv_expiry_delta as u32)
as u64;
if (cltv_expiry as u64) < expected_min_expiry_height {
log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})",
&payment_hash, cltv_expiry, expected_min_expiry_height);
fail_htlc!(claimable_htlc, payment_hash);
}
}
payment_preimage
} else {
fail_htlc!(claimable_htlc, payment_hash);
}
} else {
None
};
match claimable_htlc.onion_payload {
OnionPayload::Invoice { .. } => {
let payment_data = payment_data.unwrap();
let from_parts_res = events::PaymentPurpose::from_parts(
payment_preimage,
payment_data.payment_secret,
payment_context,
);
let purpose = match from_parts_res {
Ok(purpose) => purpose,
Err(()) => {
fail_htlc!(claimable_htlc, payment_hash);
},
};
check_total_value!(purpose);
},
OnionPayload::Spontaneous(keysend_preimage) => {
let purpose = if let Some(PaymentContext::AsyncBolt12Offer(
AsyncBolt12OfferContext { offer_nonce },
)) = payment_context
{
let payment_data = match payment_data {
Some(data) => data,
None => {
debug_assert!(
false,
"We checked that payment_data is Some above"
);
fail_htlc!(claimable_htlc, payment_hash);
},
};
let verify_opt = invoice_request_opt.and_then(|invreq| {
invreq
.verify_using_recipient_data(
offer_nonce,
&self.inbound_payment_key,
&self.secp_ctx,
)
.ok()
});
let verified_invreq = match verify_opt {
Some(verified_invreq) => {
if let Some(invreq_amt_msat) =
verified_invreq.amount_msats()
{
if payment_data.total_msat < invreq_amt_msat {
fail_htlc!(claimable_htlc, payment_hash);
}
}
verified_invreq
},
None => {
fail_htlc!(claimable_htlc, payment_hash);
},
};
let payment_purpose_context =
PaymentContext::Bolt12Offer(Bolt12OfferContext {
offer_id: verified_invreq.offer_id,
invoice_request: verified_invreq.fields(),
});
let from_parts_res = events::PaymentPurpose::from_parts(
Some(keysend_preimage),
payment_data.payment_secret,
Some(payment_purpose_context),
);
match from_parts_res {
Ok(purpose) => purpose,
Err(()) => {
fail_htlc!(claimable_htlc, payment_hash);
},
}
} else if payment_context.is_some() {
log_trace!(self.logger, "Failing new HTLC with payment_hash {}: received a keysend payment to a non-async payments context {:#?}", payment_hash, payment_context);
fail_htlc!(claimable_htlc, payment_hash);
} else {
events::PaymentPurpose::SpontaneousPayment(keysend_preimage)
};
check_total_value!(purpose);
},
}
},
HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. } => {
panic!("Got pending fail of our own HTLC");
},
}
}
}
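/// Replays background events queued during startup: monitor updates regenerated while
/// deserializing and completion notifications for monitors that were already up to date.
/// Requires the `total_consistency_lock` to be held.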
fn process_background_events(&self) -> NotifyOption {
debug_assert_ne!(
self.total_consistency_lock.held_by_thread(),
LockHeldState::NotHeldByThread
);
self.background_events_processed_since_startup.store(true, Ordering::Release);
let mut background_events = Vec::new();
mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
if background_events.is_empty() {
return NotifyOption::SkipPersistNoEvents;
}
for event in background_events.drain(..) {
match event {
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id,
funding_txo,
channel_id,
update,
} => {
self.apply_post_close_monitor_update(
counterparty_node_id,
channel_id,
funding_txo,
update,
);
},
BackgroundEvent::MonitorUpdatesComplete {
counterparty_node_id,
channel_id,
highest_update_id_completed,
} => {
self.channel_monitor_updated(
&channel_id,
Some(highest_update_id_completed),
&counterparty_node_id,
);
},
}
}
NotifyOption::DoPersist
}
#[cfg(any(test, feature = "_test_utils"))]
pub fn test_process_background_events(&self) {
let _lck = self.total_consistency_lock.read().unwrap();
let _ = self.process_background_events();
}
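/// Queues an `update_fee` on the given outbound channel if the new feerate justifies it: always
/// when the feerate rises, but when it falls only once it reaches half the current feerate or
/// less, avoiding churn from small estimator fluctuations. Returns whether the channel state
/// needs persisting.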
#[rustfmt::skip]
fn update_channel_fee(&self, chan_id: &ChannelId, chan: &mut FundedChannel<SP>, new_feerate: u32) -> NotifyOption {
if !chan.funding.is_outbound() { return NotifyOption::SkipPersistNoEvents; }
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
let current_feerate = chan.context.get_feerate_sat_per_1000_weight();
let update_fee_required = match new_feerate.cmp(&current_feerate) {
cmp::Ordering::Greater => true,
cmp::Ordering::Equal => false,
cmp::Ordering::Less => new_feerate * 2 <= current_feerate,
};
if !update_fee_required {
return NotifyOption::SkipPersistNoEvents
}
if !chan.context.is_live() {
log_trace!(logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
return NotifyOption::SkipPersistNoEvents;
}
log_trace!(logger, "Channel {} qualifies for a feerate change from {} to {}.",
&chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
chan.queue_update_fee(new_feerate, &self.fee_estimator, &&logger);
NotifyOption::DoPersist
}
#[cfg(any(test, fuzzing, feature = "_externalize_tests"))]
#[rustfmt::skip]
pub fn maybe_update_chan_fees(&self) {
PersistenceNotifierGuard::optionally_notify(self, || {
let mut should_persist = NotifyOption::SkipPersistNoEvents;
let mut feerate_cache = new_hash_map();
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for (chan_id, chan) in peer_state.channel_by_id.iter_mut()
.filter_map(|(chan_id, chan)| chan.as_funded_mut().map(|chan| (chan_id, chan)))
{
let channel_type = chan.funding.get_channel_type();
let new_feerate = feerate_cache.get(channel_type).copied().or_else(|| {
let feerate = selected_commitment_sat_per_1000_weight(&self.fee_estimator, &channel_type);
feerate_cache.insert(channel_type.clone(), feerate);
Some(feerate)
}).unwrap();
let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
}
}
should_persist
});
}
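/// Performs actions which should happen on startup and roughly once per minute thereafter:
/// updating channel fees, staging and broadcasting channel enable/disable updates, disconnecting
/// peers which have stopped responding, expiring unaccepted and unfunded channels, failing
/// timed-out MPP HTLC parts back to their senders, and removing stale outbound payments and
/// disconnected-peer state.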
pub fn timer_tick_occurred(&self) {
PersistenceNotifierGuard::optionally_notify(self, || {
let mut should_persist = NotifyOption::SkipPersistNoEvents;
let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
let mut timed_out_mpp_htlcs = Vec::new();
let mut pending_peers_awaiting_removal = Vec::new();
let mut feerate_cache = new_hash_map();
{
let per_peer_state = self.per_peer_state.read().unwrap();
for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
let counterparty_node_id = *counterparty_node_id;
peer_state.channel_by_id.retain(|chan_id, chan| {
match chan.as_funded_mut() {
Some(funded_chan) => {
let channel_type = funded_chan.funding.get_channel_type();
let new_feerate = feerate_cache.get(channel_type).copied().or_else(|| {
let feerate = selected_commitment_sat_per_1000_weight(&self.fee_estimator, &channel_type);
feerate_cache.insert(channel_type.clone(), feerate);
Some(feerate)
}).unwrap();
let chan_needs_persist = self.update_channel_fee(chan_id, funded_chan, new_feerate);
if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
if let Err(e) = funded_chan.timer_check_closing_negotiation_progress() {
let (needs_close, err) = convert_channel_err!(self, peer_state, e, funded_chan, FUNDED_CHANNEL);
handle_errors.push((Err(err), counterparty_node_id));
if needs_close { return false; }
}
match funded_chan.channel_update_status() {
ChannelUpdateStatus::Enabled if !funded_chan.context.is_live() => funded_chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)),
ChannelUpdateStatus::Disabled if funded_chan.context.is_live() => funded_chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)),
ChannelUpdateStatus::DisabledStaged(_) if funded_chan.context.is_live()
=> funded_chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
ChannelUpdateStatus::EnabledStaged(_) if !funded_chan.context.is_live()
=> funded_chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
ChannelUpdateStatus::DisabledStaged(mut n) if !funded_chan.context.is_live() => {
n += 1;
if n >= DISABLE_GOSSIP_TICKS {
funded_chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
if let Ok(update) = self.get_channel_update_for_broadcast(&funded_chan) {
let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
should_persist = NotifyOption::DoPersist;
} else {
funded_chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(n));
}
},
ChannelUpdateStatus::EnabledStaged(mut n) if funded_chan.context.is_live() => {
n += 1;
if n >= ENABLE_GOSSIP_TICKS {
funded_chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
if let Ok(update) = self.get_channel_update_for_broadcast(&funded_chan) {
let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
should_persist = NotifyOption::DoPersist;
} else {
funded_chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(n));
}
},
_ => {},
}
funded_chan.context.maybe_expire_prev_config();
if peer_state.is_connected {
if funded_chan.should_disconnect_peer_awaiting_response() {
let logger = WithChannelContext::from(&self.logger, &funded_chan.context, None);
log_debug!(logger, "Disconnecting peer {} due to not making any progress on channel {}",
counterparty_node_id, chan_id);
pending_msg_events.push(MessageSendEvent::HandleError {
node_id: counterparty_node_id,
action: msgs::ErrorAction::DisconnectPeerWithWarning {
msg: msgs::WarningMessage {
channel_id: *chan_id,
data: "Disconnecting due to timeout awaiting response".to_owned(),
},
},
});
}
}
true
},
None => {
chan.context_mut().maybe_expire_prev_config();
let unfunded_context = chan.unfunded_context_mut().expect("channel should be unfunded");
if unfunded_context.should_expire_unfunded_channel() {
let context = chan.context();
let logger = WithChannelContext::from(&self.logger, context, None);
log_error!(logger,
"Force-closing pending channel with ID {} for not establishing in a timely manner",
context.channel_id());
let reason = ClosureReason::FundingTimedOut;
let msg = "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned();
let err = ChannelError::Close((msg, reason));
let (_, e) = convert_channel_err!(self, peer_state, err, chan);
handle_errors.push((Err(e), counterparty_node_id));
false
} else {
true
}
},
}
});
for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() {
if {
req.ticks_remaining -= 1;
req.ticks_remaining
} <= 0
{
let logger = WithContext::from(
&self.logger,
Some(counterparty_node_id),
Some(*chan_id),
None,
);
log_error!(logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id);
if peer_state.is_connected {
peer_state.pending_msg_events.push(MessageSendEvent::HandleError {
node_id: counterparty_node_id,
action: msgs::ErrorAction::SendErrorMessage {
msg: msgs::ErrorMessage {
channel_id: chan_id.clone(),
data: "Channel force-closed".to_owned(),
},
},
});
}
}
}
peer_state
.inbound_channel_request_by_id
.retain(|_, req| req.ticks_remaining > 0);
if peer_state.ok_to_remove(true) {
pending_peers_awaiting_removal.push(counterparty_node_id);
}
}
}
if !pending_peers_awaiting_removal.is_empty() {
let mut per_peer_state = self.per_peer_state.write().unwrap();
for counterparty_node_id in pending_peers_awaiting_removal {
match per_peer_state.entry(counterparty_node_id) {
hash_map::Entry::Occupied(entry) => {
let remove_entry = {
let peer_state = entry.get().lock().unwrap();
peer_state.ok_to_remove(true)
};
if remove_entry {
entry.remove_entry();
}
},
hash_map::Entry::Vacant(_) => {
},
}
}
}
self.claimable_payments.lock().unwrap().claimable_payments.retain(
|payment_hash, payment| {
if payment.htlcs.is_empty() {
debug_assert!(false);
return false;
}
if let OnionPayload::Invoice { .. } = payment.htlcs[0].onion_payload {
let htlc_total_msat =
payment.htlcs.iter().map(|h| h.sender_intended_value).sum();
if payment.htlcs[0].total_msat <= htlc_total_msat {
return true;
} else if payment.htlcs.iter_mut().any(|htlc| {
htlc.timer_ticks += 1;
return htlc.timer_ticks >= MPP_TIMEOUT_TICKS;
}) {
let htlcs = payment
.htlcs
.drain(..)
.map(|htlc: ClaimableHTLC| (htlc.prev_hop, *payment_hash));
timed_out_mpp_htlcs.extend(htlcs);
return false;
}
}
true
},
);
for htlc_source in timed_out_mpp_htlcs.drain(..) {
let source = HTLCSource::PreviousHopData(htlc_source.0.clone());
let failure_reason = LocalHTLCFailureReason::MPPTimeout;
let reason = HTLCFailReason::from_failure_code(failure_reason);
let receiver = HTLCHandlingFailureType::Receive { payment_hash: htlc_source.1 };
self.fail_htlc_backwards_internal(&source, &htlc_source.1, &reason, receiver, None);
}
for (err, counterparty_node_id) in handle_errors {
let _ = handle_error!(self, err, counterparty_node_id);
}
#[cfg(feature = "std")]
let duration_since_epoch = std::time::SystemTime::now()
.duration_since(std::time::SystemTime::UNIX_EPOCH)
.expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
#[cfg(not(feature = "std"))]
let duration_since_epoch = Duration::from_secs(
self.highest_seen_timestamp.load(Ordering::Acquire).saturating_sub(7200) as u64,
);
self.pending_outbound_payments
.remove_stale_payments(duration_since_epoch, &self.pending_events);
self.check_refresh_async_receive_offer_cache(true);
if self.check_free_holding_cells() {
should_persist = NotifyOption::DoPersist;
}
should_persist
});
}
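/// Indicates that the preimage for the given `payment_hash` is unknown or the received amount is
/// incorrect after an [`Event::PaymentClaimable`], failing all pending HTLCs for the payment back
/// to their sources with [`FailureCode::IncorrectOrUnknownPaymentDetails`].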
pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
let failure_code = FailureCode::IncorrectOrUnknownPaymentDetails;
self.fail_htlc_backwards_with_reason(payment_hash, failure_code);
}
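/// As [`ChannelManager::fail_htlc_backwards`], but allows the failure code reported to the
/// sender to be specified explicitly.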
pub fn fail_htlc_backwards_with_reason(
&self, payment_hash: &PaymentHash, failure_code: FailureCode,
) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let removed_source =
self.claimable_payments.lock().unwrap().claimable_payments.remove(payment_hash);
if let Some(payment) = removed_source {
for htlc in payment.htlcs {
let reason = self.get_htlc_fail_reason_from_failure_code(failure_code, &htlc);
let source = HTLCSource::PreviousHopData(htlc.prev_hop);
let receiver = HTLCHandlingFailureType::Receive { payment_hash: *payment_hash };
self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver, None);
}
}
}
fn get_htlc_fail_reason_from_failure_code(
&self, failure_code: FailureCode, htlc: &ClaimableHTLC,
) -> HTLCFailReason {
match failure_code {
FailureCode::TemporaryNodeFailure => {
HTLCFailReason::from_failure_code(failure_code.into())
},
FailureCode::RequiredNodeFeatureMissing => {
HTLCFailReason::from_failure_code(failure_code.into())
},
FailureCode::IncorrectOrUnknownPaymentDetails => {
let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
htlc_msat_height_data
.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes());
HTLCFailReason::reason(failure_code.into(), htlc_msat_height_data)
},
FailureCode::InvalidOnionPayload(data) => {
let fail_data = match data {
Some((typ, offset)) => [BigSize(typ).encode(), offset.encode()].concat(),
None => Vec::new(),
};
HTLCFailReason::reason(failure_code.into(), fail_data)
},
}
}
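/// Builds the BOLT 4 failure data for a temporary channel failure where we omit the
/// `channel_update`: a two-byte `disabled_flags` field (all-zero, present only for
/// `ChannelDisabled`) followed by a two-byte zero `channel_update` length.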
fn get_htlc_inbound_temp_fail_data(&self, reason: LocalHTLCFailureReason) -> Vec<u8> {
debug_assert!(reason.is_temporary());
debug_assert!(reason != LocalHTLCFailureReason::AmountBelowMinimum);
debug_assert!(reason != LocalHTLCFailureReason::FeeInsufficient);
debug_assert!(reason != LocalHTLCFailureReason::IncorrectCLTVExpiry);
let mut enc = VecWriter(Vec::with_capacity(4));
if reason == LocalHTLCFailureReason::ChannelDisabled {
0u16.write(&mut enc).expect("Writes cannot fail");
}
(0u16).write(&mut enc).expect("Writes cannot fail");
enc.0
}
fn fail_holding_cell_htlcs(
&self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: ChannelId,
counterparty_node_id: &PublicKey,
) {
let (failure_reason, onion_failure_data) = {
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(channel_id) {
hash_map::Entry::Occupied(chan_entry) => {
if let Some(_chan) = chan_entry.get().as_funded() {
let reason = LocalHTLCFailureReason::TemporaryChannelFailure;
let data = self.get_htlc_inbound_temp_fail_data(reason);
(reason, data)
} else {
debug_assert!(false);
(LocalHTLCFailureReason::UnknownNextPeer, Vec::new())
}
},
hash_map::Entry::Vacant(_) => {
(LocalHTLCFailureReason::UnknownNextPeer, Vec::new())
},
}
} else {
(LocalHTLCFailureReason::UnknownNextPeer, Vec::new())
}
};
for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
let reason = HTLCFailReason::reason(failure_reason, onion_failure_data.clone());
let receiver = HTLCHandlingFailureType::Forward {
node_id: Some(counterparty_node_id.clone()),
channel_id,
};
self.fail_htlc_backwards_internal(&htlc_src, &payment_hash, &reason, receiver, None);
}
}
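/// Fails an HTLC backwards to whoever sent it to us. HTLCs we originated are failed via the
/// pending outbound payments tracker; forwarded HTLCs are queued as a failure on the inbound
/// channel (an encrypted onion failure, or a malformed failure when we are the blinded node in a
/// blinded path) and an [`Event::HTLCHandlingFailed`] is generated.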
fn fail_htlc_backwards_internal(
&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason,
failure_type: HTLCHandlingFailureType,
mut from_monitor_update_completion: Option<PaymentCompleteUpdate>,
) {
#[cfg(debug_assertions)]
for (_, peer) in self.per_peer_state.read().unwrap().iter() {
debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
}
match source {
HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => {
let logger = WithContext::from(&self.logger, None, None, Some(*payment_hash));
self.pending_outbound_payments.fail_htlc(
source,
payment_hash,
onion_error,
path,
session_priv,
payment_id,
self.probing_cookie_secret,
&self.secp_ctx,
&self.pending_events,
&mut from_monitor_update_completion,
&logger,
);
if let Some(update) = from_monitor_update_completion {
let action =
EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(update);
let have_action = {
let pending_events = self.pending_events.lock().unwrap();
pending_events.iter().any(|(_, act)| act.as_ref() == Some(&action))
};
if !have_action {
self.handle_post_event_actions([action]);
}
}
},
HTLCSource::PreviousHopData(HTLCPreviousHopData {
ref prev_outbound_scid_alias,
ref htlc_id,
ref incoming_packet_shared_secret,
ref phantom_shared_secret,
outpoint: _,
ref blinded_failure,
ref channel_id,
..
}) => {
log_trace!(
WithContext::from(&self.logger, None, Some(*channel_id), Some(*payment_hash)),
"Failing {}HTLC with payment_hash {} backwards from us: {:?}",
if blinded_failure.is_some() { "blinded " } else { "" },
&payment_hash,
onion_error
);
let failure = match blinded_failure {
Some(BlindedFailure::FromIntroductionNode) => {
let blinded_onion_error = HTLCFailReason::reason(
LocalHTLCFailureReason::InvalidOnionBlinding,
vec![0; 32],
);
let err_packet = blinded_onion_error.get_encrypted_failure_packet(
incoming_packet_shared_secret,
phantom_shared_secret,
);
HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
},
Some(BlindedFailure::FromBlindedNode) => HTLCForwardInfo::FailMalformedHTLC {
htlc_id: *htlc_id,
failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(),
sha256_of_onion: [0; 32],
},
None => {
let err_packet = onion_error.get_encrypted_failure_packet(
incoming_packet_shared_secret,
phantom_shared_secret,
);
HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
},
};
let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
match forward_htlcs.entry(*prev_outbound_scid_alias) {
hash_map::Entry::Occupied(mut entry) => {
entry.get_mut().push(failure);
},
hash_map::Entry::Vacant(entry) => {
entry.insert(vec![failure]);
},
}
mem::drop(forward_htlcs);
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push_back((
events::Event::HTLCHandlingFailed {
prev_channel_id: *channel_id,
failure_type,
failure_reason: Some(onion_error.into()),
},
None,
));
},
}
}
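/// Provides a payment preimage in response to an [`Event::PaymentClaimable`], beginning the
/// claim of all HTLC parts of the payment and ultimately generating an
/// [`Event::PaymentClaimed`]. Payments carrying custom TLVs with even type numbers are failed
/// back rather than claimed; see [`ChannelManager::claim_funds_with_known_custom_tlvs`].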
pub fn claim_funds(&self, payment_preimage: PaymentPreimage) {
self.claim_payment_internal(payment_preimage, false);
}
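/// As [`ChannelManager::claim_funds`], but accepts payments carrying custom TLVs with even type
/// numbers, which the caller asserts it understands.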
pub fn claim_funds_with_known_custom_tlvs(&self, payment_preimage: PaymentPreimage) {
self.claim_payment_internal(payment_preimage, true);
}
fn claim_payment_internal(&self, payment_preimage: PaymentPreimage, custom_tlvs_known: bool) {
let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).to_byte_array());
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let (sources, claiming_payment) = {
let res = self.claimable_payments.lock().unwrap().begin_claiming_payment(
payment_hash,
&self.node_signer,
&self.logger,
&self.inbound_payment_id_secret,
custom_tlvs_known,
);
match res {
Ok((htlcs, payment_info)) => (htlcs, payment_info),
Err(htlcs) => {
for htlc in htlcs {
let reason = self.get_htlc_fail_reason_from_failure_code(
FailureCode::InvalidOnionPayload(None),
&htlc,
);
let source = HTLCSource::PreviousHopData(htlc.prev_hop);
let receiver = HTLCHandlingFailureType::Receive { payment_hash };
self.fail_htlc_backwards_internal(
&source,
&payment_hash,
&reason,
receiver,
None,
);
}
return;
},
}
};
debug_assert!(!sources.is_empty());
let mut claimable_amt_msat = 0;
let mut prev_total_msat = None;
let mut expected_amt_msat = None;
let mut valid_mpp = true;
let mut errs = Vec::new();
let per_peer_state = self.per_peer_state.read().unwrap();
for htlc in sources.iter() {
if prev_total_msat.is_some() && prev_total_msat != Some(htlc.total_msat) {
log_error!(self.logger, "Somehow ended up with an MPP payment with different expected total amounts - this should not be reachable!");
debug_assert!(false);
valid_mpp = false;
break;
}
prev_total_msat = Some(htlc.total_msat);
if expected_amt_msat.is_some() && expected_amt_msat != htlc.total_value_received {
log_error!(self.logger, "Somehow ended up with an MPP payment with different received total amounts - this should not be reachable!");
debug_assert!(false);
valid_mpp = false;
break;
}
expected_amt_msat = htlc.total_value_received;
claimable_amt_msat += htlc.value;
}
mem::drop(per_peer_state);
if sources.is_empty() || expected_amt_msat.is_none() {
self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
log_info!(
self.logger,
"Attempted to claim an incomplete payment which no longer had any available HTLCs!"
);
return;
}
if claimable_amt_msat != expected_amt_msat.unwrap() {
self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.",
expected_amt_msat.unwrap(), claimable_amt_msat);
return;
}
if valid_mpp {
let mpp_parts: Vec<_> = sources
.iter()
.filter_map(|htlc| {
if let Some(cp_id) = htlc.prev_hop.counterparty_node_id {
Some(MPPClaimHTLCSource {
counterparty_node_id: cp_id,
funding_txo: htlc.prev_hop.outpoint,
channel_id: htlc.prev_hop.channel_id,
htlc_id: htlc.prev_hop.htlc_id,
})
} else {
None
}
})
.collect();
let pending_mpp_claim_ptr_opt = if sources.len() > 1 {
let mut channels_without_preimage = Vec::with_capacity(mpp_parts.len());
for part in mpp_parts.iter() {
let chan = (part.counterparty_node_id, part.channel_id);
if !channels_without_preimage.contains(&chan) {
channels_without_preimage.push(chan);
}
}
Some(Arc::new(Mutex::new(PendingMPPClaim {
channels_without_preimage,
channels_with_preimage: Vec::new(),
})))
} else {
None
};
let payment_info = Some(PaymentClaimDetails { mpp_parts, claiming_payment });
for htlc in sources {
let this_mpp_claim =
pending_mpp_claim_ptr_opt.as_ref().map(|pending_mpp_claim| {
let counterparty_id = htlc.prev_hop.counterparty_node_id;
let counterparty_id = counterparty_id
.expect("Prior to upgrading to LDK 0.1, all pending HTLCs forwarded by LDK 0.0.123 or before must be resolved. It appears at least one claimable payment was not resolved. Please downgrade to LDK 0.0.125 and resolve the HTLC by claiming the payment prior to upgrading.");
let claim_ptr = PendingMPPClaimPointer(Arc::clone(pending_mpp_claim));
(counterparty_id, htlc.prev_hop.channel_id, claim_ptr)
});
let raa_blocker = pending_mpp_claim_ptr_opt.as_ref().map(|pending_claim| {
RAAMonitorUpdateBlockingAction::ClaimedMPPPayment {
pending_claim: PendingMPPClaimPointer(Arc::clone(pending_claim)),
}
});
let attribution_data =
if let Some(phantom_secret) = htlc.prev_hop.phantom_shared_secret {
let attribution_data =
process_fulfill_attribution_data(None, &phantom_secret, 0);
Some(attribution_data)
} else {
None
};
let attribution_data = process_fulfill_attribution_data(
attribution_data,
&htlc.prev_hop.incoming_packet_shared_secret,
0,
);
self.claim_funds_from_hop(
htlc.prev_hop,
payment_preimage,
payment_info.clone(),
Some(attribution_data),
|_, definitely_duplicate| {
debug_assert!(
!definitely_duplicate,
"We shouldn't claim duplicatively from a payment"
);
(
Some(MonitorUpdateCompletionAction::PaymentClaimed {
payment_hash,
pending_mpp_claim: this_mpp_claim,
}),
raa_blocker,
)
},
);
}
} else {
for htlc in sources {
let err_data =
invalid_payment_err_data(htlc.value, self.best_block.read().unwrap().height);
let source = HTLCSource::PreviousHopData(htlc.prev_hop);
let reason = HTLCFailReason::reason(
LocalHTLCFailureReason::IncorrectPaymentDetails,
err_data,
);
let receiver = HTLCHandlingFailureType::Receive { payment_hash };
self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver, None);
}
self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
}
for (counterparty_node_id, err) in errs.drain(..) {
let res: Result<(), _> = Err(err);
let _ = handle_error!(self, res, counterparty_node_id);
}
}
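/// Claims a single HTLC against the channel on which we received it, resolving the counterparty
/// node id from the previous hop's outbound SCID alias when it was not serialized (data written
/// by pre-0.1 versions), then delegating to [`ChannelManager::claim_mpp_part`].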
fn claim_funds_from_hop<
ComplFunc: FnOnce(
Option<u64>,
bool,
) -> (Option<MonitorUpdateCompletionAction>, Option<RAAMonitorUpdateBlockingAction>),
>(
&self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage,
payment_info: Option<PaymentClaimDetails>, attribution_data: Option<AttributionData>,
completion_action: ComplFunc,
) {
let counterparty_node_id = prev_hop.counterparty_node_id.or_else(|| {
let short_to_chan_info = self.short_to_chan_info.read().unwrap();
short_to_chan_info.get(&prev_hop.prev_outbound_scid_alias).map(|(cp_id, _)| *cp_id)
});
let counterparty_node_id = if let Some(node_id) = counterparty_node_id {
node_id
} else {
let payment_hash: PaymentHash = payment_preimage.into();
panic!(
"Prior to upgrading to LDK 0.1, all pending HTLCs forwarded by LDK 0.0.123 or before must be resolved. It appears at least the HTLC with payment_hash {payment_hash} (preimage {payment_preimage}) was not resolved. Please downgrade to LDK 0.0.125 and resolve the HTLC prior to upgrading.",
);
};
let htlc_source = HTLCClaimSource {
counterparty_node_id,
funding_txo: prev_hop.outpoint,
channel_id: prev_hop.channel_id,
htlc_id: prev_hop.htlc_id,
};
self.claim_mpp_part(
htlc_source,
payment_preimage,
payment_info,
attribution_data,
completion_action,
)
}
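/// Claims one MPP part against its inbound channel. For an open channel this produces an
/// `update_fulfill_htlc` plus commitment update; for a closed channel the preimage is written
/// directly to the [`ChannelMonitor`] via a post-close monitor update. `completion_action`
/// receives the claimed HTLC value (if newly claimed) and whether the claim was definitely a
/// duplicate, and returns an optional completion action and RAA blocker to track against the
/// channel.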
fn claim_mpp_part<
ComplFunc: FnOnce(
Option<u64>,
bool,
) -> (Option<MonitorUpdateCompletionAction>, Option<RAAMonitorUpdateBlockingAction>),
>(
&self, prev_hop: HTLCClaimSource, payment_preimage: PaymentPreimage,
payment_info: Option<PaymentClaimDetails>, attribution_data: Option<AttributionData>,
completion_action: ComplFunc,
) {
let during_init = !self.background_events_processed_since_startup.load(Ordering::Acquire);
debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
debug_assert_ne!(self.claimable_payments.held_by_thread(), LockHeldState::HeldByThread);
let per_peer_state = self.per_peer_state.read().unwrap();
let chan_id = prev_hop.channel_id;
const MISSING_MON_ERROR: &'static str =
"If we're going to claim an HTLC against a channel, we should always have *some* state for the channel, even if just the latest ChannelMonitor update_id. This failure indicates we need to claim an HTLC from a channel for which we did not have a ChannelMonitor at startup and didn't create one while running.";
let mut peer_state_lock = per_peer_state
.get(&prev_hop.counterparty_node_id)
.map(|peer_mutex| peer_mutex.lock().unwrap())
.expect(MISSING_MON_ERROR);
{
let peer_state = &mut *peer_state_lock;
if let hash_map::Entry::Occupied(mut chan_entry) =
peer_state.channel_by_id.entry(chan_id)
{
if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
let fulfill_res = chan.get_update_fulfill_htlc_and_commit(
prev_hop.htlc_id,
payment_preimage,
payment_info,
attribution_data,
&&logger,
);
match fulfill_res {
UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } => {
let (action_opt, raa_blocker_opt) =
completion_action(Some(htlc_value_msat), false);
if let Some(action) = action_opt {
log_trace!(logger, "Tracking monitor update completion action for channel {}: {:?}",
chan_id, action);
peer_state
.monitor_update_blocked_actions
.entry(chan_id)
.or_insert(Vec::new())
.push(action);
}
if let Some(raa_blocker) = raa_blocker_opt {
peer_state
.actions_blocking_raa_monitor_updates
.entry(chan_id)
.or_insert_with(Vec::new)
.push(raa_blocker);
}
handle_new_monitor_update!(
self,
prev_hop.funding_txo,
monitor_update,
peer_state_lock,
peer_state,
per_peer_state,
chan
);
},
UpdateFulfillCommitFetch::DuplicateClaim {} => {
let (action_opt, raa_blocker_opt) = completion_action(None, true);
if let Some(raa_blocker) = raa_blocker_opt {
let actions = &mut peer_state.actions_blocking_raa_monitor_updates;
let actions_list = actions.entry(chan_id).or_insert_with(Vec::new);
if !actions_list.contains(&raa_blocker) {
debug_assert!(during_init);
actions_list.push(raa_blocker);
}
}
let action = if let Some(action) = action_opt {
action
} else {
return;
};
let in_flight_mons = peer_state.in_flight_monitor_updates.get(&chan_id);
if in_flight_mons.map(|(_, mons)| !mons.is_empty()).unwrap_or(false) {
peer_state
.monitor_update_blocked_actions
.entry(chan_id)
.or_insert_with(Vec::new)
.push(action);
return;
}
mem::drop(peer_state_lock);
log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}",
chan_id, action);
if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
downstream_counterparty_node_id: node_id,
blocking_action: blocker,
downstream_channel_id: channel_id,
} = action
{
if let Some(peer_state_mtx) = per_peer_state.get(&node_id) {
let mut peer_state = peer_state_mtx.lock().unwrap();
if let Some(blockers) = peer_state
.actions_blocking_raa_monitor_updates
.get_mut(&channel_id)
{
let mut found_blocker = false;
blockers.retain(|iter| {
let first_blocker = !found_blocker;
if *iter == blocker {
found_blocker = true;
}
*iter != blocker || !first_blocker
});
debug_assert!(found_blocker);
}
} else {
debug_assert!(false);
}
} else if matches!(
action,
MonitorUpdateCompletionAction::PaymentClaimed { .. }
) {
debug_assert!(during_init,
"Duplicate claims should always either be for forwarded payments (freeing another channel immediately) or during init (for claim replay)");
mem::drop(per_peer_state);
self.handle_monitor_update_completion_actions([action]);
} else {
debug_assert!(false,
"Duplicate claims should always either be for forwarded payments (freeing another channel immediately) or during init (for claim replay)");
return;
};
},
}
}
return;
}
}
let peer_state = &mut *peer_state_lock;
let update_id = if let Some(latest_update_id) =
peer_state.closed_channel_monitor_update_ids.get_mut(&chan_id)
{
*latest_update_id = latest_update_id.saturating_add(1);
*latest_update_id
} else {
let err = "We need the latest ChannelMonitorUpdate ID to build a new update.
This should have been checked for availability on startup but somehow it is no longer available.
This indicates a bug inside LDK. Please report this error at https://github.com/lightningdevkit/rust-lightning/issues/new";
log_error!(self.logger, "{}", err);
panic!("{}", err);
};
let preimage_update = ChannelMonitorUpdate {
update_id,
updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
payment_preimage,
payment_info,
}],
channel_id: Some(prev_hop.channel_id),
};
let (action_opt, raa_blocker_opt) = completion_action(None, false);
if let Some(raa_blocker) = raa_blocker_opt {
peer_state
.actions_blocking_raa_monitor_updates
.entry(prev_hop.channel_id)
.or_default()
.push(raa_blocker);
}
let payment_hash = payment_preimage.into();
let logger = WithContext::from(
&self.logger,
Some(prev_hop.counterparty_node_id),
Some(chan_id),
Some(payment_hash),
);
if let Some(action) = action_opt {
log_trace!(
logger,
"Tracking monitor update completion action for closed channel {}: {:?}",
chan_id,
action
);
peer_state
.monitor_update_blocked_actions
.entry(chan_id)
.or_insert(Vec::new())
.push(action);
}
handle_new_monitor_update!(
self,
prev_hop.funding_txo,
preimage_update,
peer_state_lock,
peer_state,
per_peer_state,
prev_hop.counterparty_node_id,
chan_id,
POST_CHANNEL_CLOSE
);
}
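/// Finalizes claimed outbound HTLCs, decoding any fulfill attribution data into per-hop hold
/// times (using the derived trampoline session key where the path has trampoline hops) before
/// handing them to the pending outbound payments tracker.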
fn finalize_claims(&self, sources: Vec<(HTLCSource, Option<AttributionData>)>) {
let hold_times = sources.into_iter().filter_map(|(source, attribution_data)| {
if let HTLCSource::OutboundRoute { ref session_priv, ref path, .. } = source {
let derived_key;
let session_priv = if path.has_trampoline_hops() {
let session_priv_hash =
Sha256::hash(&session_priv.secret_bytes()).to_byte_array();
derived_key = SecretKey::from_slice(&session_priv_hash[..]).unwrap();
&derived_key
} else {
session_priv
};
let hold_times = attribution_data.map_or(Vec::new(), |attribution_data| {
decode_fulfill_attribution_data(
&self.secp_ctx,
&self.logger,
path,
session_priv,
attribution_data,
)
});
Some((source, hold_times))
} else {
None
}
});
self.pending_outbound_payments.finalize_claims(hold_times, &self.pending_events);
}
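/// Handles a newly-learned preimage for an HTLC we sent or forwarded. For our own payments, the
/// pending outbound payment is settled and an event completion action is queued; for forwarded
/// HTLCs, the claim is passed backwards to the inbound channel, generating an
/// [`Event::PaymentForwarded`] once the claim completes.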
fn claim_funds_internal(
&self, source: HTLCSource, payment_preimage: PaymentPreimage,
forwarded_htlc_value_msat: Option<u64>, skimmed_fee_msat: Option<u64>, from_onchain: bool,
next_channel_counterparty_node_id: PublicKey, next_channel_outpoint: OutPoint,
next_channel_id: ChannelId, next_user_channel_id: Option<u128>,
attribution_data: Option<AttributionData>, send_timestamp: Option<Duration>,
) {
let startup_replay =
!self.background_events_processed_since_startup.load(Ordering::Acquire);
let htlc_id = SentHTLCId::from_source(&source);
match source {
HTLCSource::OutboundRoute {
session_priv, payment_id, path, bolt12_invoice, ..
} => {
debug_assert!(!startup_replay,
"We don't support claim_htlc claims during startup - monitors may not be available yet");
debug_assert_eq!(next_channel_counterparty_node_id, path.hops[0].pubkey);
let mut ev_completion_action = if from_onchain {
let release = PaymentCompleteUpdate {
counterparty_node_id: next_channel_counterparty_node_id,
channel_funding_outpoint: next_channel_outpoint,
channel_id: next_channel_id,
htlc_id,
};
Some(EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(release))
} else {
Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
channel_funding_outpoint: Some(next_channel_outpoint),
channel_id: next_channel_id,
counterparty_node_id: path.hops[0].pubkey,
})
};
self.pending_outbound_payments.claim_htlc(
payment_id,
payment_preimage,
bolt12_invoice,
session_priv,
path,
from_onchain,
&mut ev_completion_action,
&self.pending_events,
&WithContext::from(&self.logger, None, None, Some(payment_preimage.into())),
);
let have_action = if ev_completion_action.is_some() {
let pending_events = self.pending_events.lock().unwrap();
pending_events.iter().any(|(_, act)| *act == ev_completion_action)
} else {
false
};
if !have_action {
self.handle_post_event_actions(ev_completion_action);
}
},
HTLCSource::PreviousHopData(hop_data) => {
let prev_channel_id = hop_data.channel_id;
let prev_user_channel_id = hop_data.user_channel_id;
let prev_node_id = hop_data.counterparty_node_id;
let completed_blocker =
RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
let hold_time = hold_time_since(send_timestamp).unwrap_or(0);
let attribution_data = process_fulfill_attribution_data(
attribution_data,
&hop_data.incoming_packet_shared_secret,
hold_time,
);
#[cfg(test)]
let claiming_chan_funding_outpoint = hop_data.outpoint;
self.claim_funds_from_hop(
hop_data,
payment_preimage,
None,
Some(attribution_data),
|htlc_claim_value_msat, definitely_duplicate| {
let chan_to_release = Some(EventUnblockedChannel {
counterparty_node_id: next_channel_counterparty_node_id,
funding_txo: next_channel_outpoint,
channel_id: next_channel_id,
blocking_action: completed_blocker,
});
if definitely_duplicate && startup_replay {
#[cfg(test)]
{
let per_peer_state = self.per_peer_state.deadlocking_read();
let channel_closed = per_peer_state
.get(&next_channel_counterparty_node_id)
.map(|lck| lck.deadlocking_lock())
.map(|peer| !peer.channel_by_id.contains_key(&next_channel_id))
.unwrap_or(true);
let background_events =
self.pending_background_events.lock().unwrap();
let matching_bg_event =
background_events.iter().any(|ev| {
match ev {
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
funding_txo, update, ..
} => {
if *funding_txo == claiming_chan_funding_outpoint {
assert!(update.updates.iter().any(|upd|
if let ChannelMonitorUpdateStep::PaymentPreimage {
payment_preimage: update_preimage, ..
} = upd {
payment_preimage == *update_preimage
} else { false }
), "{:?}", update);
true
} else { false }
},
BackgroundEvent::MonitorUpdatesComplete {
channel_id, ..
} =>
*channel_id == prev_channel_id,
}
});
assert!(
channel_closed || matching_bg_event,
"{:?}",
*background_events
);
}
(None, None)
} else if definitely_duplicate {
if let Some(other_chan) = chan_to_release {
(Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
downstream_counterparty_node_id: other_chan.counterparty_node_id,
downstream_channel_id: other_chan.channel_id,
blocking_action: other_chan.blocking_action,
}), None)
} else {
(None, None)
}
} else {
let total_fee_earned_msat =
if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
if let Some(claimed_htlc_value) = htlc_claim_value_msat {
Some(claimed_htlc_value - forwarded_htlc_value)
} else {
None
}
} else {
None
};
debug_assert!(
skimmed_fee_msat <= total_fee_earned_msat,
"skimmed_fee_msat must always be included in total_fee_earned_msat"
);
(
Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
event: events::Event::PaymentForwarded {
prev_channel_id: Some(prev_channel_id),
next_channel_id: Some(next_channel_id),
prev_user_channel_id,
next_user_channel_id,
prev_node_id,
next_node_id: Some(next_channel_counterparty_node_id),
total_fee_earned_msat,
skimmed_fee_msat,
claim_from_onchain_tx: from_onchain,
outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
},
downstream_counterparty_and_funding_outpoint: chan_to_release,
}),
None,
)
}
},
);
},
}
}
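/// Gets the node_id held by this [`ChannelManager`].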
pub fn get_our_node_id(&self) -> PublicKey {
self.our_network_pubkey
}
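/// Applies actions which were blocked on a [`ChannelMonitorUpdate`] completing: surfacing
/// [`Event::PaymentClaimed`] once every MPP part of a claim has its preimage persisted, emitting
/// deferred events, and releasing RAA monitor update blockers on downstream channels.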
#[rustfmt::skip]
fn handle_monitor_update_completion_actions<I: IntoIterator<Item=MonitorUpdateCompletionAction>>(&self, actions: I) {
debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
debug_assert_ne!(self.claimable_payments.held_by_thread(), LockHeldState::HeldByThread);
debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
let mut freed_channels = Vec::new();
for action in actions.into_iter() {
match action {
MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim } => {
if let Some((counterparty_node_id, chan_id, claim_ptr)) = pending_mpp_claim {
let per_peer_state = self.per_peer_state.read().unwrap();
per_peer_state.get(&counterparty_node_id).map(|peer_state_mutex| {
let mut peer_state = peer_state_mutex.lock().unwrap();
let blockers_entry = peer_state.actions_blocking_raa_monitor_updates.entry(chan_id);
if let btree_map::Entry::Occupied(mut blockers) = blockers_entry {
blockers.get_mut().retain(|blocker|
if let &RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { pending_claim } = &blocker {
if *pending_claim == claim_ptr {
let mut pending_claim_state_lock = pending_claim.0.lock().unwrap();
let pending_claim_state = &mut *pending_claim_state_lock;
pending_claim_state.channels_without_preimage.retain(|(cp, cid)| {
let this_claim =
*cp == counterparty_node_id && *cid == chan_id;
if this_claim {
pending_claim_state.channels_with_preimage.push((*cp, *cid));
false
} else { true }
});
if pending_claim_state.channels_without_preimage.is_empty() {
for (cp, cid) in pending_claim_state.channels_with_preimage.iter() {
let freed_chan = (*cp, *cid, blocker.clone());
freed_channels.push(freed_chan);
}
}
!pending_claim_state.channels_without_preimage.is_empty()
} else { true }
} else { true }
);
if blockers.get().is_empty() {
blockers.remove();
}
}
});
}
let payment = self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
if let Some(ClaimingPayment {
amount_msat,
payment_purpose: purpose,
receiver_node_id,
htlcs,
sender_intended_value: sender_intended_total_msat,
onion_fields,
payment_id,
durable_preimage_channel,
}) = payment {
let event = events::Event::PaymentClaimed {
payment_hash,
purpose,
amount_msat,
receiver_node_id: Some(receiver_node_id),
htlcs,
sender_intended_total_msat,
onion_fields,
payment_id,
};
let action = if let Some((outpoint, counterparty_node_id, channel_id))
= durable_preimage_channel
{
Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
channel_funding_outpoint: Some(outpoint),
counterparty_node_id,
channel_id,
})
} else {
None
};
let event_action = (event, action);
let mut pending_events = self.pending_events.lock().unwrap();
if !pending_events.contains(&event_action) {
pending_events.push_back(event_action);
}
}
},
MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
event, downstream_counterparty_and_funding_outpoint
} => {
self.pending_events.lock().unwrap().push_back((event, None));
if let Some(unblocked) = downstream_counterparty_and_funding_outpoint {
self.handle_monitor_update_release(
unblocked.counterparty_node_id,
unblocked.channel_id,
Some(unblocked.blocking_action),
);
}
},
MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
downstream_counterparty_node_id, downstream_channel_id, blocking_action,
} => {
self.handle_monitor_update_release(
downstream_counterparty_node_id,
downstream_channel_id,
Some(blocking_action),
);
},
}
}
for (node_id, channel_id, blocker) in freed_channels {
self.handle_monitor_update_release(node_id, channel_id, Some(blocker));
}
}
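/// Queues the messages and events produced when a channel resumes operation after a monitor
/// update completes or a peer reconnects: `channel_ready`/`announcement_signatures` and the
/// revoke-and-ack/commitment update in the required orders, interactive-tx `tx_signatures` or
/// `tx_abort`, funding broadcast (or the manual-broadcast event), and any pending HTLC forwards
/// and `update_add_htlc` decodes, which are returned to the caller.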
#[rustfmt::skip]
fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
channel: &mut FundedChannel<SP>, raa: Option<msgs::RevokeAndACK>,
commitment_update: Option<msgs::CommitmentUpdate>, commitment_order: RAACommitmentOrder,
pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_update_adds: Vec<msgs::UpdateAddHTLC>,
funding_broadcastable: Option<Transaction>,
channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>,
tx_signatures: Option<msgs::TxSignatures>, tx_abort: Option<msgs::TxAbort>,
channel_ready_order: ChannelReadyOrder,
) -> (Option<(u64, PublicKey, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec<msgs::UpdateAddHTLC>)>) {
let logger = WithChannelContext::from(&self.logger, &channel.context, None);
log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort",
&channel.context.channel_id(),
if raa.is_some() { "an" } else { "no" },
if commitment_update.is_some() { "a" } else { "no" },
pending_forwards.len(), pending_update_adds.len(),
if funding_broadcastable.is_some() { "" } else { "not " },
if channel_ready.is_some() { "sending" } else { "without" },
if announcement_sigs.is_some() { "sending" } else { "without" },
if tx_signatures.is_some() { "sending" } else { "without" },
if tx_abort.is_some() { "sending" } else { "without" },
);
let counterparty_node_id = channel.context.get_counterparty_node_id();
let outbound_scid_alias = channel.context.outbound_scid_alias();
let mut htlc_forwards = None;
if !pending_forwards.is_empty() {
htlc_forwards = Some((
outbound_scid_alias, channel.context.get_counterparty_node_id(),
channel.funding.get_funding_txo().unwrap(), channel.context.channel_id(),
channel.context.get_user_id(), pending_forwards
));
}
let mut decode_update_add_htlcs = None;
if !pending_update_adds.is_empty() {
decode_update_add_htlcs = Some((outbound_scid_alias, pending_update_adds));
}
if channel.context.is_connected() {
if let ChannelReadyOrder::ChannelReadyFirst = channel_ready_order {
if let Some(msg) = &channel_ready {
send_channel_ready!(self, pending_msg_events, channel, msg.clone());
}
if let Some(msg) = &announcement_sigs {
pending_msg_events.push(MessageSendEvent::SendAnnouncementSignatures {
node_id: counterparty_node_id,
msg: msg.clone(),
});
}
}
macro_rules! handle_cs { () => {
if let Some(update) = commitment_update {
pending_msg_events.push(MessageSendEvent::UpdateHTLCs {
node_id: counterparty_node_id,
channel_id: channel.context.channel_id(),
updates: update,
});
}
} }
macro_rules! handle_raa { () => {
if let Some(revoke_and_ack) = raa {
pending_msg_events.push(MessageSendEvent::SendRevokeAndACK {
node_id: counterparty_node_id,
msg: revoke_and_ack,
});
}
} }
match commitment_order {
RAACommitmentOrder::CommitmentFirst => {
handle_cs!();
handle_raa!();
},
RAACommitmentOrder::RevokeAndACKFirst => {
handle_raa!();
handle_cs!();
},
}
if let Some(msg) = tx_signatures {
pending_msg_events.push(MessageSendEvent::SendTxSignatures {
node_id: counterparty_node_id,
msg,
});
}
if let Some(msg) = tx_abort {
pending_msg_events.push(MessageSendEvent::SendTxAbort {
node_id: counterparty_node_id,
msg,
});
}
if let ChannelReadyOrder::SignaturesFirst = channel_ready_order {
if let Some(msg) = channel_ready {
send_channel_ready!(self, pending_msg_events, channel, msg);
}
if let Some(msg) = announcement_sigs {
pending_msg_events.push(MessageSendEvent::SendAnnouncementSignatures {
node_id: counterparty_node_id,
msg,
});
}
}
} else if let Some(msg) = channel_ready {
send_channel_ready!(self, pending_msg_events, channel, msg);
}
if let Some(tx) = funding_broadcastable {
if channel.context.is_manual_broadcast() {
log_info!(logger, "Not broadcasting funding transaction with txid {} as it is manually managed", tx.compute_txid());
let mut pending_events = self.pending_events.lock().unwrap();
match channel.funding.get_funding_txo() {
Some(funding_txo) => {
emit_funding_tx_broadcast_safe_event!(pending_events, channel, funding_txo.into_bitcoin_outpoint())
},
None => {
debug_assert!(false, "Channel resumed without a funding txo, this should never happen!");
return (htlc_forwards, decode_update_add_htlcs);
}
};
} else {
log_info!(logger, "Broadcasting funding transaction with txid {}", tx.compute_txid());
self.tx_broadcaster.broadcast_transactions(&[&tx]);
}
}
if let Some(signing_session) = (!channel.is_awaiting_monitor_update())
.then(|| ())
.and_then(|_| channel.context.interactive_tx_signing_session.as_mut())
.filter(|signing_session| signing_session.has_received_commitment_signed())
.filter(|signing_session| signing_session.holder_tx_signatures().is_none())
{
if signing_session.has_local_contribution() {
let mut pending_events = self.pending_events.lock().unwrap();
let unsigned_transaction = signing_session.unsigned_tx().tx().clone();
let event_action = (
Event::FundingTransactionReadyForSigning {
unsigned_transaction,
counterparty_node_id,
channel_id: channel.context.channel_id(),
user_channel_id: channel.context.get_user_id(),
},
None,
);
if !pending_events.contains(&event_action) {
pending_events.push_back(event_action);
}
} else {
let txid = signing_session.unsigned_tx().compute_txid();
let best_block_height = self.best_block.read().unwrap().height;
match channel.funding_transaction_signed(txid, vec![], best_block_height, &self.logger) {
Ok(FundingTxSigned {
tx_signatures: Some(tx_signatures),
funding_tx,
splice_negotiated,
splice_locked,
}) => {
if let Some(funding_tx) = funding_tx {
self.broadcast_interactive_funding(channel, &funding_tx, &self.logger);
}
if let Some(splice_negotiated) = splice_negotiated {
self.pending_events.lock().unwrap().push_back((
events::Event::SplicePending {
channel_id: channel.context.channel_id(),
counterparty_node_id,
user_channel_id: channel.context.get_user_id(),
new_funding_txo: splice_negotiated.funding_txo,
channel_type: splice_negotiated.channel_type,
new_funding_redeem_script: splice_negotiated.funding_redeem_script,
},
None,
));
}
if channel.context.is_connected() {
pending_msg_events.push(MessageSendEvent::SendTxSignatures {
node_id: counterparty_node_id,
msg: tx_signatures,
});
if let Some(splice_locked) = splice_locked {
pending_msg_events.push(MessageSendEvent::SendSpliceLocked {
node_id: counterparty_node_id,
msg: splice_locked,
});
}
}
},
Ok(FundingTxSigned { tx_signatures: None, .. }) => {
debug_assert!(false, "If our tx_signatures is empty, then we should send it first!");
},
Err(err) => {
log_warn!(logger, "Failed signing interactive funding transaction: {err:?}");
},
}
}
}
{
let mut pending_events = self.pending_events.lock().unwrap();
emit_channel_pending_event!(pending_events, channel);
emit_initial_channel_ready_event!(pending_events, channel);
}
(htlc_forwards, decode_update_add_htlcs)
}
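/// Handles completion of a [`ChannelMonitorUpdate`], pruning in-flight updates up to
/// `highest_applied_update_id` when provided. Once no updates remain in flight for the channel,
/// resumes normal operation on it, or, if the channel is closed, runs the monitor update actions
/// which were blocked on the update.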
#[rustfmt::skip]
fn channel_monitor_updated(&self, channel_id: &ChannelId, highest_applied_update_id: Option<u64>, counterparty_node_id: &PublicKey) {
debug_assert!(self.total_consistency_lock.try_write().is_err());
let per_peer_state = self.per_peer_state.read().unwrap();
let mut peer_state_lock;
let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
if peer_state_mutex_opt.is_none() { return }
peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*channel_id), None);
let remaining_in_flight =
if let Some((_, pending)) = peer_state.in_flight_monitor_updates.get_mut(channel_id) {
if let Some(highest_applied_update_id) = highest_applied_update_id {
pending.retain(|upd| upd.update_id > highest_applied_update_id);
log_trace!(
logger,
"ChannelMonitor updated to {highest_applied_update_id}. {} pending in-flight updates.",
pending.len()
);
} else if let Some(update) = pending.get(0) {
log_trace!(
logger,
"ChannelMonitor updated to {}. {} pending in-flight updates.",
update.update_id - 1,
pending.len()
);
} else {
log_trace!(
logger,
"ChannelMonitor updated. {} pending in-flight updates.",
pending.len()
);
}
pending.len()
} else { 0 };
if remaining_in_flight != 0 {
return;
}
if let Some(chan) = peer_state.channel_by_id
.get_mut(channel_id)
.and_then(Channel::as_funded_mut)
{
if chan.is_awaiting_monitor_update() {
handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
} else {
log_trace!(logger, "Channel is open but not awaiting update");
}
} else {
let update_actions = peer_state.monitor_update_blocked_actions
.remove(channel_id).unwrap_or(Vec::new());
log_trace!(logger, "Channel is closed, applying {} post-update actions", update_actions.len());
mem::drop(peer_state_lock);
mem::drop(per_peer_state);
self.handle_monitor_update_completion_actions(update_actions);
}
}
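/// Accepts a request to open a channel after an [`Event::OpenChannelRequest`], identified by the
/// `temporary_channel_id` and `counterparty_node_id` from the event. The `user_channel_id` will
/// be echoed back in subsequent events for the channel, and `config_overrides` optionally
/// adjusts our channel configuration for this channel alone.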
pub fn accept_inbound_channel(
&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey,
user_channel_id: u128, config_overrides: Option<ChannelConfigOverrides>,
) -> Result<(), APIError> {
self.do_accept_inbound_channel(
temporary_channel_id,
counterparty_node_id,
false,
user_channel_id,
config_overrides,
)
}
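/// As [`ChannelManager::accept_inbound_channel`], but accepts the channel at zero confirmations.
/// This should only be used for channels from trusted peers, as an untrusted counterparty could
/// double-spend the funding transaction before it confirms.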
pub fn accept_inbound_channel_from_trusted_peer_0conf(
&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey,
user_channel_id: u128, config_overrides: Option<ChannelConfigOverrides>,
) -> Result<(), APIError> {
self.do_accept_inbound_channel(
temporary_channel_id,
counterparty_node_id,
true,
user_channel_id,
config_overrides,
)
}
#[rustfmt::skip]
fn do_accept_inbound_channel(
&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, accept_0conf: bool,
user_channel_id: u128, config_overrides: Option<ChannelConfigOverrides>
) -> Result<(), APIError> {
let mut config = self.config.read().unwrap().clone();
if let Some(overrides) = config_overrides {
config.apply(&overrides);
};
let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id), None);
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let peers_without_funded_channels =
self.peers_without_funded_channels(|peer| { peer.total_channel_count() > 0 });
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
let err_str = format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}");
log_error!(logger, "{}", err_str);
APIError::ChannelUnavailable { err: err_str }
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let is_only_peer_channel = peer_state.total_channel_count() == 1;
let res = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) {
Some(unaccepted_channel) => {
let best_block_height = self.best_block.read().unwrap().height;
match unaccepted_channel.open_channel_msg {
OpenChannelMessage::V1(open_channel_msg) => {
InboundV1Channel::new(
&self.fee_estimator, &self.entropy_source, &self.signer_provider, *counterparty_node_id,
&self.channel_type_features(), &peer_state.latest_features, &open_channel_msg,
user_channel_id, &config, best_block_height, &self.logger, accept_0conf
).map_err(|err| MsgHandleErrInternal::from_chan_no_close(err, *temporary_channel_id)
).map(|mut channel| {
let logger = WithChannelContext::from(&self.logger, &channel.context, None);
let message_send_event = channel.accept_inbound_channel(&&logger).map(|msg| {
MessageSendEvent::SendAcceptChannel {
node_id: *counterparty_node_id,
msg,
}
});
(*temporary_channel_id, Channel::from(channel), message_send_event)
})
},
OpenChannelMessage::V2(open_channel_msg) => {
PendingV2Channel::new_inbound(
&self.fee_estimator, &self.entropy_source, &self.signer_provider,
self.get_our_node_id(), *counterparty_node_id,
&self.channel_type_features(), &peer_state.latest_features,
&open_channel_msg,
user_channel_id, &config, best_block_height,
&self.logger,
).map_err(|e| {
let channel_id = open_channel_msg.common_fields.temporary_channel_id;
MsgHandleErrInternal::from_chan_no_close(e, channel_id)
}).map(|channel| {
let message_send_event = MessageSendEvent::SendAcceptChannelV2 {
node_id: channel.context.get_counterparty_node_id(),
msg: channel.accept_inbound_dual_funded_channel()
};
(channel.context.channel_id(), Channel::from(channel), Some(message_send_event))
})
},
}
},
None => {
let err_str = "No such channel awaiting to be accepted.".to_owned();
log_error!(logger, "{}", err_str);
return Err(APIError::APIMisuseError { err: err_str });
}
};
let (channel_id, mut channel, message_send_event) = match res {
Ok(res) => res,
Err(err) => {
mem::drop(peer_state_lock);
mem::drop(per_peer_state);
match handle_error!(self, Result::<(), MsgHandleErrInternal>::Err(err), *counterparty_node_id) {
Ok(_) => unreachable!("`handle_error` only returns Err as we've passed in an Err"),
Err(e) => {
return Err(APIError::ChannelUnavailable { err: e.err });
},
}
}
};
if accept_0conf {
debug_assert!(channel.minimum_depth().unwrap() == 0);
} else if channel.funding().get_channel_type().requires_zero_conf() {
let send_msg_err_event = MessageSendEvent::HandleError {
node_id: channel.context().get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage{
msg: msgs::ErrorMessage { channel_id: *temporary_channel_id, data: "No zero confirmation channels accepted".to_owned(), }
}
};
debug_assert!(peer_state.is_connected);
peer_state.pending_msg_events.push(send_msg_err_event);
let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
log_error!(logger, "{}", err_str);
return Err(APIError::APIMisuseError { err: err_str });
} else {
if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
let send_msg_err_event = MessageSendEvent::HandleError {
node_id: channel.context().get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage{
msg: msgs::ErrorMessage { channel_id: *temporary_channel_id, data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
}
};
debug_assert!(peer_state.is_connected);
peer_state.pending_msg_events.push(send_msg_err_event);
let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
log_error!(logger, "{}", err_str);
return Err(APIError::APIMisuseError { err: err_str });
}
}
let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
channel.context_mut().set_outbound_scid_alias(outbound_scid_alias);
if let Some(message_send_event) = message_send_event {
debug_assert!(peer_state.is_connected);
peer_state.pending_msg_events.push(message_send_event);
}
peer_state.channel_by_id.insert(channel_id, channel);
Ok(())
}
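/// Gets the number of peers which match the given filter and do not have any funded, outbound,
/// or unconfirmed channels.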
fn peers_without_funded_channels<Filter>(&self, maybe_count_peer: Filter) -> usize
where
Filter: Fn(&PeerState<SP>) -> bool,
{
let mut peers_without_funded_channels = 0;
let best_block_height = self.best_block.read().unwrap().height;
{
let peer_state_lock = self.per_peer_state.read().unwrap();
for (_, peer_mtx) in peer_state_lock.iter() {
let peer = peer_mtx.lock().unwrap();
if !maybe_count_peer(&*peer) {
continue;
}
let num_unfunded_channels = Self::unfunded_channel_count(&peer, best_block_height);
if num_unfunded_channels == peer.total_channel_count() {
peers_without_funded_channels += 1;
}
}
}
return peers_without_funded_channels;
}
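/// Returns the number of inbound channels with the given peer which are still awaiting their
/// funding confirmation, excluding zero-conf channels and V2 channels to which we are
/// contributing funds, plus the peer's count of unaccepted inbound channel requests.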
#[rustfmt::skip]
fn unfunded_channel_count(
peer: &PeerState<SP>, best_block_height: u32
) -> usize {
let mut num_unfunded_channels = 0;
for (_, chan) in peer.channel_by_id.iter() {
match chan.as_funded() {
Some(funded_chan) => {
if !funded_chan.funding.is_outbound() && chan.minimum_depth().unwrap_or(1) != 0 &&
funded_chan.funding.get_funding_tx_confirmations(best_block_height) == 0
{
num_unfunded_channels += 1;
}
},
None => {
if chan.funding().is_outbound() {
continue;
}
if chan.minimum_depth().unwrap_or(1) == 0 {
continue;
}
if let Some(unfunded_chan) = chan.as_unfunded_v2() {
if unfunded_chan.funding_negotiation_context.our_funding_contribution > SignedAmount::ZERO {
continue;
}
}
num_unfunded_channels += 1;
},
}
}
num_unfunded_channels + peer.inbound_channel_request_by_id.len()
}
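/// Handles an inbound `open_channel` or `open_channel2` message: checks the chain hash, our
/// inbound-channel acceptance settings, and unfunded channel limits, then either queues an
/// [`Event::OpenChannelRequest`] for manual acceptance or constructs the inbound channel and
/// replies with the corresponding `accept_channel` message.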
#[rustfmt::skip]
fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: OpenChannelMessageRef<'_>) -> Result<(), MsgHandleErrInternal> {
let common_fields = match msg {
OpenChannelMessageRef::V1(msg) => &msg.common_fields,
OpenChannelMessageRef::V2(msg) => &msg.common_fields,
};
if common_fields.chain_hash != self.chain_hash {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(),
common_fields.temporary_channel_id));
}
if !self.config.read().unwrap().accept_inbound_channels {
return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(),
common_fields.temporary_channel_id));
}
let channeled_peers_without_funding =
self.peers_without_funded_channels(|node| node.total_channel_count() > 0);
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(
format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"),
common_fields.temporary_channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
if peer_state.total_channel_count() == 0 &&
channeled_peers_without_funding >= MAX_UNFUNDED_CHANNEL_PEERS &&
!self.config.read().unwrap().manually_accept_inbound_channels
{
return Err(MsgHandleErrInternal::send_err_msg_no_close(
"Have too many peers with unfunded channels, not accepting new ones".to_owned(),
common_fields.temporary_channel_id));
}
let best_block_height = self.best_block.read().unwrap().height;
if Self::unfunded_channel_count(peer_state, best_block_height) >= MAX_UNFUNDED_CHANS_PER_PEER {
return Err(MsgHandleErrInternal::send_err_msg_no_close(
format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER),
common_fields.temporary_channel_id));
}
let channel_id = common_fields.temporary_channel_id;
let channel_exists = peer_state.has_channel(&channel_id);
if channel_exists {
return Err(MsgHandleErrInternal::send_err_msg_no_close(
"temporary_channel_id collision for the same peer!".to_owned(),
common_fields.temporary_channel_id));
}
let channel_type = channel::channel_type_from_open_channel(
common_fields, &self.channel_type_features()
).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, common_fields.temporary_channel_id))?;
if self.config.read().unwrap().manually_accept_inbound_channels {
let mut pending_events = self.pending_events.lock().unwrap();
let is_announced = (common_fields.channel_flags & 1) == 1;
pending_events.push_back((events::Event::OpenChannelRequest {
temporary_channel_id: common_fields.temporary_channel_id,
counterparty_node_id: *counterparty_node_id,
funding_satoshis: common_fields.funding_satoshis,
channel_negotiation_type: match msg {
OpenChannelMessageRef::V1(msg) => InboundChannelFunds::PushMsat(msg.push_msat),
OpenChannelMessageRef::V2(_) => InboundChannelFunds::DualFunded,
},
channel_type,
is_announced,
params: common_fields.channel_parameters(),
}, None));
peer_state.inbound_channel_request_by_id.insert(channel_id, InboundChannelRequest {
open_channel_msg: match msg {
OpenChannelMessageRef::V1(msg) => OpenChannelMessage::V1(msg.clone()),
OpenChannelMessageRef::V2(msg) => OpenChannelMessage::V2(msg.clone()),
},
ticks_remaining: UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS,
});
return Ok(());
}
let mut random_bytes = [0u8; 16];
random_bytes.copy_from_slice(&self.entropy_source.get_secure_random_bytes()[..16]);
let user_channel_id = u128::from_be_bytes(random_bytes);
if channel_type.requires_zero_conf() {
return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), common_fields.temporary_channel_id));
}
if channel_type.requires_anchors_zero_fee_htlc_tx() || channel_type.requires_anchor_zero_fee_commitments() {
return Err(MsgHandleErrInternal::send_err_msg_no_close("No channels with anchor outputs accepted".to_owned(), common_fields.temporary_channel_id));
}
let (mut channel, message_send_event) = match msg {
OpenChannelMessageRef::V1(msg) => {
let mut channel = InboundV1Channel::new(
&self.fee_estimator, &self.entropy_source, &self.signer_provider, *counterparty_node_id,
&self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id,
&self.config.read().unwrap(), best_block_height, &self.logger, false
).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id))?;
let logger = WithChannelContext::from(&self.logger, &channel.context, None);
let message_send_event = channel.accept_inbound_channel(&&logger).map(|msg| {
MessageSendEvent::SendAcceptChannel {
node_id: *counterparty_node_id,
msg,
}
});
(Channel::from(channel), message_send_event)
},
OpenChannelMessageRef::V2(msg) => {
let channel = PendingV2Channel::new_inbound(
&self.fee_estimator, &self.entropy_source, &self.signer_provider,
self.get_our_node_id(), *counterparty_node_id, &self.channel_type_features(),
&peer_state.latest_features, msg, user_channel_id,
&self.config.read().unwrap(), best_block_height, &self.logger,
).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id))?;
let message_send_event = MessageSendEvent::SendAcceptChannelV2 {
node_id: *counterparty_node_id,
msg: channel.accept_inbound_dual_funded_channel(),
};
(Channel::from(channel), Some(message_send_event))
},
};
let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
channel.context_mut().set_outbound_scid_alias(outbound_scid_alias);
if let Some(message_send_event) = message_send_event {
peer_state.pending_msg_events.push(message_send_event);
}
peer_state.channel_by_id.insert(channel.context().channel_id(), channel);
Ok(())
}
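/// Handles an `accept_channel` message for one of our outbound V1 channels, queueing an
/// `Event::FundingGenerationReady` so the user can construct the funding transaction.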
#[rustfmt::skip]
fn internal_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
let (value, output_script, user_id) = {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.common_fields.temporary_channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.common_fields.temporary_channel_id) {
hash_map::Entry::Occupied(mut chan) => {
match chan.get_mut().as_unfunded_outbound_v1_mut() {
Some(unfunded_chan) => {
let res = unfunded_chan.accept_channel(
msg,
&self.config.read().unwrap().channel_handshake_limits,
&peer_state.latest_features,
);
try_channel_entry!(self, peer_state, res, chan);
(unfunded_chan.funding.get_value_satoshis(), unfunded_chan.funding.get_funding_redeemscript().to_p2wsh(), unfunded_chan.context.get_user_id())
},
None => {
return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id));
}
}
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id))
}
};
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push_back((events::Event::FundingGenerationReady {
temporary_channel_id: msg.common_fields.temporary_channel_id,
counterparty_node_id: *counterparty_node_id,
channel_value_satoshis: value,
output_script,
user_channel_id: user_id,
}, None));
Ok(())
}
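/// Handles a `funding_created` message for an inbound V1 channel: promotes the channel to its
/// final channel ID, registers the initial `ChannelMonitor` with the chain monitor, and
/// responds with `funding_signed`.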
#[rustfmt::skip]
fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
let best_block = *self.best_block.read().unwrap();
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.temporary_channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let (mut chan, funding_msg_opt, monitor) =
match peer_state.channel_by_id.remove(&msg.temporary_channel_id)
.map(Channel::into_unfunded_inbound_v1)
{
Some(Ok(inbound_chan)) => {
let logger = WithChannelContext::from(&self.logger, &inbound_chan.context, None);
match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) {
Ok(res) => res,
Err((inbound_chan, err)) => {
debug_assert!(matches!(err, ChannelError::Close(_)));
let mut chan = Channel::from(inbound_chan);
return Err(convert_channel_err!(self, peer_state, err, &mut chan).1);
},
}
},
Some(Err(mut chan)) => {
let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id);
let err = ChannelError::close(err_msg);
return Err(convert_channel_err!(self, peer_state, err, &mut chan).1);
},
None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
};
let funded_channel_id = chan.context.channel_id();
macro_rules! fail_chan { ($err: expr) => { {
let err = ChannelError::close($err.to_owned());
chan.unset_funding_info();
let mut chan = Channel::from(chan);
return Err(convert_channel_err!(self, peer_state, err, &mut chan, UNFUNDED_CHANNEL).1);
} } }
match peer_state.channel_by_id.entry(funded_channel_id) {
hash_map::Entry::Occupied(_) => {
fail_chan!("Already had channel with the new channel_id");
},
hash_map::Entry::Vacant(e) => {
let monitor_res = self.chain_monitor.watch_channel(monitor.channel_id(), monitor);
if let Ok(persist_state) = monitor_res {
if let Some(msg) = funding_msg_opt {
peer_state.pending_msg_events.push(MessageSendEvent::SendFundingSigned {
node_id: *counterparty_node_id,
msg,
});
}
if let Some(funded_chan) = e.insert(Channel::from(chan)).as_funded_mut() {
handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
per_peer_state, funded_chan, INITIAL_MONITOR);
} else {
unreachable!("This must be a funded channel as we just inserted it.");
}
Ok(())
} else {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_error!(logger, "Persisting initial ChannelMonitor failed, implying the channel ID was duplicated");
fail_chan!("Duplicate channel ID");
}
}
}
}
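/// Handles a `peer_storage_retrieval` message containing our own encrypted backup. Panics if
/// the backup describes a channel at a state newer than the one we currently hold, as
/// continuing to operate with lost channel state risks loss of funds.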
fn internal_peer_storage_retrieval(
&self, peer_node_id: PublicKey, msg: msgs::PeerStorageRetrieval,
) -> Result<(), MsgHandleErrInternal> {
let logger = WithContext::from(&self.logger, Some(peer_node_id), None, None);
let err = || {
MsgHandleErrInternal::from_chan_no_close(
ChannelError::Ignore("Invalid PeerStorageRetrieval message received.".into()),
ChannelId([0; 32]),
)
};
let encrypted_ops = match EncryptedOurPeerStorage::new(msg.data) {
Ok(encrypted_ops) => encrypted_ops,
Err(()) => {
log_debug!(logger, "Received a peer backup which wasn't long enough to be valid");
return Err(err());
},
};
let decrypted = match encrypted_ops.decrypt(&self.node_signer.get_peer_storage_key()) {
Ok(decrypted_ops) => decrypted_ops.into_vec(),
Err(()) => {
log_debug!(logger, "Received a peer backup which was corrupted");
return Err(err());
},
};
log_trace!(logger, "Got valid {}-byte peer backup from {}", decrypted.len(), peer_node_id);
let per_peer_state = self.per_peer_state.read().unwrap();
let mut cursor = io::Cursor::new(decrypted);
let mon_list = <Vec<PeerStorageMonitorHolder> as Readable>::read(&mut cursor)
.unwrap_or_else(|e| {
debug_assert!(false);
log_debug!(self.logger, "Unable to unpack the retrieved peer storage {:?}", e);
Vec::new()
});
for mon_holder in mon_list.iter() {
let peer_state_mutex = match per_peer_state.get(&mon_holder.counterparty_node_id) {
Some(mutex) => mutex,
None => {
log_debug!(
logger,
"Not able to find peer_state for the counterparty {}, channel_id {}",
log_pubkey!(mon_holder.counterparty_node_id),
mon_holder.channel_id
);
continue;
},
};
let peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &*peer_state_lock;
match peer_state.channel_by_id.get(&mon_holder.channel_id) {
Some(chan) => {
if let Some(funded_chan) = chan.as_funded() {
if funded_chan.get_revoked_counterparty_commitment_transaction_number()
> mon_holder.min_seen_secret
{
panic!(
"Lost channel state for channel {}.\n\
Received peer storage with a more recent state than what our node had.\n\
Use the FundRecoverer to initiate a force close and sweep the funds.",
&mon_holder.channel_id
);
}
}
},
None => {
log_debug!(logger, "Found an unknown channel {}", &mon_holder.channel_id);
},
}
}
Ok(())
}
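/// Handles a `peer_storage` message, storing the peer's opaque backup blob for them. We only
/// store data for peers with at least one funded channel, and (outside of tests) limit it to
/// `MAX_PEER_STORAGE_SIZE` bytes.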
#[rustfmt::skip]
fn internal_peer_storage(&self, counterparty_node_id: PublicKey, msg: msgs::PeerStorage) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), ChannelId([0; 32]))
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
if !peer_state.channel_by_id.values().any(|phase| phase.is_funded()) {
log_debug!(logger, "Ignoring peer storage request from {} as we don't have any funded channels with them.", log_pubkey!(counterparty_node_id));
return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(
"Ignoring peer_storage message, as peer storage is currently supported only for \
peers with an active funded channel.".into(),
), ChannelId([0; 32])));
}
#[cfg(not(test))]
if msg.data.len() > MAX_PEER_STORAGE_SIZE {
log_debug!(logger, "Sending warning to peer and ignoring peer storage request from {} as its over 1KiB", log_pubkey!(counterparty_node_id));
return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(
format!("Supports only data up to {} bytes in peer storage.", MAX_PEER_STORAGE_SIZE)
), ChannelId([0; 32])));
}
log_trace!(logger, "Received peer_storage from {}", log_pubkey!(counterparty_node_id));
peer_state.peer_storage = msg.data;
Ok(())
}
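/// Handles a `funding_signed` message for one of our outbound V1 channels, persisting the
/// initial `ChannelMonitor` with the chain monitor before the channel can be used.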
#[rustfmt::skip]
fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
let best_block = *self.best_block.read().unwrap();
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_entry) => {
let chan = chan_entry.get_mut();
match chan
.funding_signed(&msg, best_block, &self.signer_provider, &self.logger)
.and_then(|(funded_chan, monitor)| {
self.chain_monitor
.watch_channel(funded_chan.context.channel_id(), monitor)
.map_err(|()| {
funded_chan.unset_funding_info();
ChannelError::close("Channel ID was a duplicate".to_owned())
})
.map(|persist_status| (funded_chan, persist_status))
})
{
Ok((funded_chan, persist_status)) => {
handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, funded_chan, INITIAL_MONITOR);
Ok(())
},
Err(e) => try_channel_entry!(self, peer_state, Err(e), chan_entry),
}
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
}
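/// Common handler for interactive transaction construction messages (`tx_add_input`,
/// `tx_add_output`, `tx_remove_input` and `tx_remove_output`). Looks up the channel, applies
/// `tx_msg_handler`, and queues the message it returns, surfacing an `Event::SpliceFailed` if
/// a splice negotiation was aborted.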
fn internal_tx_msg<
HandleTxMsgFn: Fn(
&mut Channel<SP>,
) -> Result<InteractiveTxMessageSend, (ChannelError, Option<SpliceFundingFailed>)>,
>(
&self, counterparty_node_id: &PublicKey, channel_id: ChannelId,
tx_msg_handler: HandleTxMsgFn,
) -> Result<NotifyOption, MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(
format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"),
channel_id,
)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(channel_id) {
hash_map::Entry::Occupied(mut chan_entry) => {
let channel = chan_entry.get_mut();
match tx_msg_handler(channel) {
Ok(msg_send) => {
let msg_send_event = msg_send.into_msg_send_event(*counterparty_node_id);
peer_state.pending_msg_events.push(msg_send_event);
Ok(NotifyOption::SkipPersistHandleEvents)
},
Err((error, splice_funding_failed)) => {
if let Some(splice_funding_failed) = splice_funding_failed {
let pending_events = &mut self.pending_events.lock().unwrap();
pending_events.push_back((events::Event::SpliceFailed {
channel_id,
counterparty_node_id: *counterparty_node_id,
user_channel_id: channel.context().get_user_id(),
abandoned_funding_txo: splice_funding_failed.funding_txo,
channel_type: splice_funding_failed.channel_type.clone(),
contributed_inputs: splice_funding_failed.contributed_inputs,
contributed_outputs: splice_funding_failed.contributed_outputs,
}, None));
}
Err(MsgHandleErrInternal::from_chan_no_close(error, channel_id))
},
}
},
hash_map::Entry::Vacant(_) => {
Err(MsgHandleErrInternal::send_err_msg_no_close(format!(
"Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
counterparty_node_id), channel_id)
)
}
}
}
fn internal_tx_add_input(
&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddInput,
) -> Result<NotifyOption, MsgHandleErrInternal> {
self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel<SP>| {
channel.tx_add_input(msg, &self.logger)
})
}
fn internal_tx_add_output(
&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddOutput,
) -> Result<NotifyOption, MsgHandleErrInternal> {
self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel<SP>| {
channel.tx_add_output(msg, &self.logger)
})
}
fn internal_tx_remove_input(
&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveInput,
) -> Result<NotifyOption, MsgHandleErrInternal> {
self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel<SP>| {
channel.tx_remove_input(msg, &self.logger)
})
}
fn internal_tx_remove_output(
&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveOutput,
) -> Result<NotifyOption, MsgHandleErrInternal> {
self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel<SP>| {
channel.tx_remove_output(msg, &self.logger)
})
}
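/// Handles a `tx_complete` message, either continuing interactive transaction construction
/// with a further message of ours or, once negotiation completes, sending our initial
/// `commitment_signed` for the new funding transaction.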
#[rustfmt::skip]
fn internal_tx_complete(&self, counterparty_node_id: PublicKey, msg: &msgs::TxComplete) -> Result<NotifyOption, MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(
format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"),
msg.channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_entry) => {
let chan = chan_entry.get_mut();
match chan.tx_complete(msg, &self.logger) {
Ok((interactive_tx_msg_send, commitment_signed)) => {
let persist = if interactive_tx_msg_send.is_some() || commitment_signed.is_some() {
NotifyOption::SkipPersistHandleEvents
} else {
NotifyOption::SkipPersistNoEvents
};
if let Some(interactive_tx_msg_send) = interactive_tx_msg_send {
let msg_send_event = interactive_tx_msg_send.into_msg_send_event(counterparty_node_id);
peer_state.pending_msg_events.push(msg_send_event);
};
if let Some(commitment_signed) = commitment_signed {
peer_state.pending_msg_events.push(MessageSendEvent::UpdateHTLCs {
node_id: counterparty_node_id,
channel_id: msg.channel_id,
updates: CommitmentUpdate {
commitment_signed: vec![commitment_signed],
update_add_htlcs: vec![],
update_fulfill_htlcs: vec![],
update_fail_htlcs: vec![],
update_fail_malformed_htlcs: vec![],
update_fee: None,
},
});
}
Ok(persist)
},
Err((error, splice_funding_failed)) => {
if let Some(splice_funding_failed) = splice_funding_failed {
let pending_events = &mut self.pending_events.lock().unwrap();
pending_events.push_back((events::Event::SpliceFailed {
channel_id: msg.channel_id,
counterparty_node_id,
user_channel_id: chan.context().get_user_id(),
abandoned_funding_txo: splice_funding_failed.funding_txo,
channel_type: splice_funding_failed.channel_type.clone(),
contributed_inputs: splice_funding_failed.contributed_inputs,
contributed_outputs: splice_funding_failed.contributed_outputs,
}, None));
}
Err(MsgHandleErrInternal::from_chan_no_close(error, msg.channel_id))
},
}
},
hash_map::Entry::Vacant(_) => {
Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
}
}
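/// Handles a `tx_signatures` message. Depending on the negotiation state this may send our
/// own `tx_signatures` and/or `splice_locked`, broadcast the fully-signed funding
/// transaction, and queue an `Event::SplicePending` for a newly negotiated splice.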
#[rustfmt::skip]
fn internal_tx_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxSignatures)
-> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(
format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"),
msg.channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_entry) => {
match chan_entry.get_mut().as_funded_mut() {
Some(chan) => {
let best_block_height = self.best_block.read().unwrap().height;
let FundingTxSigned {
tx_signatures,
funding_tx,
splice_negotiated,
splice_locked,
} = try_channel_entry!(
self,
peer_state,
chan.tx_signatures(msg, best_block_height, &self.logger),
chan_entry
);
if let Some(tx_signatures) = tx_signatures {
peer_state.pending_msg_events.push(MessageSendEvent::SendTxSignatures {
node_id: *counterparty_node_id,
msg: tx_signatures,
});
}
if let Some(splice_locked) = splice_locked {
peer_state.pending_msg_events.push(MessageSendEvent::SendSpliceLocked {
node_id: *counterparty_node_id,
msg: splice_locked,
});
}
if let Some(ref funding_tx) = funding_tx {
self.broadcast_interactive_funding(chan, funding_tx, &self.logger);
}
if let Some(splice_negotiated) = splice_negotiated {
self.pending_events.lock().unwrap().push_back((
events::Event::SplicePending {
channel_id: msg.channel_id,
counterparty_node_id: *counterparty_node_id,
user_channel_id: chan.context.get_user_id(),
new_funding_txo: splice_negotiated.funding_txo,
channel_type: splice_negotiated.channel_type,
new_funding_redeem_script: splice_negotiated.funding_redeem_script,
},
None,
));
}
},
None => {
let msg = "Got an unexpected tx_signatures message";
let reason = ClosureReason::ProcessingError { err: msg.to_owned() };
let err = ChannelError::Close((msg.to_owned(), reason));
try_channel_entry!(self, peer_state, Err(err), chan_entry)
},
}
Ok(())
},
hash_map::Entry::Vacant(_) => {
Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
}
}
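/// Handles a `tx_abort` message, echoing a `tx_abort` of our own where required and
/// surfacing an `Event::SpliceFailed` if a splice negotiation was in flight.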
#[rustfmt::skip]
fn internal_tx_abort(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAbort)
-> Result<NotifyOption, MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(
format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"),
msg.channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_entry) => {
let res = chan_entry.get_mut().tx_abort(msg, &self.logger);
let (tx_abort, splice_failed) = try_channel_entry!(self, peer_state, res, chan_entry);
let persist = if tx_abort.is_some() || splice_failed.is_some() {
NotifyOption::DoPersist
} else {
NotifyOption::SkipPersistNoEvents
};
if let Some(tx_abort_msg) = tx_abort {
peer_state.pending_msg_events.push(MessageSendEvent::SendTxAbort {
node_id: *counterparty_node_id,
msg: tx_abort_msg,
});
}
if let Some(splice_funding_failed) = splice_failed {
let pending_events = &mut self.pending_events.lock().unwrap();
pending_events.push_back((events::Event::SpliceFailed {
channel_id: msg.channel_id,
counterparty_node_id: *counterparty_node_id,
user_channel_id: chan_entry.get().context().get_user_id(),
abandoned_funding_txo: splice_funding_failed.funding_txo,
channel_type: splice_funding_failed.channel_type,
contributed_inputs: splice_funding_failed.contributed_inputs,
contributed_outputs: splice_funding_failed.contributed_outputs,
}, None));
}
Ok(persist)
},
hash_map::Entry::Vacant(_) => {
Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
}
}
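/// Handles a `channel_ready` message, responding with `announcement_signatures` for a public
/// channel or, for a private channel that is now usable, a `channel_update` sent directly to
/// our counterparty.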
#[rustfmt::skip]
fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_entry) => {
if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
let res = chan.channel_ready(
&msg,
&self.node_signer,
self.chain_hash,
&self.config.read().unwrap(),
&self.best_block.read().unwrap(),
&&logger
);
let announcement_sigs_opt =
try_channel_entry!(self, peer_state, res, chan_entry);
if let Some(announcement_sigs) = announcement_sigs_opt {
log_trace!(logger, "Sending announcement_signatures for channel {}", chan.context.channel_id());
peer_state.pending_msg_events.push(MessageSendEvent::SendAnnouncementSignatures {
node_id: counterparty_node_id.clone(),
msg: announcement_sigs,
});
} else if chan.context.is_usable() {
log_trace!(logger, "Sending private initial channel_update for our counterparty on channel {}", chan.context.channel_id());
if let Ok(msg) = self.get_channel_update_for_unicast(chan) {
peer_state.pending_msg_events.push(MessageSendEvent::SendChannelUpdate {
node_id: counterparty_node_id.clone(),
msg,
});
}
}
{
let mut pending_events = self.pending_events.lock().unwrap();
emit_initial_channel_ready_event!(pending_events, chan);
}
Ok(())
} else {
try_channel_entry!(self, peer_state, Err(ChannelError::close(
"Got a channel_ready message for an unfunded channel!".into())), chan_entry)
}
},
hash_map::Entry::Vacant(_) => {
Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
}
}
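/// Handles a `shutdown` message, replying with our own `shutdown` where appropriate and
/// failing back any HTLCs dropped as part of the cooperative close. Unfunded channels are
/// closed immediately, as there is nothing on-chain to negotiate over.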
fn internal_shutdown(
&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown,
) -> Result<(), MsgHandleErrInternal> {
let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>;
{
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(
format!(
"Can't find a peer matching the passed counterparty node_id {}",
counterparty_node_id
),
msg.channel_id,
)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let hash_map::Entry::Occupied(mut chan_entry) =
peer_state.channel_by_id.entry(msg.channel_id.clone())
{
match chan_entry.get_mut().as_funded_mut() {
Some(chan) => {
if !chan.received_shutdown() {
let logger =
WithChannelContext::from(&self.logger, &chan.context, None);
log_info!(logger, "Received a shutdown message from our counterparty for channel {}{}.",
msg.channel_id,
if chan.sent_shutdown() { " after we initiated shutdown" } else { "" });
}
let funding_txo_opt = chan.funding.get_funding_txo();
let (shutdown, monitor_update_opt, htlcs) = try_channel_entry!(
self,
peer_state,
chan.shutdown(&self.signer_provider, &peer_state.latest_features, &msg),
chan_entry
);
dropped_htlcs = htlcs;
if let Some(msg) = shutdown {
peer_state.pending_msg_events.push(MessageSendEvent::SendShutdown {
node_id: *counterparty_node_id,
msg,
});
}
if let Some(monitor_update) = monitor_update_opt {
handle_new_monitor_update!(
self,
funding_txo_opt.unwrap(),
monitor_update,
peer_state_lock,
peer_state,
per_peer_state,
chan
);
}
},
None => {
let logger = WithChannelContext::from(
&self.logger,
chan_entry.get().context(),
None,
);
log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
let reason = ClosureReason::CounterpartyCoopClosedUnfundedChannel;
let err = ChannelError::Close((reason.to_string(), reason));
let mut chan = chan_entry.remove();
let (_, mut e) = convert_channel_err!(self, peer_state, err, &mut chan);
e.dont_send_error_message();
return Err(e);
},
}
} else {
return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id));
}
}
for htlc_source in dropped_htlcs.drain(..) {
let receiver = HTLCHandlingFailureType::Forward {
node_id: Some(counterparty_node_id.clone()),
channel_id: msg.channel_id,
};
let reason = HTLCFailReason::from_failure_code(LocalHTLCFailureReason::ChannelClosed);
let (source, hash) = htlc_source;
self.fail_htlc_backwards_internal(&source, &hash, &reason, receiver, None);
}
Ok(())
}
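/// Handles a `closing_signed` message as part of cooperative-close fee negotiation. Once both
/// parties agree on a fee, the closing transaction is broadcast and the channel removed.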
fn internal_closing_signed(
&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned,
) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(
format!(
"Can't find a peer matching the passed counterparty node_id {}",
counterparty_node_id
),
msg.channel_id,
)
})?;
let logger;
let tx_err: Option<(_, Result<Infallible, _>)> = {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_entry) => {
if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
logger = WithChannelContext::from(&self.logger, &chan.context, None);
let res = chan.closing_signed(&self.fee_estimator, &msg, &&logger);
let (closing_signed, tx_shutdown_result) =
try_channel_entry!(self, peer_state, res, chan_entry);
debug_assert_eq!(tx_shutdown_result.is_some(), chan.is_shutdown());
if let Some(msg) = closing_signed {
peer_state.pending_msg_events.push(MessageSendEvent::SendClosingSigned {
node_id: counterparty_node_id.clone(),
msg,
});
}
if let Some((tx, close_res)) = tx_shutdown_result {
let err = convert_channel_err!(self, peer_state, close_res, chan, COOP_CLOSED);
chan_entry.remove();
Some((tx, Err(err)))
} else {
None
}
} else {
return try_channel_entry!(self, peer_state, Err(ChannelError::close(
"Got a closing_signed message for an unfunded channel!".into())), chan_entry);
}
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
mem::drop(per_peer_state);
if let Some((broadcast_tx, err)) = tx_err {
log_info!(logger, "Broadcasting {}", log_tx!(broadcast_tx));
self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
let _ = handle_error!(self, err, *counterparty_node_id);
}
Ok(())
}
#[cfg(simple_close)]
fn internal_closing_complete(
&self, _counterparty_node_id: PublicKey, _msg: msgs::ClosingComplete,
) -> Result<(), MsgHandleErrInternal> {
unimplemented!("Handling ClosingComplete is not implemented");
}
#[cfg(simple_close)]
fn internal_closing_sig(
&self, _counterparty_node_id: PublicKey, _msg: msgs::ClosingSig,
) -> Result<(), MsgHandleErrInternal> {
unimplemented!("Handling ClosingSig is not implemented");
}
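/// Handles an `update_add_htlc` message, validating the incoming HTLC and adding it to the
/// channel's pending set.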
#[rustfmt::skip]
fn internal_update_add_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_entry) => {
if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
try_channel_entry!(self, peer_state, chan.update_add_htlc(&msg, &self.fee_estimator), chan_entry);
} else {
return try_channel_entry!(self, peer_state, Err(ChannelError::close(
"Got an update_add_htlc message for an unfunded channel!".into())), chan_entry);
}
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
Ok(())
}
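/// Handles an `update_fulfill_htlc` message, claiming the corresponding inbound HTLC with the
/// provided preimage. The next `revoke_and_ack` is held back until the preimage has been
/// durably persisted in the inbound edge's `ChannelMonitor`.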
fn internal_update_fulfill_htlc(
&self, counterparty_node_id: &PublicKey, msg: msgs::UpdateFulfillHTLC,
) -> Result<(), MsgHandleErrInternal> {
let funding_txo;
let next_user_channel_id;
let (htlc_source, forwarded_htlc_value, skimmed_fee_msat, send_timestamp) = {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(
format!(
"Can't find a peer matching the passed counterparty node_id {}",
counterparty_node_id
),
msg.channel_id,
)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_entry) => {
if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
let res = try_channel_entry!(self, peer_state, chan.update_fulfill_htlc(&msg), chan_entry);
if let HTLCSource::PreviousHopData(prev_hop) = &res.0 {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger,
"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
msg.channel_id);
peer_state.actions_blocking_raa_monitor_updates.entry(msg.channel_id)
.or_insert_with(Vec::new)
.push(RAAMonitorUpdateBlockingAction::from_prev_hop_data(&prev_hop));
}
funding_txo = chan.funding.get_funding_txo().expect("We won't accept a fulfill until funded");
next_user_channel_id = chan.context.get_user_id();
res
} else {
return try_channel_entry!(self, peer_state, Err(ChannelError::close(
"Got an update_fulfill_htlc message for an unfunded channel!".into())), chan_entry);
}
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
self.claim_funds_internal(
htlc_source,
msg.payment_preimage.clone(),
Some(forwarded_htlc_value),
skimmed_fee_msat,
false,
*counterparty_node_id,
funding_txo,
msg.channel_id,
Some(next_user_channel_id),
msg.attribution_data,
send_timestamp,
);
Ok(())
}
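/// Handles an `update_fail_htlc` message, recording the failure so that it can be propagated
/// back towards the payment's origin.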
#[rustfmt::skip]
fn internal_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_entry) => {
if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
try_channel_entry!(self, peer_state, chan.update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan_entry);
} else {
return try_channel_entry!(self, peer_state, Err(ChannelError::close(
"Got an update_fail_htlc message for an unfunded channel!".into())), chan_entry);
}
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
Ok(())
}
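/// Handles an `update_fail_malformed_htlc` message, converting it into a failure which can be
/// propagated back towards the payment's origin.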
#[rustfmt::skip]
fn internal_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_entry) => {
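// The BADONION bit (0x8000) must be set in update_fail_malformed_htlc's failure_code, as
// the message indicates the onion itself could not be processed.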
if (msg.failure_code & 0x8000) == 0 {
let chan_err = ChannelError::close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
try_channel_entry!(self, peer_state, Err(chan_err), chan_entry);
}
if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
try_channel_entry!(self, peer_state, chan.update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code.into(), msg.sha256_of_onion.to_vec())), chan_entry);
} else {
return try_channel_entry!(self, peer_state, Err(ChannelError::close(
"Got an update_fail_malformed_htlc message for an unfunded channel!".into())), chan_entry);
}
Ok(())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
}
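/// Handles a `commitment_signed` message, validating the new commitment signatures and
/// persisting the resulting `ChannelMonitorUpdate` (or, for a just-funded V2 channel, the
/// initial `ChannelMonitor`).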
#[rustfmt::skip]
fn internal_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> {
let best_block = *self.best_block.read().unwrap();
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_entry) => {
let chan = chan_entry.get_mut();
let logger = WithChannelContext::from(&self.logger, &chan.context(), None);
let funding_txo = chan.funding().get_funding_txo();
let (monitor_opt, monitor_update_opt) = try_channel_entry!(
self, peer_state, chan.commitment_signed(msg, best_block, &self.signer_provider, &self.fee_estimator, &&logger),
chan_entry);
if let Some(chan) = chan.as_funded_mut() {
if let Some(monitor) = monitor_opt {
let monitor_res = self.chain_monitor.watch_channel(monitor.channel_id(), monitor);
if let Ok(persist_state) = monitor_res {
handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
per_peer_state, chan, INITIAL_MONITOR);
} else {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_error!(logger, "Persisting initial ChannelMonitor failed, implying the channel ID was duplicated");
let msg = "Channel ID was a duplicate";
let reason = ClosureReason::ProcessingError { err: msg.to_owned() };
let err = ChannelError::Close((msg.to_owned(), reason));
try_channel_entry!(self, peer_state, Err(err), chan_entry)
}
} else if let Some(monitor_update) = monitor_update_opt {
handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
peer_state, per_peer_state, chan);
}
}
Ok(())
},
hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
}
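/// Handles a batch of `commitment_signed` messages covering multiple pending funding
/// transactions, as used while a splice is pending confirmation.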
#[rustfmt::skip]
fn internal_commitment_signed_batch(&self, counterparty_node_id: &PublicKey, channel_id: ChannelId, batch: Vec<msgs::CommitmentSigned>) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(channel_id) {
hash_map::Entry::Occupied(mut chan_entry) => {
let chan = chan_entry.get_mut();
let logger = WithChannelContext::from(&self.logger, &chan.context(), None);
let funding_txo = chan.funding().get_funding_txo();
if let Some(chan) = chan.as_funded_mut() {
let monitor_update_opt = try_channel_entry!(
self, peer_state, chan.commitment_signed_batch(batch, &self.fee_estimator, &&logger), chan_entry
);
if let Some(monitor_update) = monitor_update_opt {
handle_new_monitor_update!(
self, funding_txo.unwrap(), monitor_update, peer_state_lock, peer_state,
per_peer_state, chan
);
}
}
Ok(())
},
hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), channel_id))
}
}
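/// Queues received `update_add_htlc`s, keyed by the source channel's outbound SCID alias, for
/// later decoding and forwarding.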
fn push_decode_update_add_htlcs(&self, mut update_add_htlcs: (u64, Vec<msgs::UpdateAddHTLC>)) {
let mut decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
let src_outbound_scid_alias = update_add_htlcs.0;
match decode_update_add_htlcs.entry(src_outbound_scid_alias) {
hash_map::Entry::Occupied(mut e) => {
e.get_mut().append(&mut update_add_htlcs.1);
},
hash_map::Entry::Vacant(e) => {
e.insert(update_add_htlcs.1);
},
}
}
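/// Queues pending HTLC forwards from each source channel. Held HTLCs and forwards over valid
/// intercept SCIDs are diverted to the pending-intercepts map (emitting `HTLCIntercepted`
/// events where applicable); everything else goes into the per-SCID forward queue. Anything
/// which cannot be queued is failed back.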
#[inline]
fn forward_htlcs(&self, per_source_pending_forwards: &mut [PerSourcePendingForward]) {
for &mut (
prev_outbound_scid_alias,
prev_counterparty_node_id,
prev_funding_outpoint,
prev_channel_id,
prev_user_channel_id,
ref mut pending_forwards,
) in per_source_pending_forwards
{
let mut new_intercept_events = VecDeque::new();
let mut failed_intercept_forwards = Vec::new();
if !pending_forwards.is_empty() {
for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
let scid = match forward_info.routing {
PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
PendingHTLCRouting::TrampolineForward { .. } => 0,
PendingHTLCRouting::Receive { .. } => 0,
PendingHTLCRouting::ReceiveKeysend { .. } => 0,
};
let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid);
let payment_hash = forward_info.payment_hash;
let logger = WithContext::from(
&self.logger,
None,
Some(prev_channel_id),
Some(payment_hash),
);
let pending_add = PendingAddHTLCInfo {
prev_outbound_scid_alias,
prev_counterparty_node_id,
prev_funding_outpoint,
prev_channel_id,
prev_htlc_id,
prev_user_channel_id,
forward_info,
};
let mut fail_intercepted_htlc = |pending_add: PendingAddHTLCInfo| {
let htlc_source =
HTLCSource::PreviousHopData(pending_add.htlc_previous_hop_data());
let reason = HTLCFailReason::from_failure_code(
LocalHTLCFailureReason::UnknownNextPeer,
);
let failure_type = HTLCHandlingFailureType::InvalidForward {
requested_forward_scid: scid,
};
failed_intercept_forwards.push((
htlc_source,
payment_hash,
reason,
failure_type,
));
};
if pending_add.forward_info.routing.should_hold_htlc() {
let intercept_id = InterceptId::from_htlc_id_and_chan_id(
prev_htlc_id,
&prev_channel_id,
&prev_counterparty_node_id,
);
let mut held_htlcs = self.pending_intercepted_htlcs.lock().unwrap();
match held_htlcs.entry(intercept_id) {
hash_map::Entry::Vacant(entry) => {
log_trace!(
logger,
"Intercepted held HTLC with id {}, holding until the recipient is online",
intercept_id
);
entry.insert(pending_add);
},
hash_map::Entry::Occupied(_) => {
debug_assert!(false, "Should never have two HTLCs with the same channel id and htlc id");
fail_intercepted_htlc(pending_add);
},
}
} else if !is_our_scid
&& pending_add.forward_info.incoming_amt_msat.is_some()
&& fake_scid::is_valid_intercept(
&self.fake_scid_rand_bytes,
scid,
&self.chain_hash,
) {
let intercept_id = InterceptId::from_incoming_shared_secret(
&pending_add.forward_info.incoming_shared_secret,
);
let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
match pending_intercepts.entry(intercept_id) {
hash_map::Entry::Vacant(entry) => {
new_intercept_events.push_back((
events::Event::HTLCIntercepted {
requested_next_hop_scid: scid,
payment_hash,
inbound_amount_msat: pending_add
.forward_info
.incoming_amt_msat
.unwrap(),
expected_outbound_amount_msat: pending_add
.forward_info
.outgoing_amt_msat,
intercept_id,
},
None,
));
entry.insert(pending_add);
},
hash_map::Entry::Occupied(_) => {
log_info!(
logger,
"Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}",
scid
);
fail_intercepted_htlc(pending_add);
},
}
} else {
match self.forward_htlcs.lock().unwrap().entry(scid) {
hash_map::Entry::Occupied(mut entry) => {
entry.get_mut().push(HTLCForwardInfo::AddHTLC(pending_add));
},
hash_map::Entry::Vacant(entry) => {
entry.insert(vec![HTLCForwardInfo::AddHTLC(pending_add)]);
},
}
}
}
}
for (htlc_source, payment_hash, failure_reason, destination) in
failed_intercept_forwards.drain(..)
{
self.fail_htlc_backwards_internal(
&htlc_source,
&payment_hash,
&failure_reason,
destination,
None,
);
}
if !new_intercept_events.is_empty() {
let mut events = self.pending_events.lock().unwrap();
events.append(&mut new_intercept_events);
}
}
}
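/// Returns whether `ChannelMonitorUpdate`s which would release a `revoke_and_ack` are
/// currently blocked for the given channel, either by explicit blocking actions or by a
/// pending event whose completion will release the update.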
#[rustfmt::skip]
fn raa_monitor_updates_held(&self,
actions_blocking_raa_monitor_updates: &BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
channel_id: ChannelId, counterparty_node_id: PublicKey,
) -> bool {
actions_blocking_raa_monitor_updates
.get(&channel_id).map(|v| !v.is_empty()).unwrap_or(false)
|| self.pending_events.lock().unwrap().iter().any(|(_, action)| {
if let Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
channel_funding_outpoint: _,
channel_id: ev_channel_id,
counterparty_node_id: ev_counterparty_node_id
}) = action {
*ev_channel_id == channel_id && *ev_counterparty_node_id == counterparty_node_id
} else {
false
}
})
}
#[cfg(any(test, feature = "_test_utils"))]
pub(crate) fn test_raa_monitor_updates_held(
&self, counterparty_node_id: PublicKey, channel_id: ChannelId,
) -> bool {
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state_lck = peer_state_mtx.lock().unwrap();
let peer_state = &mut *peer_state_lck;
assert!(peer_state.channel_by_id.contains_key(&channel_id));
return self.raa_monitor_updates_held(
&peer_state.actions_blocking_raa_monitor_updates,
channel_id,
counterparty_node_id,
);
}
false
}
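/// Handles a `revoke_and_ack` message, applying the resulting `ChannelMonitorUpdate` unless
/// it is blocked, failing back any freed holding-cell HTLCs, and sending
/// `held_htlc_available` messages for any newly locked-in async payments.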
#[rustfmt::skip]
fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
let (htlcs_to_fail, static_invoices) = {
let per_peer_state = self.per_peer_state.read().unwrap();
let mut peer_state_lock = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id)
}).map(|mtx| mtx.lock().unwrap())?;
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_entry) => {
if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
let funding_txo_opt = chan.funding.get_funding_txo();
let mon_update_blocked = self.raa_monitor_updates_held(
&peer_state.actions_blocking_raa_monitor_updates, msg.channel_id,
*counterparty_node_id);
let (htlcs_to_fail, static_invoices, monitor_update_opt) = try_channel_entry!(self, peer_state,
chan.revoke_and_ack(&msg, &self.fee_estimator, &&logger, mon_update_blocked), chan_entry);
if let Some(monitor_update) = monitor_update_opt {
let funding_txo = funding_txo_opt
.expect("Funding outpoint must have been set for RAA handling to succeed");
handle_new_monitor_update!(self, funding_txo, monitor_update,
peer_state_lock, peer_state, per_peer_state, chan);
}
(htlcs_to_fail, static_invoices)
} else {
return try_channel_entry!(self, peer_state, Err(ChannelError::close(
"Got a revoke_and_ack message for an unfunded channel!".into())), chan_entry);
}
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id);
for (static_invoice, reply_path) in static_invoices {
let res = self.flow.enqueue_held_htlc_available(&static_invoice, HeldHtlcReplyPath::ToCounterparty { path: reply_path });
debug_assert!(res.is_ok(), "enqueue_held_htlc_available can only fail for non-async senders");
}
Ok(())
}
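/// Handles an `update_fee` message, validating the funder's proposed feerate against our fee
/// estimator.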
#[rustfmt::skip]
fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_entry) => {
if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
try_channel_entry!(self, peer_state, chan.update_fee(&self.fee_estimator, &msg, &&logger), chan_entry);
} else {
return try_channel_entry!(self, peer_state, Err(ChannelError::close(
"Got an update_fee message for an unfunded channel!".into())), chan_entry);
}
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
Ok(())
}
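/// Handles an `stfu` (quiescence) message, responding with our own `stfu` or, if we initiated
/// quiescence in order to splice, a `splice_init`. Returns whether a message was queued in
/// response.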
#[rustfmt::skip]
fn internal_stfu(&self, counterparty_node_id: &PublicKey, msg: &msgs::Stfu) -> Result<bool, MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(
format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"),
msg.channel_id
)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
if !self.init_features().supports_quiescence() {
return Err(MsgHandleErrInternal::from_chan_no_close(
ChannelError::Warn("Quiescense not supported".to_string()), msg.channel_id
));
}
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_entry) => {
if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
let logger = WithContext::from(
&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), None
);
let res = chan.stfu(&msg, &&logger);
let resp = try_channel_entry!(self, peer_state, res, chan_entry);
match resp {
None => Ok(false),
Some(StfuResponse::Stfu(msg)) => {
peer_state.pending_msg_events.push(MessageSendEvent::SendStfu {
node_id: *counterparty_node_id,
msg,
});
Ok(true)
},
Some(StfuResponse::SpliceInit(msg)) => {
peer_state.pending_msg_events.push(MessageSendEvent::SendSpliceInit {
node_id: *counterparty_node_id,
msg,
});
Ok(true)
},
}
} else {
let msg = "Peer sent `stfu` for an unfunded channel";
let err = Err(ChannelError::Close(
(msg.into(), ClosureReason::ProcessingError { err: msg.into() })
));
return try_channel_entry!(self, peer_state, err, chan_entry);
}
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(
format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id),
msg.channel_id
))
}
}
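/// Handles an `announcement_signatures` message, broadcasting the completed
/// `channel_announcement` (along with a fresh `channel_update`) once both parties' signatures
/// are available.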
#[rustfmt::skip]
fn internal_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_entry) => {
if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
if !chan.context.is_usable() {
return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
}
let cur_height = self.best_block.read().unwrap().height;
let res = chan.announcement_signatures(
&self.node_signer,
self.chain_hash,
cur_height,
msg,
&self.config.read().unwrap(),
);
peer_state.pending_msg_events.push(MessageSendEvent::BroadcastChannelAnnouncement {
msg: try_channel_entry!(self, peer_state, res, chan_entry),
update_msg: Some(self.get_channel_update_for_broadcast(chan).unwrap()),
});
} else {
return try_channel_entry!(self, peer_state, Err(ChannelError::close(
"Got an announcement_signatures message for an unfunded channel!".into())), chan_entry);
}
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
Ok(())
}
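/// Handles a `channel_update` received directly from our counterparty, tracking their latest
/// forwarding parameters for the channel. Updates for the direction we ourselves control are
/// ignored.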
#[rustfmt::skip]
fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
let (chan_counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&msg.contents.short_channel_id) {
Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
None => {
return Ok(NotifyOption::SkipPersistNoEvents)
}
};
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(&chan_counterparty_node_id);
if peer_state_mutex_opt.is_none() {
return Ok(NotifyOption::SkipPersistNoEvents)
}
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(chan_id) {
hash_map::Entry::Occupied(mut chan_entry) => {
if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
if chan.context.get_counterparty_node_id() != *counterparty_node_id {
if chan.context.should_announce() {
return Ok(NotifyOption::SkipPersistNoEvents);
}
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
}
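// The low bit of channel_flags encodes the update's direction. Updates for the direction we
// control are generated locally, so only the counterparty's direction is of interest here.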
let were_node_one = self.get_our_node_id().serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
let msg_from_node_one = msg.contents.channel_flags & 1 == 0;
if were_node_one == msg_from_node_one {
return Ok(NotifyOption::SkipPersistNoEvents);
} else {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_debug!(logger, "Received channel_update {:?} for channel {}.", msg, chan_id);
let did_change = try_channel_entry!(self, peer_state, chan.channel_update(&msg), chan_entry);
if !did_change {
return Ok(NotifyOption::SkipPersistNoEvents);
}
}
} else {
return try_channel_entry!(self, peer_state, Err(ChannelError::close(
"Got a channel_update for an unfunded channel!".into())), chan_entry);
}
},
hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersistNoEvents)
}
Ok(NotifyOption::DoPersist)
}
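/// Handles an incoming `channel_reestablish`, resuming the channel and replaying any messages the
/// counterparty may have missed. A `channel_reestablish` for an unknown channel is answered with
/// a bogus `channel_reestablish` to induce the peer to force-close.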
#[rustfmt::skip]
fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
let (inferred_splice_locked, need_lnd_workaround) = {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(
format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"),
msg.channel_id
)
})?;
let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), None);
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_entry) => {
if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
let outbound_scid_alias = chan.context.outbound_scid_alias();
let res = chan.channel_reestablish(
msg,
&&logger,
&self.node_signer,
self.chain_hash,
&self.config.read().unwrap(),
&*self.best_block.read().unwrap(),
|htlc_id| self.path_for_release_held_htlc(htlc_id, outbound_scid_alias, &msg.channel_id, counterparty_node_id)
);
let responses = try_channel_entry!(self, peer_state, res, chan_entry);
let mut channel_update = None;
if let Some(msg) = responses.shutdown_msg {
peer_state.pending_msg_events.push(MessageSendEvent::SendShutdown {
node_id: counterparty_node_id.clone(),
msg,
});
} else if chan.context.is_usable() {
if let Ok(msg) = self.get_channel_update_for_unicast(chan) {
channel_update = Some(MessageSendEvent::SendChannelUpdate {
node_id: chan.context.get_counterparty_node_id(),
msg,
});
}
}
let need_lnd_workaround = chan.context.workaround_lnd_bug_4006.take();
let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption(
&mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.commitment_order,
Vec::new(), Vec::new(), None, responses.channel_ready, responses.announcement_sigs,
responses.tx_signatures, responses.tx_abort, responses.channel_ready_order,
);
debug_assert!(htlc_forwards.is_none());
debug_assert!(decode_update_add_htlcs.is_none());
if let Some(upd) = channel_update {
peer_state.pending_msg_events.push(upd);
}
(responses.inferred_splice_locked, need_lnd_workaround)
} else {
return try_channel_entry!(self, peer_state, Err(ChannelError::close(
"Got a channel_reestablish message for an unfunded channel!".into())), chan_entry);
}
},
hash_map::Entry::Vacant(_) => {
log_debug!(logger, "Sending bogus ChannelReestablish for unknown channel {} to force channel closure",
msg.channel_id);
peer_state.pending_msg_events.push(MessageSendEvent::SendChannelReestablish {
node_id: *counterparty_node_id,
msg: msgs::ChannelReestablish {
channel_id: msg.channel_id,
next_local_commitment_number: 0,
next_remote_commitment_number: 0,
your_last_per_commitment_secret: [1u8; 32],
my_current_per_commitment_point: PublicKey::from_slice(&[2u8; 33]).unwrap(),
next_funding: None,
my_current_funding_locked: None,
},
});
return Err(MsgHandleErrInternal::send_err_msg_no_close(
format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
counterparty_node_id), msg.channel_id)
)
}
}
};
if let Some(channel_ready_msg) = need_lnd_workaround {
self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
}
if let Some(splice_locked) = inferred_splice_locked {
self.internal_splice_locked(counterparty_node_id, &splice_locked)?;
}
Ok(())
}
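/// Handles an incoming `splice_init`, responding with `splice_ack` if inbound splices are allowed
/// and the channel is funded. We currently contribute no funds of our own to inbound splices.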
#[rustfmt::skip]
fn internal_splice_init(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceInit) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let our_funding_contribution = 0i64;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!(
"Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}, channel_id {}",
counterparty_node_id, msg.channel_id,
), msg.channel_id)),
hash_map::Entry::Occupied(mut chan_entry) => {
if self.config.read().unwrap().reject_inbound_splices {
let err = ChannelError::WarnAndDisconnect(
"Inbound channel splices are currently not allowed".to_owned()
);
return Err(MsgHandleErrInternal::from_chan_no_close(err, msg.channel_id));
}
if let Some(ref mut funded_channel) = chan_entry.get_mut().as_funded_mut() {
let init_res = funded_channel.splice_init(
msg, our_funding_contribution, &self.signer_provider, &self.entropy_source,
&self.get_our_node_id(), &self.logger
);
let splice_ack_msg = try_channel_entry!(self, peer_state, init_res, chan_entry);
peer_state.pending_msg_events.push(MessageSendEvent::SendSpliceAck {
node_id: *counterparty_node_id,
msg: splice_ack_msg,
});
Ok(())
} else {
try_channel_entry!(self, peer_state, Err(ChannelError::close("Channel is not funded, cannot be spliced".into())), chan_entry)
}
},
}
}
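/// Handles an incoming `splice_ack` for a splice we initiated, kicking off interactive
/// transaction construction with the first `tx_*` message, if one is due.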
#[rustfmt::skip]
fn internal_splice_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceAck) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!(
"Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
counterparty_node_id
), msg.channel_id)),
hash_map::Entry::Occupied(mut chan_entry) => {
if let Some(ref mut funded_channel) = chan_entry.get_mut().as_funded_mut() {
let splice_ack_res = funded_channel.splice_ack(
msg, &self.signer_provider, &self.entropy_source,
&self.get_our_node_id(), &self.logger
);
let tx_msg_opt = try_channel_entry!(self, peer_state, splice_ack_res, chan_entry);
if let Some(tx_msg) = tx_msg_opt {
peer_state.pending_msg_events.push(tx_msg.into_msg_send_event(counterparty_node_id.clone()));
}
Ok(())
} else {
try_channel_entry!(self, peer_state, Err(ChannelError::close("Channel is not funded, cannot be spliced".into())), chan_entry)
}
},
}
}
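/// Handles an incoming `splice_locked`. Once both sides have locked the new funding, the spliced
/// funding is promoted: the SCID map is updated, `ChannelReady` and `DiscardFunding` events are
/// emitted, `announcement_signatures` is sent when due, and any resulting monitor update is
/// applied.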
fn internal_splice_locked(
&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceLocked,
) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(
format!(
"Can't find a peer matching the passed counterparty node_id {}",
counterparty_node_id
),
msg.channel_id,
)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Vacant(_) => {
let err = format!(
"Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
counterparty_node_id,
);
return Err(MsgHandleErrInternal::send_err_msg_no_close(err, msg.channel_id));
},
hash_map::Entry::Occupied(mut chan_entry) => {
if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
let result = chan.splice_locked(
msg,
&self.node_signer,
self.chain_hash,
&self.config.read().unwrap(),
self.best_block.read().unwrap().height,
&&logger,
);
let splice_promotion = try_channel_entry!(self, peer_state, result, chan_entry);
if let Some(splice_promotion) = splice_promotion {
{
let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
insert_short_channel_id!(short_to_chan_info, chan);
}
{
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push_back((
events::Event::ChannelReady {
channel_id: chan.context.channel_id(),
user_channel_id: chan.context.get_user_id(),
counterparty_node_id: chan.context.get_counterparty_node_id(),
funding_txo: Some(
splice_promotion.funding_txo.into_bitcoin_outpoint(),
),
channel_type: chan.funding.get_channel_type().clone(),
},
None,
));
splice_promotion.discarded_funding.into_iter().for_each(
|funding_info| {
let event = Event::DiscardFunding {
channel_id: chan.context.channel_id(),
funding_info,
};
pending_events.push_back((event, None));
},
);
}
if let Some(announcement_sigs) = splice_promotion.announcement_sigs {
log_trace!(
logger,
"Sending announcement_signatures for channel {}",
chan.context.channel_id()
);
peer_state.pending_msg_events.push(
MessageSendEvent::SendAnnouncementSignatures {
node_id: counterparty_node_id.clone(),
msg: announcement_sigs,
},
);
}
if let Some(monitor_update) = splice_promotion.monitor_update {
handle_new_monitor_update!(
self,
splice_promotion.funding_txo,
monitor_update,
peer_state_lock,
peer_state,
per_peer_state,
chan
);
}
}
} else {
return Err(MsgHandleErrInternal::send_err_msg_no_close(
"Channel is not funded, cannot splice".to_owned(),
msg.channel_id,
));
}
},
};
Ok(())
}
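/// Drains events generated by our `ChannelMonitor`s, claiming or failing the corresponding HTLCs
/// backwards, closing channels the monitor has resolved on-chain, and completing pending monitor
/// updates. Returns whether any monitor events were pending.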
fn process_pending_monitor_events(&self) -> bool {
debug_assert!(self.total_consistency_lock.try_write().is_err());
let mut failed_channels: Vec<(Result<Infallible, _>, _)> = Vec::new();
let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
let has_pending_monitor_events = !pending_monitor_events.is_empty();
for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in
pending_monitor_events.drain(..)
{
for monitor_event in monitor_events.drain(..) {
match monitor_event {
MonitorEvent::HTLCEvent(htlc_update) => {
let logger = WithContext::from(
&self.logger,
Some(counterparty_node_id),
Some(channel_id),
Some(htlc_update.payment_hash),
);
if let Some(preimage) = htlc_update.payment_preimage {
log_trace!(
logger,
"Claiming HTLC with preimage {} from our monitor",
preimage
);
self.claim_funds_internal(
htlc_update.source,
preimage,
htlc_update.htlc_value_satoshis.map(|v| v * 1000),
None,
true,
counterparty_node_id,
funding_outpoint,
channel_id,
None,
None,
None,
);
} else {
log_trace!(
logger,
"Failing HTLC with hash {} from our monitor",
&htlc_update.payment_hash
);
let failure_reason = LocalHTLCFailureReason::OnChainTimeout;
let receiver = HTLCHandlingFailureType::Forward {
node_id: Some(counterparty_node_id),
channel_id,
};
let reason = HTLCFailReason::from_failure_code(failure_reason);
let completion_update = Some(PaymentCompleteUpdate {
counterparty_node_id,
channel_funding_outpoint: funding_outpoint,
channel_id,
htlc_id: SentHTLCId::from_source(&htlc_update.source),
});
self.fail_htlc_backwards_internal(
&htlc_update.source,
&htlc_update.payment_hash,
&reason,
receiver,
completion_update,
);
}
},
MonitorEvent::HolderForceClosed(_)
| MonitorEvent::HolderForceClosedWithInfo { .. } => {
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let hash_map::Entry::Occupied(chan_entry) =
peer_state.channel_by_id.entry(channel_id)
{
let reason = if let MonitorEvent::HolderForceClosedWithInfo {
reason,
..
} = monitor_event
{
reason
} else {
ClosureReason::HolderForceClosed {
broadcasted_latest_txn: Some(true),
message: "Legacy ChannelMonitor closure".to_owned(),
}
};
let err = ChannelError::Close((reason.to_string(), reason));
let mut chan = chan_entry.remove();
let (_, e) = convert_channel_err!(self, peer_state, err, &mut chan);
failed_channels.push((Err(e), counterparty_node_id));
}
}
},
MonitorEvent::CommitmentTxConfirmed(_) => {
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let hash_map::Entry::Occupied(chan_entry) =
peer_state.channel_by_id.entry(channel_id)
{
let reason = ClosureReason::CommitmentTxConfirmed;
let err = ChannelError::Close((reason.to_string(), reason));
let mut chan = chan_entry.remove();
let (_, e) = convert_channel_err!(self, peer_state, err, &mut chan);
failed_channels.push((Err(e), counterparty_node_id));
}
}
},
MonitorEvent::Completed { channel_id, monitor_update_id, .. } => {
self.channel_monitor_updated(
&channel_id,
Some(monitor_update_id),
&counterparty_node_id,
);
},
}
}
}
for (err, counterparty_node_id) in failed_channels {
let _ = handle_error!(self, err, counterparty_node_id);
}
has_pending_monitor_events
}
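/// Attempts to free any HTLCs sitting in channel holding cells, applying monitor updates and
/// failing HTLCs backwards as required. Returns whether anything changed (and thus whether we
/// should persist).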
fn check_free_holding_cells(&self) -> bool {
let mut has_monitor_update = false;
let mut failed_htlcs = Vec::new();
'peer_loop: loop {
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
'chan_loop: loop {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
for (channel_id, chan) in
peer_state.channel_by_id.iter_mut().filter_map(|(chan_id, chan)| {
chan.as_funded_mut().map(|chan| (chan_id, chan))
}) {
let counterparty_node_id = chan.context.get_counterparty_node_id();
let funding_txo = chan.funding.get_funding_txo();
let (monitor_opt, holding_cell_failed_htlcs) = chan
.maybe_free_holding_cell_htlcs(
&self.fee_estimator,
&&WithChannelContext::from(&self.logger, &chan.context, None),
);
if !holding_cell_failed_htlcs.is_empty() {
failed_htlcs.push((
holding_cell_failed_htlcs,
*channel_id,
counterparty_node_id,
));
}
if let Some(monitor_update) = monitor_opt {
has_monitor_update = true;
handle_new_monitor_update!(
self,
funding_txo.unwrap(),
monitor_update,
peer_state_lock,
peer_state,
per_peer_state,
chan
);
continue 'peer_loop;
}
}
break 'chan_loop;
}
}
break 'peer_loop;
}
let has_update = has_monitor_update || !failed_htlcs.is_empty();
for (failures, channel_id, counterparty_node_id) in failed_htlcs.drain(..) {
self.fail_holding_cell_htlcs(failures, channel_id, &counterparty_node_id);
}
has_update
}
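/// Indicates that one or more previously-pending asynchronous signer operations have completed,
/// retrying any channel handshake, commitment, or closing messages which were blocked on a
/// signature. If `channel_opt` is `Some`, only the given channel is checked; otherwise all
/// channels with all peers are.
///
/// A minimal usage sketch (`channel_manager`, `counterparty_node_id`, and `channel_id` are
/// assumed to be supplied by the caller):
///
/// ```ignore
/// // Once the async signer has produced the signature it previously reported as pending:
/// channel_manager.signer_unblocked(Some((counterparty_node_id, channel_id)));
/// // Or, to retry every channel with every peer:
/// channel_manager.signer_unblocked(None);
/// ```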
#[rustfmt::skip]
pub fn signer_unblocked(&self, channel_opt: Option<(PublicKey, ChannelId)>) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let unblock_chan = |chan: &mut Channel<SP>, pending_msg_events: &mut Vec<MessageSendEvent>| -> Option<ShutdownResult> {
let channel_id = chan.context().channel_id();
let outbound_scid_alias = chan.context().outbound_scid_alias();
let logger = WithChannelContext::from(&self.logger, &chan.context(), None);
let node_id = chan.context().get_counterparty_node_id();
if let Some(msgs) = chan.signer_maybe_unblocked(
self.chain_hash, &&logger,
|htlc_id| self.path_for_release_held_htlc(htlc_id, outbound_scid_alias, &channel_id, &node_id)
) {
if chan.context().is_connected() {
if let Some(msg) = msgs.open_channel {
pending_msg_events.push(MessageSendEvent::SendOpenChannel {
node_id,
msg,
});
}
if let Some(msg) = msgs.funding_created {
pending_msg_events.push(MessageSendEvent::SendFundingCreated {
node_id,
msg,
});
}
if let Some(msg) = msgs.accept_channel {
pending_msg_events.push(MessageSendEvent::SendAcceptChannel {
node_id,
msg,
});
}
let cu_msg = msgs.commitment_update.map(|updates| MessageSendEvent::UpdateHTLCs {
node_id,
channel_id,
updates,
});
let raa_msg = msgs.revoke_and_ack.map(|msg| MessageSendEvent::SendRevokeAndACK {
node_id,
msg,
});
match (cu_msg, raa_msg) {
(Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::CommitmentFirst => {
pending_msg_events.push(cu);
pending_msg_events.push(raa);
},
(Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::RevokeAndACKFirst => {
pending_msg_events.push(raa);
pending_msg_events.push(cu);
},
(Some(cu), _) => pending_msg_events.push(cu),
(_, Some(raa)) => pending_msg_events.push(raa),
(_, _) => {},
}
if let Some(msg) = msgs.funding_signed {
pending_msg_events.push(MessageSendEvent::SendFundingSigned {
node_id,
msg,
});
}
if let Some(msg) = msgs.closing_signed {
pending_msg_events.push(MessageSendEvent::SendClosingSigned {
node_id,
msg,
});
}
}
if let Some(funded_chan) = chan.as_funded() {
if let Some(msg) = msgs.channel_ready {
send_channel_ready!(self, pending_msg_events, funded_chan, msg);
}
if let Some(broadcast_tx) = msgs.signed_closing_tx {
log_info!(logger, "Broadcasting closing tx {}", log_tx!(broadcast_tx));
self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
}
} else {
debug_assert!(msgs.channel_ready.is_none());
debug_assert!(msgs.signed_closing_tx.is_none());
}
msgs.shutdown_result
} else {
None
}
};
let mut shutdown_results: Vec<(Result<Infallible, _>, _)> = Vec::new();
let per_peer_state = self.per_peer_state.read().unwrap();
let per_peer_state_iter = per_peer_state.iter().filter(|(cp_id, _)| {
if let Some((counterparty_node_id, _)) = channel_opt {
**cp_id == counterparty_node_id
} else { true }
});
for (cp_id, peer_state_mutex) in per_peer_state_iter {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
peer_state.channel_by_id.retain(|_, chan| {
let shutdown_result = match channel_opt {
Some((_, channel_id)) if chan.context().channel_id() != channel_id => None,
_ => unblock_chan(chan, &mut peer_state.pending_msg_events),
};
if let Some(shutdown) = shutdown_result {
let context = chan.context();
let logger = WithChannelContext::from(&self.logger, context, None);
let chan_id = context.channel_id();
log_trace!(logger, "Removing channel {} now that the signer is unblocked", chan_id);
let (remove, err) = if let Some(funded) = chan.as_funded_mut() {
let err =
convert_channel_err!(self, peer_state, shutdown, funded, COOP_CLOSED);
(true, err)
} else {
debug_assert!(false);
let reason = shutdown.closure_reason.clone();
let err = ChannelError::Close((reason.to_string(), reason));
convert_channel_err!(self, peer_state, err, chan, UNFUNDED_CHANNEL)
};
debug_assert!(remove);
shutdown_results.push((Err(err), *cp_id));
false
} else {
true
}
});
}
drop(per_peer_state);
for (err, counterparty_node_id) in shutdown_results {
let _ = handle_error!(self, err, counterparty_node_id);
}
}
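/// Checks whether any funded channel is ready to propose `closing_signed`, broadcasting the
/// closing transaction for channels which complete shutdown. Returns whether any message or error
/// was generated.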
#[rustfmt::skip]
fn maybe_generate_initial_closing_signed(&self) -> bool {
let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new();
let mut has_update = false;
{
let per_peer_state = self.per_peer_state.read().unwrap();
for (cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
peer_state.channel_by_id.retain(|_, chan| {
if !chan.context().is_connected() {
return true;
}
match chan.as_funded_mut() {
Some(funded_chan) => {
let logger = WithChannelContext::from(&self.logger, &funded_chan.context, None);
match funded_chan.maybe_propose_closing_signed(&self.fee_estimator, &&logger) {
Ok((msg_opt, tx_shutdown_result_opt)) => {
if let Some(msg) = msg_opt {
has_update = true;
pending_msg_events.push(MessageSendEvent::SendClosingSigned {
node_id: funded_chan.context.get_counterparty_node_id(), msg,
});
}
debug_assert_eq!(tx_shutdown_result_opt.is_some(), funded_chan.is_shutdown());
if let Some((tx, shutdown_res)) = tx_shutdown_result_opt {
let err = convert_channel_err!(self, peer_state, shutdown_res, funded_chan, COOP_CLOSED);
handle_errors.push((*cp_id, Err(err)));
log_info!(logger, "Broadcasting {}", log_tx!(tx));
self.tx_broadcaster.broadcast_transactions(&[&tx]);
false
} else { true }
},
Err(e) => {
has_update = true;
let (close_channel, res) = convert_channel_err!(self, peer_state, e, funded_chan, FUNDED_CHANNEL);
handle_errors.push((funded_chan.context.get_counterparty_node_id(), Err(res)));
!close_channel
}
}
},
None => true,
}
});
}
}
for (counterparty_node_id, err) in handle_errors {
let _ = handle_error!(self, err, counterparty_node_id);
}
has_update
}
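/// Attempts to advance any pending quiescence handshakes by sending `stfu` on channels which are
/// ready to do so.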
#[rustfmt::skip]
fn maybe_send_stfu(&self) {
let per_peer_state = self.per_peer_state.read().unwrap();
for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
for (channel_id, chan) in &mut peer_state.channel_by_id {
if let Some(funded_chan) = chan.as_funded_mut() {
let logger = WithContext::from(
&self.logger, Some(*counterparty_node_id), Some(*channel_id), None
);
match funded_chan.try_send_stfu(&&logger) {
Ok(None) => {},
Ok(Some(stfu)) => {
pending_msg_events.push(MessageSendEvent::SendStfu {
node_id: chan.context().get_counterparty_node_id(),
msg: stfu,
});
},
Err(e) => {
log_debug!(logger, "Could not advance quiescence handshake: {}", e);
}
}
}
}
}
}
#[cfg(any(test, fuzzing))]
#[rustfmt::skip]
pub fn maybe_propose_quiescence(&self, counterparty_node_id: &PublicKey, channel_id: &ChannelId) -> Result<(), APIError> {
let mut result = Ok(());
PersistenceNotifierGuard::optionally_notify(self, || {
let mut notify = NotifyOption::SkipPersistNoEvents;
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
if peer_state_mutex_opt.is_none() {
result = Err(APIError::ChannelUnavailable {
err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}")
});
return notify;
}
let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
if !peer_state.latest_features.supports_quiescence() {
result = Err(APIError::ChannelUnavailable { err: "Peer does not support quiescence".to_owned() });
return notify;
}
match peer_state.channel_by_id.entry(channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_entry) => {
if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
let logger = WithContext::from(
&self.logger, Some(*counterparty_node_id), Some(*channel_id), None
);
match chan.propose_quiescence(&&logger, QuiescentAction::DoNothing) {
Ok(None) => {},
Ok(Some(stfu)) => {
peer_state.pending_msg_events.push(MessageSendEvent::SendStfu {
node_id: *counterparty_node_id, msg: stfu
});
notify = NotifyOption::SkipPersistHandleEvents;
},
Err(msg) => log_trace!(logger, "{}", msg),
}
} else {
result = Err(APIError::APIMisuseError {
err: format!("Unfunded channel {} cannot be quiescent", channel_id),
});
}
},
hash_map::Entry::Vacant(_) => {
result = Err(APIError::ChannelUnavailable {
err: format!("Channel with id {} not found for the passed counterparty node_id {}",
channel_id, counterparty_node_id),
});
},
}
notify
});
result
}
#[cfg(any(test, fuzzing))]
#[rustfmt::skip]
pub fn exit_quiescence(&self, counterparty_node_id: &PublicKey, channel_id: &ChannelId) -> Result<bool, APIError> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| APIError::ChannelUnavailable {
err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}")
})?;
let mut peer_state = peer_state_mutex.lock().unwrap();
let initiator = match peer_state.channel_by_id.entry(*channel_id) {
hash_map::Entry::Occupied(mut chan_entry) => {
if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
chan.exit_quiescence()
} else {
return Err(APIError::APIMisuseError {
err: format!("Unfunded channel {} cannot be quiescent", channel_id),
})
}
},
hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable {
err: format!("Channel with id {} not found for the passed counterparty node_id {}",
channel_id, counterparty_node_id),
}),
};
Ok(initiator)
}
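/// Creates a [`Bolt11Invoice`] payable to us, registering the payment hash and secret with our
/// inbound-payment machinery and attaching route hints for our usable channels. If
/// `payment_hash` is provided we register a payment secret for it without learning the preimage;
/// otherwise a hash whose preimage we can derive is generated. A provided
/// `min_final_cltv_expiry_delta` is padded by three blocks and rejected if the padded value would
/// still fall below [`MIN_FINAL_CLTV_EXPIRY_DELTA`].
///
/// A minimal sketch (`channel_manager` and `description` are assumed to be supplied by the
/// caller):
///
/// ```ignore
/// let params = Bolt11InvoiceParameters {
/// amount_msats: Some(10_000_000),
/// description: Bolt11InvoiceDescription::Direct(description),
/// ..Default::default()
/// };
/// let invoice = channel_manager.create_bolt11_invoice(params).unwrap();
/// ```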
#[rustfmt::skip]
pub fn create_bolt11_invoice(
&self, params: Bolt11InvoiceParameters,
) -> Result<Bolt11Invoice, SignOrCreationError<()>> {
let Bolt11InvoiceParameters {
amount_msats, description, invoice_expiry_delta_secs, min_final_cltv_expiry_delta,
payment_hash,
} = params;
let currency =
Network::from_chain_hash(self.chain_hash).map(Into::into).unwrap_or(Currency::Bitcoin);
#[cfg(feature = "std")]
let duration_since_epoch = {
use std::time::SystemTime;
SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)
.expect("SystemTime::now() should be after SystemTime::UNIX_EPOCH")
};
#[cfg(not(feature = "std"))]
let duration_since_epoch =
Duration::from_secs(self.highest_seen_timestamp.load(Ordering::Acquire) as u64);
if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta {
if min_final_cltv_expiry_delta.saturating_add(3) < MIN_FINAL_CLTV_EXPIRY_DELTA {
return Err(SignOrCreationError::CreationError(CreationError::MinFinalCltvExpiryDeltaTooShort));
}
}
let (payment_hash, payment_secret) = match payment_hash {
Some(payment_hash) => {
let payment_secret = self
.create_inbound_payment_for_hash(
payment_hash, amount_msats,
invoice_expiry_delta_secs.unwrap_or(DEFAULT_EXPIRY_TIME as u32),
min_final_cltv_expiry_delta,
)
.map_err(|()| SignOrCreationError::CreationError(CreationError::InvalidAmount))?;
(payment_hash, payment_secret)
},
None => {
self
.create_inbound_payment(
amount_msats, invoice_expiry_delta_secs.unwrap_or(DEFAULT_EXPIRY_TIME as u32),
min_final_cltv_expiry_delta,
)
.map_err(|()| SignOrCreationError::CreationError(CreationError::InvalidAmount))?
},
};
log_trace!(self.logger, "Creating invoice with payment hash {}", &payment_hash);
let invoice = Bolt11InvoiceBuilder::new(currency);
let invoice = match description {
Bolt11InvoiceDescription::Direct(description) => invoice.description(description.into_inner().0),
Bolt11InvoiceDescription::Hash(hash) => invoice.description_hash(hash.0),
};
let mut invoice = invoice
.duration_since_epoch(duration_since_epoch)
.payee_pub_key(self.get_our_node_id())
.payment_hash(Hash::from_slice(&payment_hash.0).unwrap())
.payment_secret(payment_secret)
.basic_mpp()
.min_final_cltv_expiry_delta(
min_final_cltv_expiry_delta.map(|x| x.saturating_add(3)).unwrap_or(MIN_FINAL_CLTV_EXPIRY_DELTA).into()
);
if let Some(invoice_expiry_delta_secs) = invoice_expiry_delta_secs {
invoice = invoice.expiry_time(Duration::from_secs(invoice_expiry_delta_secs.into()));
}
if let Some(amount_msats) = amount_msats {
invoice = invoice.amount_milli_satoshis(amount_msats);
}
let channels = self.list_channels();
let route_hints = super::invoice_utils::sort_and_filter_channels(channels, amount_msats, &self.logger);
for hint in route_hints {
invoice = invoice.private_route(hint);
}
let raw_invoice = invoice.build_raw().map_err(SignOrCreationError::CreationError)?;
let signature = self.node_signer.sign_invoice(&raw_invoice, Recipient::Node);
raw_invoice
.sign(|_| signature)
.map(|invoice| Bolt11Invoice::from_signed(invoice).unwrap())
.map_err(SignOrCreationError::SignError)
}
}
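/// Parameters for invoices created via [`ChannelManager::create_bolt11_invoice`]. The
/// [`Default`] value produces an amount-less invoice with an empty description, the default
/// expiry ([`DEFAULT_EXPIRY_TIME`]), and a payment hash generated for us (for which we can derive
/// the preimage).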
pub struct Bolt11InvoiceParameters {
pub amount_msats: Option<u64>,
pub description: Bolt11InvoiceDescription,
pub invoice_expiry_delta_secs: Option<u32>,
pub min_final_cltv_expiry_delta: Option<u16>,
pub payment_hash: Option<PaymentHash>,
}
impl Default for Bolt11InvoiceParameters {
fn default() -> Self {
Self {
amount_msats: None,
description: Bolt11InvoiceDescription::Direct(Description::empty()),
invoice_expiry_delta_secs: None,
min_final_cltv_expiry_delta: None,
payment_hash: None,
}
}
}
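// The offer/refund builder methods are generated via macro so the same bodies can be reused for
// the distinct builder types required with and without `c_bindings` (see the cfg-gated
// invocations in the impl below).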
macro_rules! create_offer_builder { ($self: ident, $builder: ty) => {
pub fn create_offer_builder(&$self) -> Result<$builder, Bolt12SemanticError> {
let builder = $self.flow.create_offer_builder(
&*$self.entropy_source, $self.get_peers_for_blinded_path()
)?;
Ok(builder.into())
}
pub fn create_offer_builder_using_router<ME: Deref>(
&$self,
router: ME,
) -> Result<$builder, Bolt12SemanticError>
where
ME::Target: MessageRouter,
{
let builder = $self.flow.create_offer_builder_using_router(
router, &*$self.entropy_source, $self.get_peers_for_blinded_path()
)?;
Ok(builder.into())
}
} }
macro_rules! create_refund_builder { ($self: ident, $builder: ty) => {
pub fn create_refund_builder(
&$self, amount_msats: u64, absolute_expiry: Duration, payment_id: PaymentId,
retry_strategy: Retry, route_params_config: RouteParametersConfig
) -> Result<$builder, Bolt12SemanticError> {
let entropy = &*$self.entropy_source;
let builder = $self.flow.create_refund_builder(
entropy, amount_msats, absolute_expiry,
payment_id, $self.get_peers_for_blinded_path()
)?;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop($self);
let expiration = StaleExpiration::AbsoluteTimeout(absolute_expiry);
$self.pending_outbound_payments
.add_new_awaiting_invoice(
payment_id, expiration, retry_strategy, route_params_config, None,
)
.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?;
Ok(builder.into())
}
pub fn create_refund_builder_using_router<ME: Deref>(
&$self, router: ME, amount_msats: u64, absolute_expiry: Duration, payment_id: PaymentId,
retry_strategy: Retry, route_params_config: RouteParametersConfig
) -> Result<$builder, Bolt12SemanticError>
where
ME::Target: MessageRouter,
{
let entropy = &*$self.entropy_source;
let builder = $self.flow.create_refund_builder_using_router(
router, entropy, amount_msats, absolute_expiry,
payment_id, $self.get_peers_for_blinded_path()
)?;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop($self);
let expiration = StaleExpiration::AbsoluteTimeout(absolute_expiry);
$self.pending_outbound_payments
.add_new_awaiting_invoice(
payment_id, expiration, retry_strategy, route_params_config, None,
)
.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?;
Ok(builder.into())
}
} }
impl<
M: Deref,
T: Deref,
ES: Deref,
NS: Deref,
SP: Deref,
F: Deref,
R: Deref,
MR: Deref,
L: Deref,
> ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
#[cfg(not(c_bindings))]
create_offer_builder!(self, OfferBuilder<'_, DerivedMetadata, secp256k1::All>);
#[cfg(not(c_bindings))]
create_refund_builder!(self, RefundBuilder<'_, secp256k1::All>);
#[cfg(c_bindings)]
create_offer_builder!(self, OfferWithDerivedMetadataBuilder);
#[cfg(c_bindings)]
create_refund_builder!(self, RefundMaybeWithDerivedMetadataBuilder);
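/// Returns an [`Offer`] from our cache of offers built with the help of a static invoice server,
/// allowing us to be paid while offline. Triggers a repersist of the `ChannelManager` if the
/// cache indicates one is needed.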
pub fn get_async_receive_offer(&self) -> Result<Offer, ()> {
let (offer, needs_persist) = self.flow.get_async_receive_offer()?;
if needs_persist {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
}
Ok(offer)
}
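/// Sets the blinded message paths by which we contact a static invoice server, enabling the async
/// receive offer cache to be built and refreshed.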
pub fn set_paths_to_static_invoice_server(
&self, paths_to_static_invoice_server: Vec<BlindedMessagePath>,
) -> Result<(), ()> {
let peers = self.get_peers_for_blinded_path();
self.flow.set_paths_to_static_invoice_server(paths_to_static_invoice_server, peers)?;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
Ok(())
}
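/// Pays for an [`Offer`] by sending an `invoice_request` and registering a pending outbound
/// payment which completes once the corresponding [`Bolt12Invoice`] arrives, optionally
/// overriding the offer's amount via `amount_msats`. Fails with
/// [`Bolt12SemanticError::DuplicatePaymentId`] if `payment_id` was already used.
///
/// A minimal sketch (`channel_manager`, `offer`, `payment_id`, and `optional_params` are assumed
/// to be supplied by the caller):
///
/// ```ignore
/// channel_manager.pay_for_offer(&offer, None, payment_id, optional_params)?;
/// ```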
pub fn pay_for_offer(
&self, offer: &Offer, amount_msats: Option<u64>, payment_id: PaymentId,
optional_params: OptionalOfferPaymentParams,
) -> Result<(), Bolt12SemanticError> {
let create_pending_payment_fn = |retryable_invoice_request: RetryableInvoiceRequest| {
self.pending_outbound_payments
.add_new_awaiting_invoice(
payment_id,
StaleExpiration::TimerTicks(1),
optional_params.retry_strategy,
optional_params.route_params_config,
Some(retryable_invoice_request),
)
.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
};
self.pay_for_offer_intern(
offer,
if offer.expects_quantity() { Some(1) } else { None },
amount_msats,
optional_params.payer_note,
payment_id,
None,
create_pending_payment_fn,
)
}
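/// Pays for an offer which was resolved from a human readable name (BIP 353), recording the HRN
/// in the invoice request so the recipient can see how it was reached. Otherwise behaves like
/// [`Self::pay_for_offer`], except that an explicit `amount_msats` is always required.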
pub fn pay_for_offer_from_hrn(
&self, offer: &OfferFromHrn, amount_msats: u64, payment_id: PaymentId,
optional_params: OptionalOfferPaymentParams,
) -> Result<(), Bolt12SemanticError> {
let create_pending_payment_fn = |retryable_invoice_request: RetryableInvoiceRequest| {
self.pending_outbound_payments
.add_new_awaiting_invoice(
payment_id,
StaleExpiration::TimerTicks(1),
optional_params.retry_strategy,
optional_params.route_params_config,
Some(retryable_invoice_request),
)
.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
};
self.pay_for_offer_intern(
&offer.offer,
if offer.offer.expects_quantity() { Some(1) } else { None },
Some(amount_msats),
optional_params.payer_note,
payment_id,
Some(offer.hrn),
create_pending_payment_fn,
)
}
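/// Pays for an [`Offer`] expecting a quantity of items, like [`Self::pay_for_offer`] but setting
/// the given `quantity` in the invoice request.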
pub fn pay_for_offer_with_quantity(
&self, offer: &Offer, amount_msats: Option<u64>, payment_id: PaymentId,
optional_params: OptionalOfferPaymentParams, quantity: u64,
) -> Result<(), Bolt12SemanticError> {
let create_pending_payment_fn = |retryable_invoice_request: RetryableInvoiceRequest| {
self.pending_outbound_payments
.add_new_awaiting_invoice(
payment_id,
StaleExpiration::TimerTicks(1),
optional_params.retry_strategy,
optional_params.route_params_config,
Some(retryable_invoice_request),
)
.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
};
self.pay_for_offer_intern(
offer,
Some(quantity),
amount_msats,
optional_params.payer_note,
payment_id,
None,
create_pending_payment_fn,
)
}
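/// Common implementation of the `pay_for_offer*` methods: builds and signs the invoice request,
/// enqueues it for sending over blinded paths, and registers the pending payment via
/// `create_pending_payment`.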
#[rustfmt::skip]
fn pay_for_offer_intern<CPP: FnOnce(RetryableInvoiceRequest) -> Result<(), Bolt12SemanticError>>(
&self, offer: &Offer, quantity: Option<u64>, amount_msats: Option<u64>,
payer_note: Option<String>, payment_id: PaymentId,
human_readable_name: Option<HumanReadableName>, create_pending_payment: CPP,
) -> Result<(), Bolt12SemanticError> {
let entropy = &*self.entropy_source;
let nonce = Nonce::from_entropy_source(entropy);
let builder = self.flow.create_invoice_request_builder(
offer, nonce, payment_id,
)?;
let builder = match quantity {
None => builder,
Some(quantity) => builder.quantity(quantity)?,
};
let builder = match amount_msats {
None => builder,
Some(amount_msats) => builder.amount_msats(amount_msats)?,
};
let builder = match payer_note {
None => builder,
Some(payer_note) => builder.payer_note(payer_note),
};
let builder = match human_readable_name {
None => builder,
Some(hrn) => builder.sourced_from_human_readable_name(hrn),
};
let invoice_request = builder.build_and_sign()?;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.flow.enqueue_invoice_request(
invoice_request.clone(), payment_id, nonce,
self.get_peers_for_blinded_path()
)?;
let retryable_invoice_request = RetryableInvoiceRequest {
invoice_request: invoice_request.clone(),
nonce,
needs_retry: true,
};
create_pending_payment(retryable_invoice_request)
}
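/// Creates and enqueues a [`Bolt12Invoice`] in response to a [`Refund`], registering the payment
/// hash and secret so we can claim the refunded amount once the invoice is paid. The invoice
/// expires after [`DEFAULT_RELATIVE_EXPIRY`].
///
/// A minimal sketch (`channel_manager` and `refund` are assumed to be supplied by the caller):
///
/// ```ignore
/// let invoice = channel_manager.request_refund_payment(&refund)?;
/// ```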
#[rustfmt::skip]
pub fn request_refund_payment(
&self, refund: &Refund
) -> Result<Bolt12Invoice, Bolt12SemanticError> {
let secp_ctx = &self.secp_ctx;
let amount_msats = refund.amount_msats();
let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
match self.create_inbound_payment(Some(amount_msats), relative_expiry, None) {
Ok((payment_hash, payment_secret)) => {
let entropy = &*self.entropy_source;
let builder = self.flow.create_invoice_builder_from_refund(
&self.router, entropy, refund, payment_hash,
payment_secret, self.list_usable_channels()
)?;
let invoice = builder.allow_mpp().build_and_sign(secp_ctx)?;
self.flow.enqueue_invoice(invoice.clone(), refund, self.get_peers_for_blinded_path())?;
Ok(invoice)
},
Err(()) => Err(Bolt12SemanticError::InvalidAmount),
}
}
#[cfg(feature = "dnssec")]
#[deprecated(note = "Use bitcoin-payment-instructions and pay_for_offer_from_hrn instead")]
pub fn pay_for_offer_from_human_readable_name(
&self, name: HumanReadableName, amount_msats: u64, payment_id: PaymentId,
optional_params: OptionalOfferPaymentParams, dns_resolvers: Vec<Destination>,
) -> Result<(), ()> {
let (onion_message, context) =
self.flow.hrn_resolver.resolve_name(payment_id, name, &*self.entropy_source)?;
let expiration = StaleExpiration::TimerTicks(1);
self.pending_outbound_payments.add_new_awaiting_offer(
payment_id,
expiration,
optional_params.retry_strategy,
optional_params.route_params_config,
amount_msats,
optional_params.payer_note,
)?;
self.flow
.enqueue_dns_onion_message(
onion_message,
context,
dns_resolvers,
self.get_peers_for_blinded_path(),
)
.map_err(|_| ())
}
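/// Generates a payment hash and secret for a new inbound payment, suitable for embedding in an
/// invoice. No per-payment state is stored; the preimage is derived from our expanded inbound
/// payment key and can later be recovered via [`Self::get_payment_preimage`].
///
/// A minimal sketch, registering a 10_000 msat payment valid for one hour:
///
/// ```ignore
/// let (payment_hash, payment_secret) =
/// channel_manager.create_inbound_payment(Some(10_000), 3600, None)?;
/// ```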
pub fn create_inbound_payment(
&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32,
min_final_cltv_expiry_delta: Option<u16>,
) -> Result<(PaymentHash, PaymentSecret), ()> {
inbound_payment::create(
&self.inbound_payment_key,
min_value_msat,
invoice_expiry_delta_secs,
&self.entropy_source,
self.highest_seen_timestamp.load(Ordering::Acquire) as u64,
min_final_cltv_expiry_delta,
)
}
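/// Like [`Self::create_inbound_payment`], but for a caller-provided payment hash. Since we never
/// learn the preimage, the resulting payment must be claimed with a preimage the caller obtains
/// out of band.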
pub fn create_inbound_payment_for_hash(
&self, payment_hash: PaymentHash, min_value_msat: Option<u64>,
invoice_expiry_delta_secs: u32, min_final_cltv_expiry: Option<u16>,
) -> Result<PaymentSecret, ()> {
inbound_payment::create_from_hash(
&self.inbound_payment_key,
min_value_msat,
payment_hash,
invoice_expiry_delta_secs,
self.highest_seen_timestamp.load(Ordering::Acquire) as u64,
min_final_cltv_expiry,
)
}
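/// Recovers the payment preimage for a `(payment_hash, payment_secret)` pair previously issued by
/// [`Self::create_inbound_payment`].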
pub fn get_payment_preimage(
&self, payment_hash: PaymentHash, payment_secret: PaymentSecret,
) -> Result<PaymentPreimage, APIError> {
let expanded_key = &self.inbound_payment_key;
inbound_payment::get_payment_preimage(payment_hash, payment_secret, expanded_key)
}
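/// Creates blinded message paths that an often-offline recipient can use to reach us in our role
/// as their static invoice server, identified on our end by `recipient_id`.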
pub fn blinded_paths_for_async_recipient(
&self, recipient_id: Vec<u8>, relative_expiry: Option<Duration>,
) -> Result<Vec<BlindedMessagePath>, ()> {
let peers = self.get_peers_for_blinded_path();
self.flow.blinded_paths_for_async_recipient(recipient_id, relative_expiry, peers)
}
pub(super) fn duration_since_epoch(&self) -> Duration {
#[cfg(not(feature = "std"))]
let now = Duration::from_secs(self.highest_seen_timestamp.load(Ordering::Acquire) as u64);
#[cfg(feature = "std")]
let now = std::time::SystemTime::now()
.duration_since(std::time::SystemTime::UNIX_EPOCH)
.expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
now
}
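/// Returns the connected peers which support onion messages, each paired with the inbound SCID of
/// its oldest usable channel (if any) for use as a blinded path forwarding hint.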
fn get_peers_for_blinded_path(&self) -> Vec<MessageForwardNode> {
let per_peer_state = self.per_peer_state.read().unwrap();
per_peer_state
.iter()
.map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
.filter(|(_, peer)| peer.is_connected)
.filter(|(_, peer)| peer.latest_features.supports_onion_messages())
.map(|(node_id, peer)| MessageForwardNode {
node_id: *node_id,
short_channel_id: peer
.channel_by_id
.iter()
.filter(|(_, channel)| channel.context().is_usable())
.filter_map(|(_, channel)| channel.as_funded())
.min_by_key(|funded_channel| funded_channel.context.channel_creation_height)
.and_then(|funded_channel| funded_channel.get_inbound_scid()),
})
.collect::<Vec<_>>()
}
#[cfg(test)]
pub(super) fn test_get_peers_for_blinded_path(&self) -> Vec<MessageForwardNode> {
self.get_peers_for_blinded_path()
}
#[cfg(test)]
pub(super) fn test_create_blinded_payment_paths(
&self, amount_msats: Option<u64>, payment_secret: PaymentSecret,
payment_context: PaymentContext, relative_expiry_seconds: u32,
) -> Result<Vec<BlindedPaymentPath>, ()> {
let entropy = &*self.entropy_source;
self.flow.test_create_blinded_payment_paths(
&self.router,
entropy,
self.list_usable_channels(),
amount_msats,
payment_secret,
payment_context,
relative_expiry_seconds,
)
}
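/// Returns a fake short channel id in the phantom namespace, guaranteed not to collide with any
/// real channel we know of, for use in phantom node route hints.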
pub fn get_phantom_scid(&self) -> u64 {
let best_block_height = self.best_block.read().unwrap().height;
let short_to_chan_info = self.short_to_chan_info.read().unwrap();
loop {
let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(
best_block_height,
&self.chain_hash,
&self.fake_scid_rand_bytes,
&self.entropy_source,
);
match short_to_chan_info.get(&scid_candidate) {
Some(_) => continue,
None => return scid_candidate,
}
}
}
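/// Collects the data needed to build phantom invoice route hints: our usable channels, a fresh
/// phantom SCID, and our real node id.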
pub fn get_phantom_route_hints(&self) -> PhantomRouteHints {
PhantomRouteHints {
channels: self.list_usable_channels(),
phantom_scid: self.get_phantom_scid(),
real_node_pubkey: self.get_our_node_id(),
}
}
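/// Returns a fake short channel id in the intercept namespace, guaranteed not to collide with any
/// real channel we know of. Route hints using it let us intercept the forwarded HTLC.
///
/// A minimal sketch (`channel_manager` is assumed to be supplied by the caller; how the SCID is
/// embedded in a route hint is application-specific):
///
/// ```ignore
/// let intercept_scid = channel_manager.get_intercept_scid();
/// // Hand `intercept_scid` to the invoice-building code as the hint's short_channel_id.
/// ```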
pub fn get_intercept_scid(&self) -> u64 {
let best_block_height = self.best_block.read().unwrap().height;
let short_to_chan_info = self.short_to_chan_info.read().unwrap();
loop {
let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(
best_block_height,
&self.chain_hash,
&self.fake_scid_rand_bytes,
&self.entropy_source,
);
if short_to_chan_info.contains_key(&scid_candidate) {
continue;
}
return scid_candidate;
}
}
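/// Builds an [`InFlightHtlcs`] snapshot of the outbound HTLCs currently in flight over our
/// channels, letting the router account for liquidity already in use when finding routes.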
pub fn compute_inflight_htlcs(&self) -> InFlightHtlcs {
let mut inflight_htlcs = InFlightHtlcs::new();
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for chan in peer_state.channel_by_id.values().filter_map(Channel::as_funded) {
for (htlc_source, _) in chan.inflight_htlc_sources() {
if let HTLCSource::OutboundRoute { path, .. } = htlc_source {
inflight_htlcs.process_path(path, self.get_our_node_id());
}
}
}
}
inflight_htlcs
}
#[cfg(any(test, feature = "_test_utils"))]
pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
let events = core::cell::RefCell::new(Vec::new());
let event_handler = |event: events::Event| Ok(events.borrow_mut().push(event));
self.process_pending_events(&event_handler);
let collected_events = events.into_inner();
for event in &collected_events {
let ser = event.encode();
if let Some(deser) =
events::Event::read(&mut &ser[..]).expect("event should deserialize")
{
assert_eq!(&deser, event, "event should roundtrip correctly");
}
}
collected_events
}
#[cfg(feature = "_test_utils")]
pub fn push_pending_event(&self, event: events::Event) {
let mut events = self.pending_events.lock().unwrap();
events.push_back((event, None));
}
#[cfg(test)]
pub fn pop_pending_event(&self) -> Option<events::Event> {
let mut events = self.pending_events.lock().unwrap();
events.pop_front().map(|(e, _)| e)
}
#[cfg(test)]
pub fn has_pending_payments(&self) -> bool {
self.pending_outbound_payments.has_pending_payments()
}
#[cfg(test)]
pub fn clear_pending_payments(&self) {
self.pending_outbound_payments.clear_pending_payments()
}
#[cfg(any(test, feature = "_test_utils"))]
pub(crate) fn get_and_clear_pending_raa_blockers(
&self,
) -> Vec<(ChannelId, Vec<RAAMonitorUpdateBlockingAction>)> {
let per_peer_state = self.per_peer_state.read().unwrap();
let mut pending_blockers = Vec::new();
for (_peer_pubkey, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state = peer_state_mutex.lock().unwrap();
for (chan_id, actions) in peer_state.actions_blocking_raa_monitor_updates.iter() {
if !actions.is_empty() {
pending_blockers.push((chan_id.clone(), actions.clone()));
}
}
peer_state.actions_blocking_raa_monitor_updates.clear();
}
pending_blockers
}
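/// Removes a completed RAA monitor-update blocker for the given channel and, if no blockers
/// remain, applies the next queued monitor update (looping while further updates exist).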
#[rustfmt::skip]
fn handle_monitor_update_release(
&self, counterparty_node_id: PublicKey, channel_id: ChannelId,
mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>,
) {
let logger = WithContext::from(
&self.logger, Some(counterparty_node_id), Some(channel_id), None
);
loop {
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state_lck = peer_state_mtx.lock().unwrap();
let peer_state = &mut *peer_state_lck;
if let Some(blocker) = completed_blocker.take() {
if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
.get_mut(&channel_id)
{
blockers.retain(|iter| iter != &blocker);
}
}
if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
channel_id, counterparty_node_id) {
log_trace!(logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
&channel_id);
break;
}
if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(
channel_id) {
if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
let channel_funding_outpoint = chan.funding_outpoint();
if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
channel_id);
handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
peer_state_lck, peer_state, per_peer_state, chan);
if further_update_exists {
continue;
}
} else {
log_trace!(logger, "Unlocked monitor updating for channel {} without monitors to update",
channel_id);
}
}
}
} else {
log_debug!(logger,
"Got a release post-RAA monitor update for peer {} but the channel is gone",
log_pubkey!(counterparty_node_id));
}
break;
}
}
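/// Applies the [`EventCompletionAction`]s which were waiting on an event being handled, either
/// releasing blocked RAA monitor updates or writing the payment-complete monitor update which
/// lets a monitor forget a fully resolved HTLC.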
fn handle_post_event_actions<I: IntoIterator<Item = EventCompletionAction>>(&self, actions: I) {
for action in actions.into_iter() {
match action {
EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
channel_funding_outpoint: _,
channel_id,
counterparty_node_id,
} => {
let startup_complete =
self.background_events_processed_since_startup.load(Ordering::Acquire);
debug_assert!(startup_complete);
self.handle_monitor_update_release(counterparty_node_id, channel_id, None);
},
EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(
PaymentCompleteUpdate {
counterparty_node_id,
channel_funding_outpoint,
channel_id,
htlc_id,
},
) => {
let per_peer_state = self.per_peer_state.read().unwrap();
let mut peer_state = per_peer_state
.get(&counterparty_node_id)
.map(|state| state.lock().unwrap())
.expect("Channels originating a payment resolution must have peer state");
let update_id = peer_state
.closed_channel_monitor_update_ids
.get_mut(&channel_id)
.expect("Channels originating a payment resolution must have a monitor");
*update_id = update_id.saturating_add(1);
let update = ChannelMonitorUpdate {
update_id: *update_id,
channel_id: Some(channel_id),
updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete {
htlc: htlc_id,
}],
};
let during_startup =
!self.background_events_processed_since_startup.load(Ordering::Acquire);
if during_startup {
let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id,
funding_txo: channel_funding_outpoint,
channel_id,
update,
};
self.pending_background_events.lock().unwrap().push(event);
} else {
handle_new_monitor_update!(
self,
channel_funding_outpoint,
update,
peer_state,
peer_state,
per_peer_state,
counterparty_node_id,
channel_id,
POST_CHANNEL_CLOSE
);
}
},
}
}
}
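/// Processes any events generated since the last call using the given async handler, in the same
/// manner as [`EventsProvider::process_pending_events`]. Returning an error from the handler
/// causes the event to be replayed on a later invocation.
///
/// A minimal sketch of one async event-loop step:
///
/// ```ignore
/// channel_manager
/// .process_pending_events_async(|event| async move {
/// // Handle `event` here; return an `Err` to have it replayed later.
/// Ok(())
/// })
/// .await;
/// ```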
pub async fn process_pending_events_async<
Future: core::future::Future<Output = Result<(), ReplayEvent>>,
H: Fn(Event) -> Future,
>(
&self, handler: H,
) {
let mut ev;
process_events_body!(self, ev, { handler(ev).await });
}
}
impl<
M: Deref,
T: Deref,
ES: Deref,
NS: Deref,
SP: Deref,
F: Deref,
R: Deref,
MR: Deref,
L: Deref,
> BaseMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
fn provided_node_features(&self) -> NodeFeatures {
provided_node_features(&self.config.read().unwrap())
}
fn provided_init_features(&self, _their_init_features: PublicKey) -> InitFeatures {
provided_init_features(&self.config.read().unwrap())
}
#[rustfmt::skip]
fn peer_disconnected(&self, counterparty_node_id: PublicKey) {
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
let mut splice_failed_events = Vec::new();
let mut failed_channels: Vec<(Result<Infallible, _>, _)> = Vec::new();
let mut per_peer_state = self.per_peer_state.write().unwrap();
let remove_peer = {
log_debug!(
WithContext::from(&self.logger, Some(counterparty_node_id), None, None),
"Marking channels with {} disconnected and generating channel_updates.",
log_pubkey!(counterparty_node_id)
);
if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
peer_state.channel_by_id.retain(|_, chan| {
let logger = WithChannelContext::from(&self.logger, &chan.context(), None);
let DisconnectResult { is_resumable, splice_funding_failed } =
chan.peer_disconnected_is_resumable(&&logger);
if let Some(splice_funding_failed) = splice_funding_failed {
splice_failed_events.push(events::Event::SpliceFailed {
channel_id: chan.context().channel_id(),
counterparty_node_id,
user_channel_id: chan.context().get_user_id(),
abandoned_funding_txo: splice_funding_failed.funding_txo,
channel_type: splice_funding_failed.channel_type,
contributed_inputs: splice_funding_failed.contributed_inputs,
contributed_outputs: splice_funding_failed.contributed_outputs,
});
}
if is_resumable {
return true;
}
let reason = ClosureReason::DisconnectedPeer;
let err = ChannelError::Close((reason.to_string(), reason));
let (_, e) = convert_channel_err!(self, peer_state, err, chan);
failed_channels.push((Err(e), counterparty_node_id));
false
});
peer_state.inbound_channel_request_by_id.clear();
pending_msg_events.retain(|msg| {
match msg {
&MessageSendEvent::SendAcceptChannel { .. } => false,
&MessageSendEvent::SendOpenChannel { .. } => false,
&MessageSendEvent::SendFundingCreated { .. } => false,
&MessageSendEvent::SendFundingSigned { .. } => false,
&MessageSendEvent::SendAcceptChannelV2 { .. } => false,
&MessageSendEvent::SendOpenChannelV2 { .. } => false,
&MessageSendEvent::SendChannelReady { .. } => false,
&MessageSendEvent::SendAnnouncementSignatures { .. } => false,
&MessageSendEvent::SendStfu { .. } => false,
&MessageSendEvent::SendSpliceInit { .. } => false,
&MessageSendEvent::SendSpliceAck { .. } => false,
&MessageSendEvent::SendSpliceLocked { .. } => false,
&MessageSendEvent::SendTxAddInput { .. } => false,
&MessageSendEvent::SendTxAddOutput { .. } => false,
&MessageSendEvent::SendTxRemoveInput { .. } => false,
&MessageSendEvent::SendTxRemoveOutput { .. } => false,
&MessageSendEvent::SendTxComplete { .. } => false,
&MessageSendEvent::SendTxSignatures { .. } => false,
&MessageSendEvent::SendTxInitRbf { .. } => false,
&MessageSendEvent::SendTxAckRbf { .. } => false,
&MessageSendEvent::SendTxAbort { .. } => false,
&MessageSendEvent::UpdateHTLCs { .. } => false,
&MessageSendEvent::SendRevokeAndACK { .. } => false,
&MessageSendEvent::SendClosingSigned { .. } => false,
&MessageSendEvent::SendClosingComplete { .. } => false,
&MessageSendEvent::SendClosingSig { .. } => false,
&MessageSendEvent::SendShutdown { .. } => false,
&MessageSendEvent::SendChannelReestablish { .. } => false,
&MessageSendEvent::HandleError { .. } => false,
&MessageSendEvent::SendChannelAnnouncement { .. } => false,
&MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
&MessageSendEvent::BroadcastChannelUpdate { .. } => {
debug_assert!(false, "This event shouldn't have been here");
false
},
&MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
&MessageSendEvent::SendChannelUpdate { .. } => false,
&MessageSendEvent::SendChannelRangeQuery { .. } => false,
&MessageSendEvent::SendShortIdsQuery { .. } => false,
&MessageSendEvent::SendReplyChannelRange { .. } => false,
&MessageSendEvent::SendGossipTimestampFilter { .. } => false,
&MessageSendEvent::SendPeerStorage { .. } => false,
&MessageSendEvent::SendPeerStorageRetrieval { .. } => false,
}
});
debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect");
peer_state.is_connected = false;
peer_state.ok_to_remove(true)
} else { debug_assert!(false, "Unconnected peer disconnected"); true }
};
if remove_peer {
per_peer_state.remove(&counterparty_node_id);
}
mem::drop(per_peer_state);
let persist = if splice_failed_events.is_empty() {
NotifyOption::SkipPersistHandleEvents
} else {
let mut pending_events = self.pending_events.lock().unwrap();
for event in splice_failed_events {
pending_events.push_back((event, None));
}
NotifyOption::DoPersist
};
for (err, counterparty_node_id) in failed_channels.drain(..) {
let _ = handle_error!(self, err, counterparty_node_id);
}
persist
});
}
#[rustfmt::skip]
fn peer_connected(&self, counterparty_node_id: PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> {
let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
if !init_msg.features.supports_static_remote_key() {
log_debug!(logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
return Err(());
}
let mut res = Ok(());
PersistenceNotifierGuard::optionally_notify(self, || {
let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected);
let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
{
let mut peer_state_lock = self.per_peer_state.write().unwrap();
match peer_state_lock.entry(counterparty_node_id) {
hash_map::Entry::Vacant(e) => {
if inbound_peer_limited {
res = Err(());
return NotifyOption::SkipPersistNoEvents;
}
e.insert(Mutex::new(PeerState {
channel_by_id: new_hash_map(),
inbound_channel_request_by_id: new_hash_map(),
latest_features: init_msg.features.clone(),
pending_msg_events: Vec::new(),
in_flight_monitor_updates: BTreeMap::new(),
monitor_update_blocked_actions: BTreeMap::new(),
actions_blocking_raa_monitor_updates: BTreeMap::new(),
closed_channel_monitor_update_ids: BTreeMap::new(),
is_connected: true,
peer_storage: Vec::new(),
}));
},
hash_map::Entry::Occupied(e) => {
let mut peer_state = e.get().lock().unwrap();
peer_state.latest_features = init_msg.features.clone();
let best_block_height = self.best_block.read().unwrap().height;
if inbound_peer_limited &&
Self::unfunded_channel_count(&*peer_state, best_block_height) ==
peer_state.channel_by_id.len()
{
res = Err(());
return NotifyOption::SkipPersistNoEvents;
}
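// A reconnecting peer should have no messages queued from its previous connection; assert that
// in debug builds and clear defensively in release builds so we never replay stale messages.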
debug_assert!(peer_state.pending_msg_events.is_empty());
peer_state.pending_msg_events.clear();
debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
peer_state.is_connected = true;
},
}
}
log_debug!(logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
if !peer_state.peer_storage.is_empty() {
pending_msg_events.push(MessageSendEvent::SendPeerStorageRetrieval {
node_id: counterparty_node_id.clone(),
msg: msgs::PeerStorageRetrieval {
data: peer_state.peer_storage.clone()
},
});
}
for (_, chan) in peer_state.channel_by_id.iter_mut() {
let logger = WithChannelContext::from(&self.logger, &chan.context(), None);
match chan.peer_connected_get_handshake(self.chain_hash, &&logger) {
ReconnectionMsg::Reestablish(msg) =>
pending_msg_events.push(MessageSendEvent::SendChannelReestablish {
node_id: chan.context().get_counterparty_node_id(),
msg,
}),
ReconnectionMsg::Open(OpenChannelMessage::V1(msg)) =>
pending_msg_events.push(MessageSendEvent::SendOpenChannel {
node_id: chan.context().get_counterparty_node_id(),
msg,
}),
ReconnectionMsg::Open(OpenChannelMessage::V2(msg)) =>
pending_msg_events.push(MessageSendEvent::SendOpenChannelV2 {
node_id: chan.context().get_counterparty_node_id(),
msg,
}),
ReconnectionMsg::None => {},
}
}
}
return NotifyOption::SkipPersistHandleEvents;
});
self.check_refresh_async_receive_offer_cache(false);
res
}
fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
let events = RefCell::new(Vec::new());
PersistenceNotifierGuard::optionally_notify(self, || {
let mut result = NotifyOption::SkipPersistNoEvents;
if self.process_pending_monitor_events() {
result = NotifyOption::DoPersist;
}
if self.check_free_holding_cells() {
result = NotifyOption::DoPersist;
}
if self.maybe_generate_initial_closing_signed() {
result = NotifyOption::DoPersist;
}
self.maybe_send_stfu();
let mut is_any_peer_connected = false;
let mut pending_events = Vec::new();
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
if !peer_state.pending_msg_events.is_empty() {
pending_events.append(&mut peer_state.pending_msg_events);
}
if peer_state.is_connected {
is_any_peer_connected = true;
}
}
if is_any_peer_connected {
let mut broadcast_msgs = self.pending_broadcast_messages.lock().unwrap();
pending_events.append(&mut broadcast_msgs);
}
if !pending_events.is_empty() {
events.replace(pending_events);
}
result
});
events.into_inner()
}
}
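// Event processing is delegated to the shared `process_events_body` macro.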
impl<
M: Deref,
T: Deref,
ES: Deref,
NS: Deref,
SP: Deref,
F: Deref,
R: Deref,
MR: Deref,
L: Deref,
> EventsProvider for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
fn process_pending_events<H: Deref>(&self, handler: H)
where
H::Target: EventHandler,
{
let mut ev;
process_events_body!(self, ev, handler.handle_event(ev));
}
}
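// `chain::Listen` requires blocks to be connected and disconnected in chain order,
// which the assertions in `filtered_block_connected` below enforce.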
impl<
M: Deref,
T: Deref,
ES: Deref,
NS: Deref,
SP: Deref,
F: Deref,
R: Deref,
MR: Deref,
L: Deref,
> chain::Listen for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
{
let best_block = self.best_block.read().unwrap();
assert_eq!(best_block.block_hash, header.prev_blockhash,
"Blocks must be connected in chain-order - the connected header must build on the last connected header");
assert_eq!(best_block.height, height - 1,
"Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
}
self.transactions_confirmed(header, txdata, height);
self.best_block_updated(header, height);
}
fn blocks_disconnected(&self, fork_point: BestBlock) {
let _persistence_guard =
PersistenceNotifierGuard::optionally_notify_skipping_background_events(
self,
|| -> NotifyOption { NotifyOption::DoPersist },
);
{
let mut best_block = self.best_block.write().unwrap();
assert!(best_block.height > fork_point.height,
"Blocks disconnected must indicate disconnection from the current best height, i.e. the new chain tip must be lower than the previous best height");
*best_block = fork_point;
}
self.do_chain_event(Some(fork_point.height), |channel| {
channel.best_block_updated(
fork_point.height,
None,
self.chain_hash,
&self.node_signer,
&self.config.read().unwrap(),
&&WithChannelContext::from(&self.logger, &channel.context, None),
)
});
}
}
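// `chain::Confirm` supports transaction-level confirmation updates which may arrive out
// of order with respect to the best-chain tip, e.g. when syncing from an Electrum-style
// backend.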
impl<
M: Deref,
T: Deref,
ES: Deref,
NS: Deref,
SP: Deref,
F: Deref,
R: Deref,
MR: Deref,
L: Deref,
> chain::Confirm for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
#[rustfmt::skip]
fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) {
let block_hash = header.block_hash();
log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);
let _persistence_guard =
PersistenceNotifierGuard::optionally_notify_skipping_background_events(
self, || -> NotifyOption { NotifyOption::DoPersist });
self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.config.read().unwrap(), &&WithChannelContext::from(&self.logger, &channel.context, None))
.map(|(a, b)| (a, Vec::new(), b)));
let last_best_block_height = self.best_block.read().unwrap().height;
if height < last_best_block_height {
let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
let do_update = |channel: &mut FundedChannel<SP>| {
channel.best_block_updated(
last_best_block_height,
Some(timestamp as u32),
self.chain_hash,
&self.node_signer,
&self.config.read().unwrap(),
&&WithChannelContext::from(&self.logger, &channel.context, None),
)
};
self.do_chain_event(Some(last_best_block_height), do_update);
}
}
#[rustfmt::skip]
fn best_block_updated(&self, header: &Header, height: u32) {
let block_hash = header.block_hash();
log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);
let _persistence_guard =
PersistenceNotifierGuard::optionally_notify_skipping_background_events(
self, || -> NotifyOption { NotifyOption::DoPersist });
*self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
let mut min_anchor_feerate = None;
let mut min_non_anchor_feerate = None;
if self.background_events_processed_since_startup.load(Ordering::Relaxed) {
let mut last_days_feerates = self.last_days_feerates.lock().unwrap();
if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS {
last_days_feerates.pop_front();
}
let anchor_feerate = self.fee_estimator
.bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedAnchorChannelRemoteFee);
let non_anchor_feerate = self.fee_estimator
.bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee);
last_days_feerates.push_back((anchor_feerate, non_anchor_feerate));
if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS {
min_anchor_feerate = last_days_feerates.iter().map(|(f, _)| f).min().copied();
min_non_anchor_feerate = last_days_feerates.iter().map(|(_, f)| f).min().copied();
}
}
self.do_chain_event(Some(height), |channel| {
let logger = WithChannelContext::from(&self.logger, &channel.context, None);
if channel.funding.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
if let Some(feerate) = min_anchor_feerate {
channel.check_for_stale_feerate(&logger, feerate)?;
}
} else {
if let Some(feerate) = min_non_anchor_feerate {
channel.check_for_stale_feerate(&logger, feerate)?;
}
}
{
let legacy_scids = channel.remove_legacy_scids_before_block(height);
if !legacy_scids.as_slice().is_empty() {
let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
for scid in legacy_scids {
short_to_chan_info.remove(&scid);
}
}
}
channel.best_block_updated(
height,
Some(header.time),
self.chain_hash,
&self.node_signer,
&self.config.read().unwrap(),
&&WithChannelContext::from(&self.logger, &channel.context, None),
)
});
macro_rules! max_time {
($timestamp: expr) => {
loop {
let old_serial = $timestamp.load(Ordering::Acquire);
if old_serial >= header.time as usize { break; }
if $timestamp.compare_exchange(old_serial, header.time as usize, Ordering::AcqRel, Ordering::Relaxed).is_ok() {
break;
}
}
}
}
max_time!(self.highest_seen_timestamp);
self.flow.best_block_updated(header, height);
}
fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option<BlockHash>)> {
let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
for (_cp_id, peer_state_mutex) in self.per_peer_state.read().unwrap().iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for chan in peer_state.channel_by_id.values().filter_map(Channel::as_funded) {
for (funding_txid, conf_height, block_hash) in chan.get_relevant_txids() {
res.push((funding_txid, conf_height, block_hash));
}
}
}
res
}
fn transaction_unconfirmed(&self, txid: &Txid) {
let _persistence_guard =
PersistenceNotifierGuard::optionally_notify_skipping_background_events(
self,
|| -> NotifyOption { NotifyOption::DoPersist },
);
self.do_chain_event(None, |channel| {
let logger = WithChannelContext::from(&self.logger, &channel.context, None);
channel.transaction_unconfirmed(txid, &&logger).map(|()| (None, Vec::new(), None))
});
}
}
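/// Message(s) to send to the counterparty once a funding transaction (initial funding or
/// a splice) has sufficient confirmations.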
pub(super) enum FundingConfirmedMessage {
Establishment(msgs::ChannelReady),
Splice(msgs::SpliceLocked, Option<OutPoint>, Option<ChannelMonitorUpdate>, Vec<FundingInfo>),
}
impl<
M: Deref,
T: Deref,
ES: Deref,
NS: Deref,
SP: Deref,
F: Deref,
R: Deref,
MR: Deref,
L: Deref,
> ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
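/// Applies a chain-event callback to each funded channel, sending any resulting
/// `channel_ready`/`splice_locked` messages and announcement signatures, failing any
/// HTLCs which timed out, and force-closing channels for which the callback returned a
/// `ClosureReason`.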
#[rustfmt::skip]
fn do_chain_event<FN: Fn(&mut FundedChannel<SP>) -> Result<(Option<FundingConfirmedMessage>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
(&self, height_opt: Option<u32>, f: FN) {
let mut failed_channels: Vec<(Result<Infallible, _>, _)> = Vec::new();
let mut timed_out_htlcs = Vec::new();
let mut to_process_monitor_update_actions = Vec::new();
{
let per_peer_state = self.per_peer_state.read().unwrap();
for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
peer_state.channel_by_id.retain(|channel_id, chan| {
match chan.as_funded_mut() {
None => true,
Some(funded_channel) => {
let res = f(funded_channel);
if let Ok((funding_confirmed_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
let reason = LocalHTLCFailureReason::CLTVExpiryTooSoon;
let data = self.get_htlc_inbound_temp_fail_data(reason);
timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(reason, data),
HTLCHandlingFailureType::Forward { node_id: Some(funded_channel.context.get_counterparty_node_id()), channel_id: *channel_id }));
}
let logger = WithChannelContext::from(&self.logger, &funded_channel.context, None);
match funding_confirmed_opt {
Some(FundingConfirmedMessage::Establishment(channel_ready)) => {
send_channel_ready!(self, pending_msg_events, funded_channel, channel_ready);
if funded_channel.context.is_usable() && peer_state.is_connected {
log_trace!(logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", channel_id);
if let Ok(msg) = self.get_channel_update_for_unicast(funded_channel) {
pending_msg_events.push(MessageSendEvent::SendChannelUpdate {
node_id: funded_channel.context.get_counterparty_node_id(),
msg,
});
}
} else {
log_trace!(logger, "Sending channel_ready WITHOUT channel_update for {}", channel_id);
}
},
Some(FundingConfirmedMessage::Splice(splice_locked, funding_txo, monitor_update_opt, discarded_funding)) => {
let counterparty_node_id = funded_channel.context.get_counterparty_node_id();
let channel_id = funded_channel.context.channel_id();
if let Some(funding_txo) = funding_txo {
let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
insert_short_channel_id!(short_to_chan_info, funded_channel);
if let Some(monitor_update) = monitor_update_opt {
handle_new_monitor_update!(
self,
funding_txo,
monitor_update,
peer_state,
funded_channel.context,
REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER
);
to_process_monitor_update_actions.push((
counterparty_node_id, channel_id
));
}
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push_back((events::Event::ChannelReady {
channel_id,
user_channel_id: funded_channel.context.get_user_id(),
counterparty_node_id,
funding_txo: Some(funding_txo.into_bitcoin_outpoint()),
channel_type: funded_channel.funding.get_channel_type().clone(),
}, None));
discarded_funding.into_iter().for_each(|funding_info| {
let event = Event::DiscardFunding {
channel_id: funded_channel.context.channel_id(),
funding_info,
};
pending_events.push_back((event, None));
});
}
if funded_channel.context.is_connected() {
pending_msg_events.push(MessageSendEvent::SendSpliceLocked {
node_id: counterparty_node_id,
msg: splice_locked,
});
}
},
None => {},
}
{
let mut pending_events = self.pending_events.lock().unwrap();
emit_initial_channel_ready_event!(pending_events, funded_channel);
}
if let Some(height) = height_opt {
let funding_conf_height =
funded_channel.funding.get_funding_tx_confirmation_height().unwrap_or(height);
let rebroadcast_announcement = funding_conf_height < height + 1008
&& funding_conf_height % 6 == height % 6;
#[allow(unused_mut, unused_assignments)]
let mut should_announce = announcement_sigs.is_some() || rebroadcast_announcement;
#[cfg(any(test, feature = "_test_utils"))]
{
should_announce = announcement_sigs.is_some();
}
if should_announce {
if let Some(announcement) = funded_channel.get_signed_channel_announcement(
&self.node_signer, self.chain_hash, height, &self.config.read().unwrap(),
) {
pending_msg_events.push(MessageSendEvent::BroadcastChannelAnnouncement {
msg: announcement,
update_msg: Some(self.get_channel_update_for_broadcast(funded_channel).unwrap()),
});
}
}
}
if let Some(announcement_sigs) = announcement_sigs {
if peer_state.is_connected {
log_trace!(logger, "Sending announcement_signatures for channel {}", funded_channel.context.channel_id());
pending_msg_events.push(MessageSendEvent::SendAnnouncementSignatures {
node_id: funded_channel.context.get_counterparty_node_id(),
msg: announcement_sigs,
});
}
}
if funded_channel.is_our_channel_ready() {
if let Some(real_scid) = funded_channel.funding.get_short_channel_id() {
let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
let scid_insert = short_to_chan_info.insert(real_scid, (funded_channel.context.get_counterparty_node_id(), *channel_id));
assert!(scid_insert.is_none() || scid_insert.unwrap() == (funded_channel.context.get_counterparty_node_id(), *channel_id),
"SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
}
}
} else if let Err(reason) = res {
let err = ChannelError::Close((reason.to_string(), reason));
let (_, e) = convert_channel_err!(
self,
peer_state,
err,
funded_channel,
FUNDED_CHANNEL
);
failed_channels.push((Err(e), *counterparty_node_id));
return false;
}
true
}
}
});
}
}
for (counterparty_node_id, channel_id) in to_process_monitor_update_actions {
self.channel_monitor_updated(&channel_id, None, &counterparty_node_id);
}
if let Some(height) = height_opt {
self.claimable_payments.lock().unwrap().claimable_payments.retain(|payment_hash, payment| {
payment.htlcs.retain(|htlc| {
if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
let reason = LocalHTLCFailureReason::PaymentClaimBuffer;
timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(),
HTLCFailReason::reason(reason, invalid_payment_err_data(htlc.value, height)),
HTLCHandlingFailureType::Receive { payment_hash: payment_hash.clone() }));
false
} else { true }
});
!payment.htlcs.is_empty()
});
let mut intercepted_htlcs = self.pending_intercepted_htlcs.lock().unwrap();
intercepted_htlcs.retain(|_, htlc| {
if height >= htlc.forward_info.outgoing_cltv_value - HTLC_FAIL_BACK_BUFFER {
let prev_hop_data = HTLCSource::PreviousHopData(htlc.htlc_previous_hop_data());
let requested_forward_scid = match htlc.forward_info.routing {
PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
_ => unreachable!(),
};
timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash,
HTLCFailReason::from_failure_code(LocalHTLCFailureReason::ForwardExpiryBuffer),
HTLCHandlingFailureType::InvalidForward { requested_forward_scid }));
let logger = WithContext::from(
&self.logger, None, Some(htlc.prev_channel_id), Some(htlc.forward_info.payment_hash)
);
log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
false
} else { true }
});
}
for (failure, counterparty_node_id) in failed_channels {
let _ = handle_error!(self, failure, counterparty_node_id);
}
for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) {
self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, destination, None);
}
}
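/// Gets a [`Future`] that completes when this [`ChannelManager`] may need to be persisted
/// or may have pending events to process.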
pub fn get_event_or_persistence_needed_future(&self) -> Future {
self.event_persist_notifier.get_future()
}
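/// Returns whether this [`ChannelManager`] needs to be persisted, clearing the flag in
/// the process.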
pub fn get_and_clear_needs_persistence(&self) -> bool {
self.needs_persist_flag.swap(false, Ordering::AcqRel)
}
#[cfg(any(test, feature = "_test_utils"))]
pub fn get_event_or_persist_condvar_value(&self) -> bool {
self.event_persist_notifier.notify_pending()
}
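/// Gets the latest best block which was connected via the [`chain::Listen`] or
/// [`chain::Confirm`] interfaces.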
pub fn current_best_block(&self) -> BestBlock {
self.best_block.read().unwrap().clone()
}
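/// Fetches the set of [`NodeFeatures`] flags that are provided by or required by
/// [`ChannelManager`].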
pub fn node_features(&self) -> NodeFeatures {
provided_node_features(&self.config.read().unwrap())
}
#[cfg(any(feature = "_test_utils", test))]
pub fn bolt11_invoice_features(&self) -> Bolt11InvoiceFeatures {
provided_bolt11_invoice_features(&self.config.read().unwrap())
}
fn bolt12_invoice_features(&self) -> Bolt12InvoiceFeatures {
provided_bolt12_invoice_features(&self.config.read().unwrap())
}
pub fn channel_features(&self) -> ChannelFeatures {
provided_channel_features(&self.config.read().unwrap())
}
pub fn channel_type_features(&self) -> ChannelTypeFeatures {
provided_channel_type_features(&self.config.read().unwrap())
}
pub fn init_features(&self) -> InitFeatures {
provided_init_features(&self.config.read().unwrap())
}
}
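// Each handler below delegates to the corresponding `internal_*` method, deciding
// whether to persist based on whether the message could have closed a channel or
// generated message-send events.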
impl<
M: Deref,
T: Deref,
ES: Deref,
NS: Deref,
SP: Deref,
F: Deref,
R: Deref,
MR: Deref,
L: Deref,
> ChannelMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
fn handle_open_channel(&self, counterparty_node_id: PublicKey, message: &msgs::OpenChannel) {
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
let msg = OpenChannelMessageRef::V1(message);
let res = self.internal_open_channel(&counterparty_node_id, msg);
let persist = match &res {
Err(e) if e.closes_channel() => {
debug_assert!(false, "We shouldn't close a new channel");
NotifyOption::DoPersist
},
_ => NotifyOption::SkipPersistHandleEvents,
};
let _ = handle_error!(self, res, counterparty_node_id);
persist
});
}
#[rustfmt::skip]
fn handle_open_channel_v2(&self, counterparty_node_id: PublicKey, msg: &msgs::OpenChannelV2) {
if !self.init_features().supports_dual_fund() {
let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
"Dual-funded channels not supported".to_owned(),
msg.common_fields.temporary_channel_id.clone())), counterparty_node_id);
return;
}
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
let res = self.internal_open_channel(&counterparty_node_id, OpenChannelMessageRef::V2(msg));
let persist = match &res {
Err(e) if e.closes_channel() => {
debug_assert!(false, "We shouldn't close a new channel");
NotifyOption::DoPersist
},
_ => NotifyOption::SkipPersistHandleEvents,
};
let _ = handle_error!(self, res, counterparty_node_id);
persist
});
}
fn handle_accept_channel(&self, counterparty_node_id: PublicKey, msg: &msgs::AcceptChannel) {
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
let res = self.internal_accept_channel(&counterparty_node_id, msg);
let _ = handle_error!(self, res, counterparty_node_id);
NotifyOption::SkipPersistHandleEvents
});
}
fn handle_accept_channel_v2(
&self, counterparty_node_id: PublicKey, msg: &msgs::AcceptChannelV2,
) {
let err = Err(MsgHandleErrInternal::send_err_msg_no_close(
"Dual-funded channels not supported".to_owned(),
msg.common_fields.temporary_channel_id.clone(),
));
let _: Result<(), _> = handle_error!(self, err, counterparty_node_id);
}
fn handle_funding_created(&self, counterparty_node_id: PublicKey, msg: &msgs::FundingCreated) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let res = self.internal_funding_created(&counterparty_node_id, msg);
let _ = handle_error!(self, res, counterparty_node_id);
}
fn handle_funding_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::FundingSigned) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let res = self.internal_funding_signed(&counterparty_node_id, msg);
let _ = handle_error!(self, res, counterparty_node_id);
}
fn handle_peer_storage(&self, counterparty_node_id: PublicKey, msg: msgs::PeerStorage) {
let _persistence_guard =
PersistenceNotifierGuard::optionally_notify(self, || NotifyOption::SkipPersistNoEvents);
let res = self.internal_peer_storage(counterparty_node_id, msg);
let _ = handle_error!(self, res, counterparty_node_id);
}
fn handle_peer_storage_retrieval(
&self, counterparty_node_id: PublicKey, msg: msgs::PeerStorageRetrieval,
) {
let _persistence_guard =
PersistenceNotifierGuard::optionally_notify(self, || NotifyOption::SkipPersistNoEvents);
let res = self.internal_peer_storage_retrieval(counterparty_node_id, msg);
let _ = handle_error!(self, res, counterparty_node_id);
}
fn handle_channel_ready(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelReady) {
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
let res = self.internal_channel_ready(&counterparty_node_id, msg);
let persist = match &res {
Err(e) if e.closes_channel() => NotifyOption::DoPersist,
_ => NotifyOption::SkipPersistHandleEvents,
};
let _ = handle_error!(self, res, counterparty_node_id);
persist
});
}
fn handle_stfu(&self, counterparty_node_id: PublicKey, msg: &msgs::Stfu) {
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
let res = self.internal_stfu(&counterparty_node_id, msg);
let persist = match &res {
Err(e) if e.closes_channel() => NotifyOption::DoPersist,
Err(_) => NotifyOption::SkipPersistHandleEvents,
Ok(responded) => {
if *responded {
NotifyOption::SkipPersistHandleEvents
} else {
NotifyOption::SkipPersistNoEvents
}
},
};
let _ = handle_error!(self, res, counterparty_node_id);
persist
});
}
fn handle_splice_init(&self, counterparty_node_id: PublicKey, msg: &msgs::SpliceInit) {
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
let res = self.internal_splice_init(&counterparty_node_id, msg);
let persist = match &res {
Err(e) if e.closes_channel() => NotifyOption::DoPersist,
Err(_) => NotifyOption::SkipPersistHandleEvents,
Ok(()) => NotifyOption::SkipPersistHandleEvents,
};
let _ = handle_error!(self, res, counterparty_node_id);
persist
});
}
fn handle_splice_ack(&self, counterparty_node_id: PublicKey, msg: &msgs::SpliceAck) {
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
let res = self.internal_splice_ack(&counterparty_node_id, msg);
let persist = match &res {
Err(e) if e.closes_channel() => NotifyOption::DoPersist,
Err(_) => NotifyOption::SkipPersistHandleEvents,
Ok(()) => NotifyOption::SkipPersistHandleEvents,
};
let _ = handle_error!(self, res, counterparty_node_id);
persist
});
}
#[rustfmt::skip]
fn handle_splice_locked(&self, counterparty_node_id: PublicKey, msg: &msgs::SpliceLocked) {
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
let res = self.internal_splice_locked(&counterparty_node_id, msg);
let persist = match &res {
Err(e) if e.closes_channel() => NotifyOption::DoPersist,
Err(_) => NotifyOption::SkipPersistHandleEvents,
Ok(()) => NotifyOption::DoPersist,
};
let _ = handle_error!(self, res, counterparty_node_id);
persist
});
}
fn handle_shutdown(&self, counterparty_node_id: PublicKey, msg: &msgs::Shutdown) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let res = self.internal_shutdown(&counterparty_node_id, msg);
let _ = handle_error!(self, res, counterparty_node_id);
}
fn handle_closing_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::ClosingSigned) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let res = self.internal_closing_signed(&counterparty_node_id, msg);
let _ = handle_error!(self, res, counterparty_node_id);
}
#[cfg(simple_close)]
fn handle_closing_complete(&self, counterparty_node_id: PublicKey, msg: msgs::ClosingComplete) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let res = self.internal_closing_complete(counterparty_node_id, msg);
let _ = handle_error!(self, res, counterparty_node_id);
}
#[cfg(simple_close)]
fn handle_closing_sig(&self, counterparty_node_id: PublicKey, msg: msgs::ClosingSig) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let res = self.internal_closing_sig(counterparty_node_id, msg);
let _ = handle_error!(self, res, counterparty_node_id);
}
fn handle_update_add_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateAddHTLC) {
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
let res = self.internal_update_add_htlc(&counterparty_node_id, msg);
let persist = match &res {
Err(e) if e.closes_channel() => NotifyOption::DoPersist,
Err(_) => NotifyOption::SkipPersistHandleEvents,
Ok(()) => NotifyOption::SkipPersistNoEvents,
};
let _ = handle_error!(self, res, counterparty_node_id);
persist
});
}
fn handle_update_fulfill_htlc(
&self, counterparty_node_id: PublicKey, msg: msgs::UpdateFulfillHTLC,
) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let res = self.internal_update_fulfill_htlc(&counterparty_node_id, msg);
let _ = handle_error!(self, res, counterparty_node_id);
}
fn handle_update_fail_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFailHTLC) {
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
let res = self.internal_update_fail_htlc(&counterparty_node_id, msg);
let persist = match &res {
Err(e) if e.closes_channel() => NotifyOption::DoPersist,
Err(_) => NotifyOption::SkipPersistHandleEvents,
Ok(()) => NotifyOption::SkipPersistNoEvents,
};
let _ = handle_error!(self, res, counterparty_node_id);
persist
});
}
fn handle_update_fail_malformed_htlc(
&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFailMalformedHTLC,
) {
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
let res = self.internal_update_fail_malformed_htlc(&counterparty_node_id, msg);
let persist = match &res {
Err(e) if e.closes_channel() => NotifyOption::DoPersist,
Err(_) => NotifyOption::SkipPersistHandleEvents,
Ok(()) => NotifyOption::SkipPersistNoEvents,
};
let _ = handle_error!(self, res, counterparty_node_id);
persist
});
}
fn handle_commitment_signed(
&self, counterparty_node_id: PublicKey, msg: &msgs::CommitmentSigned,
) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let res = self.internal_commitment_signed(&counterparty_node_id, msg);
let _ = handle_error!(self, res, counterparty_node_id);
}
fn handle_commitment_signed_batch(
&self, counterparty_node_id: PublicKey, channel_id: ChannelId,
batch: Vec<msgs::CommitmentSigned>,
) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let res = self.internal_commitment_signed_batch(&counterparty_node_id, channel_id, batch);
let _ = handle_error!(self, res, counterparty_node_id);
}
fn handle_revoke_and_ack(&self, counterparty_node_id: PublicKey, msg: &msgs::RevokeAndACK) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let res = self.internal_revoke_and_ack(&counterparty_node_id, msg);
let _ = handle_error!(self, res, counterparty_node_id);
}
fn handle_update_fee(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFee) {
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
let res = self.internal_update_fee(&counterparty_node_id, msg);
let persist = match &res {
Err(e) if e.closes_channel() => NotifyOption::DoPersist,
Err(_) => NotifyOption::SkipPersistHandleEvents,
Ok(()) => NotifyOption::SkipPersistNoEvents,
};
let _ = handle_error!(self, res, counterparty_node_id);
persist
});
}
fn handle_announcement_signatures(
&self, counterparty_node_id: PublicKey, msg: &msgs::AnnouncementSignatures,
) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let res = self.internal_announcement_signatures(&counterparty_node_id, msg);
let _ = handle_error!(self, res, counterparty_node_id);
}
fn handle_channel_update(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelUpdate) {
PersistenceNotifierGuard::optionally_notify(self, || {
let res = self.internal_channel_update(&counterparty_node_id, msg);
if let Ok(persist) = handle_error!(self, res, counterparty_node_id) {
persist
} else {
NotifyOption::DoPersist
}
});
}
fn handle_channel_reestablish(
&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelReestablish,
) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let res = self.internal_channel_reestablish(&counterparty_node_id, msg);
let _ = handle_error!(self, res, counterparty_node_id);
}
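// Special-cases two known LND error messages (see LND bug 6039) by re-sending
// `shutdown` rather than force-closing; otherwise force-closes the referenced
// channel(s), first giving unfunded channels a chance to retry `open_channel` with
// downgraded features.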
#[rustfmt::skip]
fn handle_error(&self, counterparty_node_id: PublicKey, msg: &msgs::ErrorMessage) {
match &msg.data as &str {
"cannot co-op close channel w/ active htlcs"|
"link failed to shutdown" =>
{
if !msg.channel_id.is_zero() {
PersistenceNotifierGuard::optionally_notify(
self,
|| -> NotifyOption {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
if peer_state_mutex_opt.is_none() { return NotifyOption::SkipPersistNoEvents; }
let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
if let Some(chan) = peer_state.channel_by_id
.get(&msg.channel_id)
.and_then(Channel::as_funded)
{
if let Some(msg) = chan.get_outbound_shutdown() {
peer_state.pending_msg_events.push(MessageSendEvent::SendShutdown {
node_id: counterparty_node_id,
msg,
});
}
peer_state.pending_msg_events.push(MessageSendEvent::HandleError {
node_id: counterparty_node_id,
action: msgs::ErrorAction::SendWarningMessage {
msg: msgs::WarningMessage {
channel_id: msg.channel_id,
data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
},
log_level: Level::Trace,
}
});
return NotifyOption::SkipPersistHandleEvents;
}
NotifyOption::SkipPersistNoEvents
}
);
}
return;
}
_ => {}
}
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let peer_msg = UntrustedString(msg.data.clone());
let reason = ClosureReason::CounterpartyForceClosed { peer_msg };
if msg.channel_id.is_zero() {
let channel_ids: Vec<ChannelId> = {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
if peer_state_mutex_opt.is_none() { return; }
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
peer_state.inbound_channel_request_by_id.clear();
peer_state.channel_by_id.keys().cloned().collect()
};
for channel_id in channel_ids {
let _ = self.force_close_channel_with_peer(&channel_id, &counterparty_node_id, reason.clone());
}
} else {
{
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
if peer_state_mutex_opt.is_none() { return; }
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.get_mut(&msg.channel_id) {
Some(chan) => match chan.maybe_handle_error_without_close(
self.chain_hash, &self.fee_estimator, &self.logger,
&self.config.read().unwrap(), &peer_state.latest_features,
) {
Ok(Some(OpenChannelMessage::V1(msg))) => {
peer_state.pending_msg_events.push(MessageSendEvent::SendOpenChannel {
node_id: counterparty_node_id,
msg,
});
return;
},
Ok(Some(OpenChannelMessage::V2(msg))) => {
peer_state.pending_msg_events.push(MessageSendEvent::SendOpenChannelV2 {
node_id: counterparty_node_id,
msg,
});
return;
},
Ok(None) | Err(()) => {},
},
None => {},
}
}
let _ = self.force_close_channel_with_peer(&msg.channel_id, &counterparty_node_id, reason);
}
}
fn get_chain_hashes(&self) -> Option<Vec<ChainHash>> {
Some(vec![self.chain_hash])
}
fn handle_tx_add_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddInput) {
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
let res = self.internal_tx_add_input(counterparty_node_id, msg);
let persist = match &res {
Err(_) => NotifyOption::DoPersist,
Ok(persist) => *persist,
};
let _ = handle_error!(self, res, counterparty_node_id);
persist
});
}
fn handle_tx_add_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddOutput) {
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
let res = self.internal_tx_add_output(counterparty_node_id, msg);
let persist = match &res {
Err(_) => NotifyOption::DoPersist,
Ok(persist) => *persist,
};
let _ = handle_error!(self, res, counterparty_node_id);
persist
});
}
fn handle_tx_remove_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveInput) {
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
let res = self.internal_tx_remove_input(counterparty_node_id, msg);
let persist = match &res {
Err(_) => NotifyOption::DoPersist,
Ok(persist) => *persist,
};
let _ = handle_error!(self, res, counterparty_node_id);
persist
});
}
fn handle_tx_remove_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveOutput) {
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
let res = self.internal_tx_remove_output(counterparty_node_id, msg);
let persist = match &res {
Err(_) => NotifyOption::DoPersist,
Ok(persist) => *persist,
};
let _ = handle_error!(self, res, counterparty_node_id);
persist
});
}
fn handle_tx_complete(&self, counterparty_node_id: PublicKey, msg: &msgs::TxComplete) {
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
let res = self.internal_tx_complete(counterparty_node_id, msg);
let persist = match &res {
Err(_) => NotifyOption::DoPersist,
Ok(persist) => *persist,
};
let _ = handle_error!(self, res, counterparty_node_id);
persist
});
}
fn handle_tx_signatures(&self, counterparty_node_id: PublicKey, msg: &msgs::TxSignatures) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let res = self.internal_tx_signatures(&counterparty_node_id, msg);
let _ = handle_error!(self, res, counterparty_node_id);
}
fn handle_tx_init_rbf(&self, counterparty_node_id: PublicKey, msg: &msgs::TxInitRbf) {
let err = Err(MsgHandleErrInternal::send_err_msg_no_close(
"Dual-funded channels not supported".to_owned(),
msg.channel_id.clone(),
));
let _: Result<(), _> = handle_error!(self, err, counterparty_node_id);
}
fn handle_tx_ack_rbf(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAckRbf) {
let err = Err(MsgHandleErrInternal::send_err_msg_no_close(
"Dual-funded channels not supported".to_owned(),
msg.channel_id.clone(),
));
let _: Result<(), _> = handle_error!(self, err, counterparty_node_id);
}
fn handle_tx_abort(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAbort) {
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
let res = self.internal_tx_abort(&counterparty_node_id, msg);
let persist = match &res {
Err(e) if e.closes_channel() => NotifyOption::DoPersist,
Err(_) => NotifyOption::SkipPersistHandleEvents,
Ok(persist) => *persist,
};
let _ = handle_error!(self, res, counterparty_node_id);
persist
});
}
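// Called when a message is received from any peer: retries BOLT 12 invoice requests
// that are still awaiting an invoice by re-enqueueing them over fresh blinded paths.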
fn message_received(&self) {
for (payment_id, retryable_invoice_request) in
self.pending_outbound_payments.release_invoice_requests_awaiting_invoice()
{
let RetryableInvoiceRequest { invoice_request, nonce, .. } = retryable_invoice_request;
let peers = self.get_peers_for_blinded_path();
let enqueue_invreq_res =
self.flow.enqueue_invoice_request(invoice_request, payment_id, nonce, peers);
if enqueue_invreq_res.is_err() {
log_warn!(
self.logger,
"Retry failed for invoice request with payment_id {}",
payment_id
);
}
}
}
}
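// Handles BOLT 12 offers messages: builds invoices in response to invoice requests,
// pays (or surfaces to the user) received invoices, and abandons payments on receipt of
// an `invoice_error`.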
impl<
M: Deref,
T: Deref,
ES: Deref,
NS: Deref,
SP: Deref,
F: Deref,
R: Deref,
MR: Deref,
L: Deref,
> OffersMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
#[rustfmt::skip]
fn handle_message(
&self, message: OffersMessage, context: Option<OffersContext>, responder: Option<Responder>,
) -> Option<(OffersMessage, ResponseInstruction)> {
macro_rules! handle_pay_invoice_res {
($res: expr, $invoice: expr, $logger: expr) => {{
let error = match $res {
Err(Bolt12PaymentError::UnknownRequiredFeatures) => {
log_trace!(
$logger, "Invoice requires unknown features: {:?}",
$invoice.invoice_features()
);
InvoiceError::from(Bolt12SemanticError::UnknownRequiredFeatures)
},
Err(Bolt12PaymentError::SendingFailed(e)) => {
log_trace!($logger, "Failed paying invoice: {:?}", e);
InvoiceError::from_string(format!("{:?}", e))
},
Err(Bolt12PaymentError::BlindedPathCreationFailed) => {
let err_msg = "Failed to create a blinded path back to ourselves";
log_trace!($logger, "{}", err_msg);
InvoiceError::from_string(err_msg.to_string())
},
Err(Bolt12PaymentError::UnexpectedInvoice)
| Err(Bolt12PaymentError::DuplicateInvoice)
| Ok(()) => return None,
};
match responder {
Some(responder) => return Some((OffersMessage::InvoiceError(error), responder.respond())),
None => {
log_trace!($logger, "No reply path to send error: {:?}", error);
return None
},
}
}}
}
match message {
OffersMessage::InvoiceRequest(invoice_request) => {
let responder = match responder {
Some(responder) => responder,
None => return None,
};
let invoice_request = match self.flow.verify_invoice_request(invoice_request, context) {
Ok(InvreqResponseInstructions::SendInvoice(invoice_request)) => invoice_request,
Ok(InvreqResponseInstructions::SendStaticInvoice { recipient_id, invoice_slot, invoice_request }) => {
self.pending_events.lock().unwrap().push_back((Event::StaticInvoiceRequested {
recipient_id, invoice_slot, reply_path: responder, invoice_request,
}, None));
return None
},
Err(_) => return None,
};
let amount_msats = match InvoiceBuilder::<DerivedSigningPubkey>::amount_msats(
&invoice_request.inner
) {
Ok(amount_msats) => amount_msats,
Err(error) => return Some((OffersMessage::InvoiceError(error.into()), responder.respond())),
};
let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
let (payment_hash, payment_secret) = match self.create_inbound_payment(
Some(amount_msats), relative_expiry, None
) {
Ok((payment_hash, payment_secret)) => (payment_hash, payment_secret),
Err(()) => {
let error = Bolt12SemanticError::InvalidAmount;
return Some((OffersMessage::InvoiceError(error.into()), responder.respond()));
},
};
let entropy = &*self.entropy_source;
let (response, context) = self.flow.create_response_for_invoice_request(
&self.node_signer, &self.router, entropy, invoice_request, amount_msats,
payment_hash, payment_secret, self.list_usable_channels()
);
match context {
Some(context) => Some((response, responder.respond_with_reply_path(context))),
None => Some((response, responder.respond()))
}
},
OffersMessage::Invoice(invoice) => {
let payment_id = match self.flow.verify_bolt12_invoice(&invoice, context.as_ref()) {
Ok(payment_id) => payment_id,
Err(()) => return None,
};
let logger = WithContext::from(
&self.logger, None, None, Some(invoice.payment_hash()),
);
if self.config.read().unwrap().manually_handle_bolt12_invoices {
self.pending_outbound_payments.mark_invoice_received(&invoice, payment_id).ok()?;
let event = Event::InvoiceReceived {
payment_id, invoice, context, responder,
};
self.pending_events.lock().unwrap().push_back((event, None));
return None;
}
let res = self.send_payment_for_verified_bolt12_invoice(&invoice, payment_id);
handle_pay_invoice_res!(res, invoice, logger);
},
OffersMessage::StaticInvoice(invoice) => {
let payment_id = match context {
Some(OffersContext::OutboundPayment { payment_id, .. }) => payment_id,
_ => return None
};
let res = self.initiate_async_payment(&invoice, payment_id);
handle_pay_invoice_res!(res, invoice, self.logger);
},
OffersMessage::InvoiceError(invoice_error) => {
let payment_hash = match context {
Some(OffersContext::InboundPayment { payment_hash }) => Some(payment_hash),
_ => None,
};
let logger = WithContext::from(&self.logger, None, None, payment_hash);
log_trace!(logger, "Received invoice_error: {}", invoice_error);
match context {
Some(OffersContext::OutboundPayment { payment_id, .. }) => {
self.abandon_payment_with_reason(
payment_id, PaymentFailureReason::InvoiceRequestRejected,
);
},
_ => {},
}
None
},
}
}
fn release_pending_messages(&self) -> Vec<(OffersMessage, MessageSendInstructions)> {
self.flow.release_pending_offers_messages()
}
}
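// Handles async-payments onion messages, covering both serving static invoices on
// behalf of often-offline recipients and holding/releasing HTLCs destined for them.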
impl<
M: Deref,
T: Deref,
ES: Deref,
NS: Deref,
SP: Deref,
F: Deref,
R: Deref,
MR: Deref,
L: Deref,
> AsyncPaymentsMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
fn handle_offer_paths_request(
&self, message: OfferPathsRequest, context: AsyncPaymentsContext,
responder: Option<Responder>,
) -> Option<(OfferPaths, ResponseInstruction)> {
let peers = self.get_peers_for_blinded_path();
let (message, reply_path_context) =
match self.flow.handle_offer_paths_request(&message, context, peers) {
Some(msg) => msg,
None => return None,
};
responder.map(|resp| (message, resp.respond_with_reply_path(reply_path_context)))
}
fn handle_offer_paths(
&self, message: OfferPaths, context: AsyncPaymentsContext, responder: Option<Responder>,
) -> Option<(ServeStaticInvoice, ResponseInstruction)> {
let responder = match responder {
Some(responder) => responder,
None => return None,
};
let (serve_static_invoice, reply_context) = match self.flow.handle_offer_paths(
message,
context,
responder.clone(),
self.get_peers_for_blinded_path(),
self.list_usable_channels(),
&*self.entropy_source,
&*self.router,
) {
Some((msg, ctx)) => (msg, ctx),
None => return None,
};
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let response_instructions = responder.respond_with_reply_path(reply_context);
return Some((serve_static_invoice, response_instructions));
}
fn handle_serve_static_invoice(
&self, message: ServeStaticInvoice, context: AsyncPaymentsContext,
responder: Option<Responder>,
) {
let responder = match responder {
Some(resp) => resp,
None => return,
};
let (recipient_id, invoice_slot) =
match self.flow.verify_serve_static_invoice_message(&message, context) {
Ok((recipient_id, inv_slot)) => (recipient_id, inv_slot),
Err(()) => return,
};
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push_back((
Event::PersistStaticInvoice {
invoice: message.invoice,
invoice_request_path: message.forward_invoice_request_path,
invoice_slot,
recipient_id,
invoice_persisted_path: responder,
},
None,
));
}
fn handle_static_invoice_persisted(
&self, _message: StaticInvoicePersisted, context: AsyncPaymentsContext,
) {
let should_persist = self.flow.handle_static_invoice_persisted(context);
if should_persist {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
}
}
fn handle_held_htlc_available(
&self, _message: HeldHtlcAvailable, context: AsyncPaymentsContext,
responder: Option<Responder>,
) -> Option<(ReleaseHeldHtlc, ResponseInstruction)> {
self.flow.verify_inbound_async_payment_context(context).ok()?;
return responder.map(|responder| (ReleaseHeldHtlc {}, responder.respond()));
}
fn handle_release_held_htlc(&self, _message: ReleaseHeldHtlc, context: AsyncPaymentsContext) {
match context {
AsyncPaymentsContext::OutboundPayment { payment_id } => {
if let Err(e) = self.send_payment_for_static_invoice(payment_id) {
log_trace!(
self.logger,
"Failed to release held HTLC with payment id {}: {:?}",
payment_id,
e
);
}
},
AsyncPaymentsContext::ReleaseHeldHtlc {
intercept_id,
prev_outbound_scid_alias,
htlc_id,
} => {
let mut decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
if let Some(htlcs) = decode_update_add_htlcs.get_mut(&prev_outbound_scid_alias) {
for update_add in htlcs.iter_mut() {
if update_add.htlc_id == htlc_id {
log_trace!(
self.logger,
"Marking held htlc with intercept_id {} as ready to release",
intercept_id
);
update_add.hold_htlc.take();
return;
}
}
}
core::mem::drop(decode_update_add_htlcs);
let mut htlc = {
let mut pending_intercept_htlcs =
self.pending_intercepted_htlcs.lock().unwrap();
match pending_intercept_htlcs.remove(&intercept_id) {
Some(htlc) => htlc,
None => {
log_trace!(
self.logger,
"Failed to release HTLC with intercept_id {}: HTLC not found",
intercept_id
);
return;
},
}
};
match htlc.forward_info.routing {
PendingHTLCRouting::Forward { ref mut hold_htlc, .. } => {
debug_assert!(hold_htlc.is_some());
*hold_htlc = None;
},
_ => {
debug_assert!(false, "HTLC intercepts can only be forwards");
return;
},
}
let logger = WithContext::from(
&self.logger,
Some(htlc.prev_counterparty_node_id),
Some(htlc.prev_channel_id),
Some(htlc.forward_info.payment_hash),
);
log_trace!(logger, "Releasing held htlc with intercept_id {}", intercept_id);
let mut per_source_pending_forward = [(
htlc.prev_outbound_scid_alias,
htlc.prev_counterparty_node_id,
htlc.prev_funding_outpoint,
htlc.prev_channel_id,
htlc.prev_user_channel_id,
vec![(htlc.forward_info, htlc.prev_htlc_id)],
)];
self.forward_htlcs(&mut per_source_pending_forward);
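// Dropped immediately, triggering a persistence notification now that the released
// HTLC has been queued for forwarding.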
PersistenceNotifierGuard::notify_on_drop(self);
},
_ => return,
}
}
fn release_pending_messages(&self) -> Vec<(AsyncPaymentsMessage, MessageSendInstructions)> {
self.flow.release_pending_async_messages()
}
}
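// Resolves Human Readable Names to offers using DNSSEC proofs, then pays the resolved
// offer for any payments that were awaiting resolution.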
#[cfg(feature = "dnssec")]
impl<
M: Deref,
T: Deref,
ES: Deref,
NS: Deref,
SP: Deref,
F: Deref,
R: Deref,
MR: Deref,
L: Deref,
> DNSResolverMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
fn handle_dnssec_query(
&self, _message: DNSSECQuery, _responder: Option<Responder>,
) -> Option<(DNSResolverMessage, ResponseInstruction)> {
None
}
#[rustfmt::skip]
fn handle_dnssec_proof(&self, message: DNSSECProof, context: DNSResolverContext) {
let offer_opt = self.flow.hrn_resolver.handle_dnssec_proof_for_offer(message, context);
#[cfg_attr(not(feature = "_test_utils"), allow(unused_mut))]
if let Some((completed_requests, mut offer)) = offer_opt {
for (name, payment_id) in completed_requests {
#[cfg(feature = "_test_utils")]
if let Some(replacement_offer) = self.testing_dnssec_proof_offer_resolution_override.lock().unwrap().remove(&name) {
offer = replacement_offer;
}
if let Ok((amt_msats, payer_note)) = self.pending_outbound_payments.params_for_payment_awaiting_offer(payment_id) {
let offer_pay_res =
self.pay_for_offer_intern(&offer, None, Some(amt_msats), payer_note, payment_id, Some(name),
|retryable_invoice_request| {
self.pending_outbound_payments
.received_offer(payment_id, Some(retryable_invoice_request))
.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
});
if offer_pay_res.is_err() {
self.pending_outbound_payments.abandon_payment(
payment_id, PaymentFailureReason::RouteNotFound, &self.pending_events,
);
}
}
}
}
}
fn release_pending_messages(&self) -> Vec<(DNSResolverMessage, MessageSendInstructions)> {
self.flow.release_pending_dns_messages()
}
}
impl<
M: Deref,
T: Deref,
ES: Deref,
NS: Deref,
SP: Deref,
F: Deref,
R: Deref,
MR: Deref,
L: Deref,
> NodeIdLookUp for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
fn next_node_id(&self, short_channel_id: u64) -> Option<PublicKey> {
self.short_to_chan_info.read().unwrap().get(&short_channel_id).map(|(pubkey, _)| *pubkey)
}
}
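/// Fetches the set of [`NodeFeatures`] flags that are provided by a [`ChannelManager`]
/// built with the given [`UserConfig`].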
pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures {
let mut node_features = provided_init_features(config).to_context();
node_features.set_keysend_optional();
node_features
}
#[cfg(any(feature = "_test_utils", test))]
pub(crate) fn provided_bolt11_invoice_features(config: &UserConfig) -> Bolt11InvoiceFeatures {
provided_init_features(config).to_context()
}
pub(crate) fn provided_bolt12_invoice_features(config: &UserConfig) -> Bolt12InvoiceFeatures {
provided_init_features(config).to_context()
}
pub(crate) fn provided_channel_features(config: &UserConfig) -> ChannelFeatures {
provided_init_features(config).to_context()
}
pub(crate) fn provided_channel_type_features(config: &UserConfig) -> ChannelTypeFeatures {
ChannelTypeFeatures::from_init(&provided_init_features(config))
}
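/// Fetches the set of [`InitFeatures`] flags that are provided by or required by a
/// [`ChannelManager`] built with the given [`UserConfig`].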
pub fn provided_init_features(config: &UserConfig) -> InitFeatures {
let mut features = InitFeatures::empty();
features.set_data_loss_protect_required();
features.set_upfront_shutdown_script_optional();
features.set_variable_length_onion_required();
features.set_static_remote_key_required();
features.set_payment_secret_required();
features.set_basic_mpp_optional();
features.set_wumbo_optional();
features.set_shutdown_any_segwit_optional();
features.set_channel_type_required();
features.set_scid_privacy_optional();
features.set_zero_conf_optional();
features.set_route_blinding_optional();
features.set_provide_storage_optional();
#[cfg(simple_close)]
features.set_simple_close_optional();
features.set_quiescence_optional();
features.set_splicing_optional();
if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx {
features.set_anchors_zero_fee_htlc_tx_optional();
}
#[cfg(dual_funding)]
if config.enable_dual_funded_channels {
features.set_dual_fund_optional();
}
if config.channel_handshake_config.negotiate_anchor_zero_fee_commitments {
features.set_anchor_zero_fee_commitments_optional();
}
if config.enable_htlc_hold {
features.set_htlc_hold_optional();
}
features
}
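// `SERIALIZATION_VERSION` is the version written; `MIN_SERIALIZATION_VERSION` is the
// minimum version a reader must understand to deserialize this data.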
const SERIALIZATION_VERSION: u8 = 1;
const MIN_SERIALIZATION_VERSION: u8 = 1;
impl_writeable_tlv_based!(PhantomRouteHints, {
(2, channels, required_vec),
(4, phantom_scid, required),
(6, real_node_pubkey, required),
});
impl_writeable_tlv_based!(BlindedForward, {
(0, inbound_blinding_point, required),
(1, failure, (default_value, BlindedFailure::FromIntroductionNode)),
(3, next_blinding_override, option),
});
impl_writeable_tlv_based_enum!(PendingHTLCRouting,
(0, Forward) => {
(0, onion_packet, required),
(1, blinded, option),
(2, short_channel_id, required),
(3, incoming_cltv_expiry, option),
(4, hold_htlc, option),
},
(1, Receive) => {
(0, payment_data, required),
(1, phantom_shared_secret, option),
(2, incoming_cltv_expiry, required),
(3, payment_metadata, option),
(5, custom_tlvs, optional_vec),
(7, requires_blinded_error, (default_value, false)),
(9, payment_context, option),
},
(2, ReceiveKeysend) => {
(0, payment_preimage, required),
(1, requires_blinded_error, (default_value, false)),
(2, incoming_cltv_expiry, required),
(3, payment_metadata, option),
(4, payment_data, option),
(5, custom_tlvs, optional_vec),
(7, has_recipient_created_payment_secret, (default_value, false)),
(9, payment_context, option),
(11, invoice_request, option),
},
(3, TrampolineForward) => {
(0, incoming_shared_secret, required),
(2, onion_packet, required),
(4, blinded, option),
(6, node_id, required),
(8, incoming_cltv_expiry, required),
}
);
impl_writeable_tlv_based!(PendingHTLCInfo, {
(0, routing, required),
(2, incoming_shared_secret, required),
(4, payment_hash, required),
(6, outgoing_amt_msat, required),
(8, outgoing_cltv_value, required),
(9, incoming_amt_msat, option),
(10, skimmed_fee_msat, option),
});
impl Writeable for HTLCFailureMsg {
#[rustfmt::skip]
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
match self {
HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id, htlc_id, reason, attribution_data }) => {
0u8.write(writer)?;
channel_id.write(writer)?;
htlc_id.write(writer)?;
reason.write(writer)?;
debug_assert!(attribution_data.is_none());
},
HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
channel_id, htlc_id, sha256_of_onion, failure_code
}) => {
1u8.write(writer)?;
channel_id.write(writer)?;
htlc_id.write(writer)?;
sha256_of_onion.write(writer)?;
failure_code.write(writer)?;
},
}
Ok(())
}
}
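// Variant bytes 0 and 1 are legacy encodings without a length prefix; variants 2 and 3
// carry a `BigSize` length prefix, allowing readers to skip unknown trailing fields.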
impl Readable for HTLCFailureMsg {
#[rustfmt::skip]
fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
let id: u8 = Readable::read(reader)?;
match id {
0 => {
Ok(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
channel_id: Readable::read(reader)?,
htlc_id: Readable::read(reader)?,
reason: Readable::read(reader)?,
attribution_data: None,
}))
},
1 => {
Ok(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
channel_id: Readable::read(reader)?,
htlc_id: Readable::read(reader)?,
sha256_of_onion: Readable::read(reader)?,
failure_code: Readable::read(reader)?,
}))
},
2 => {
let length: BigSize = Readable::read(reader)?;
let mut s = FixedLengthReader::new(reader, length.0);
let res = LengthReadable::read_from_fixed_length_buffer(&mut s)?;
s.eat_remaining()?;
Ok(HTLCFailureMsg::Relay(res))
},
3 => {
let length: BigSize = Readable::read(reader)?;
let mut s = FixedLengthReader::new(reader, length.0);
let res = LengthReadable::read_from_fixed_length_buffer(&mut s)?;
s.eat_remaining()?;
Ok(HTLCFailureMsg::Malformed(res))
},
_ => Err(DecodeError::UnknownRequiredFeature),
}
}
}
impl_writeable_tlv_based_enum_legacy!(PendingHTLCStatus, ;
(0, Forward),
(1, Fail),
);
impl_writeable_tlv_based_enum!(BlindedFailure,
(0, FromIntroductionNode) => {},
(2, FromBlindedNode) => {},
);
impl_writeable_tlv_based!(HTLCPreviousHopData, {
(0, prev_outbound_scid_alias, required),
(1, phantom_shared_secret, option),
(2, outpoint, required),
(3, blinded_failure, option),
(4, htlc_id, required),
(5, cltv_expiry, option),
(6, incoming_packet_shared_secret, required),
(7, user_channel_id, option),
(9, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(outpoint.0.unwrap()))),
(11, counterparty_node_id, option),
});
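// Legacy hop data is only present for `OnionPayload::Invoice` HTLCs; spontaneous
// (keysend) HTLCs serialize the payment preimage instead.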
impl Writeable for ClaimableHTLC {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
let (payment_data, keysend_preimage) = match &self.onion_payload {
OnionPayload::Invoice { _legacy_hop_data } => (_legacy_hop_data.as_ref(), None),
OnionPayload::Spontaneous(preimage) => (None, Some(preimage)),
};
write_tlv_fields!(writer, {
(0, self.prev_hop, required),
(1, self.total_msat, required),
(2, self.value, required),
(3, self.sender_intended_value, required),
(4, payment_data, option),
(5, self.total_value_received, option),
(6, self.cltv_expiry, required),
(8, keysend_preimage, option),
(10, self.counterparty_skimmed_fee_msat, option),
});
Ok(())
}
}
impl Readable for ClaimableHTLC {
#[rustfmt::skip]
fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
_init_and_read_len_prefixed_tlv_fields!(reader, {
(0, prev_hop, required),
(1, total_msat, option),
(2, value_ser, required),
(3, sender_intended_value, option),
(4, payment_data_opt, option),
(5, total_value_received, option),
(6, cltv_expiry, required),
(8, keysend_preimage, option),
(10, counterparty_skimmed_fee_msat, option),
});
let payment_data: Option<msgs::FinalOnionHopData> = payment_data_opt;
let value = value_ser.0.unwrap();
let onion_payload = match keysend_preimage {
Some(p) => {
if payment_data.is_some() {
return Err(DecodeError::InvalidValue)
}
if total_msat.is_none() {
total_msat = Some(value);
}
OnionPayload::Spontaneous(p)
},
None => {
if total_msat.is_none() {
if payment_data.is_none() {
return Err(DecodeError::InvalidValue)
}
total_msat = Some(payment_data.as_ref().unwrap().total_msat);
}
OnionPayload::Invoice { _legacy_hop_data: payment_data }
},
};
Ok(Self {
prev_hop: prev_hop.0.unwrap(),
timer_ticks: 0,
value,
sender_intended_value: sender_intended_value.unwrap_or(value),
total_value_received,
total_msat: total_msat.unwrap(),
onion_payload,
cltv_expiry: cltv_expiry.0.unwrap(),
counterparty_skimmed_fee_msat,
})
}
}
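// Variant byte 0 is an outbound route (TLV-encoded fields); variant byte 1 is a
// previous-hop HTLC.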
impl Readable for HTLCSource {
#[rustfmt::skip]
fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
let id: u8 = Readable::read(reader)?;
match id {
0 => {
let mut session_priv: crate::util::ser::RequiredWrapper<SecretKey> = crate::util::ser::RequiredWrapper(None);
let mut first_hop_htlc_msat: u64 = 0;
let mut path_hops = Vec::new();
let mut payment_id = None;
let mut payment_params: Option<PaymentParameters> = None;
let mut blinded_tail: Option<BlindedTail> = None;
let mut bolt12_invoice: Option<PaidBolt12Invoice> = None;
read_tlv_fields!(reader, {
(0, session_priv, required),
(1, payment_id, option),
(2, first_hop_htlc_msat, required),
(4, path_hops, required_vec),
(5, payment_params, (option: ReadableArgs, 0)),
(6, blinded_tail, option),
(7, bolt12_invoice, option),
});
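// If no explicit payment_id was written (as in older serializations), derive
// one from the session key bytes.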
if payment_id.is_none() {
payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref()));
}
let path = Path { hops: path_hops, blinded_tail };
if path.hops.is_empty() {
return Err(DecodeError::InvalidValue);
}
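// Older serializations wrote a final_cltv_expiry_delta of 0 into the payment
// params; recover the real value from the path.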
if let Some(params) = payment_params.as_mut() {
if let Payee::Clear { ref mut final_cltv_expiry_delta, .. } = params.payee {
if final_cltv_expiry_delta == &0 {
*final_cltv_expiry_delta = path.final_cltv_expiry_delta().ok_or(DecodeError::InvalidValue)?;
}
}
}
Ok(HTLCSource::OutboundRoute {
session_priv: session_priv.0.unwrap(),
first_hop_htlc_msat,
path,
payment_id: payment_id.unwrap(),
bolt12_invoice,
})
}
1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
_ => Err(DecodeError::UnknownRequiredFeature),
}
}
}
impl Writeable for HTLCSource {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), crate::io::Error> {
match self {
HTLCSource::OutboundRoute {
ref session_priv,
ref first_hop_htlc_msat,
ref path,
payment_id,
bolt12_invoice,
} => {
0u8.write(writer)?;
let payment_id_opt = Some(payment_id);
write_tlv_fields!(writer, {
(0, session_priv, required),
(1, payment_id_opt, option),
(2, first_hop_htlc_msat, required),
(4, path.hops, required_vec),
(5, None::<PaymentParameters>, option),
(6, path.blinded_tail, option),
(7, bolt12_invoice, option),
});
},
HTLCSource::PreviousHopData(ref field) => {
1u8.write(writer)?;
field.write(writer)?;
},
}
Ok(())
}
}
impl_writeable_tlv_based!(PendingAddHTLCInfo, {
(0, forward_info, required),
(1, prev_user_channel_id, (default_value, 0)),
(2, prev_outbound_scid_alias, required),
(4, prev_htlc_id, required),
(6, prev_funding_outpoint, required),
(7, prev_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(prev_funding_outpoint.0.unwrap()))),
(9, prev_counterparty_node_id, required),
});
impl Writeable for HTLCForwardInfo {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
const FAIL_HTLC_VARIANT_ID: u8 = 1;
match self {
Self::AddHTLC(info) => {
0u8.write(w)?;
info.write(w)?;
},
Self::FailHTLC { htlc_id, err_packet } => {
FAIL_HTLC_VARIANT_ID.write(w)?;
write_tlv_fields!(w, {
(0, htlc_id, required),
(2, err_packet.data, required),
(5, err_packet.attribution_data, option),
});
},
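// FailMalformedHTLC shares the FailHTLC variant id and writes an empty error
// packet as type 2 so readers unaware of the malformed variant can still fail
// the HTLC back; the failure code and onion hash ride in the odd types 1 and
// 3, which such readers ignore.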
Self::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
FAIL_HTLC_VARIANT_ID.write(w)?;
write_tlv_fields!(w, {
(0, htlc_id, required),
(1, failure_code, required),
(2, Vec::<u8>::new(), required),
(3, sha256_of_onion, required),
});
},
}
Ok(())
}
}
impl Readable for HTLCForwardInfo {
#[rustfmt::skip]
fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
let id: u8 = Readable::read(r)?;
Ok(match id {
0 => Self::AddHTLC(Readable::read(r)?),
1 => {
_init_and_read_len_prefixed_tlv_fields!(r, {
(0, htlc_id, required),
(1, malformed_htlc_failure_code, option),
(2, err_packet, required),
(3, sha256_of_onion, option),
(5, attribution_data, option),
});
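// Type 1 is only ever written for the malformed variant, so its presence
// disambiguates the two fail variants that share variant id 1.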
if let Some(failure_code) = malformed_htlc_failure_code {
if attribution_data.is_some() {
return Err(DecodeError::InvalidValue);
}
Self::FailMalformedHTLC {
htlc_id: _init_tlv_based_struct_field!(htlc_id, required),
failure_code,
sha256_of_onion: sha256_of_onion.ok_or(DecodeError::InvalidValue)?,
}
} else {
Self::FailHTLC {
htlc_id: _init_tlv_based_struct_field!(htlc_id, required),
err_packet: crate::ln::msgs::OnionErrorPacket {
data: _init_tlv_based_struct_field!(err_packet, required),
attribution_data: _init_tlv_based_struct_field!(attribution_data, option),
},
}
}
},
_ => return Err(DecodeError::InvalidValue),
})
}
}
impl_writeable_tlv_based!(PendingInboundPayment, {
(0, payment_secret, required),
(2, expiry_time, required),
(4, user_payment_id, required),
(6, payment_preimage, required),
(8, min_value_msat, required),
});
impl<
M: Deref,
T: Deref,
ES: Deref,
NS: Deref,
SP: Deref,
F: Deref,
R: Deref,
MR: Deref,
L: Deref,
> Writeable for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
#[rustfmt::skip]
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
let _consistency_lock = self.total_consistency_lock.write().unwrap();
write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
self.chain_hash.write(writer)?;
{
let best_block = self.best_block.read().unwrap();
best_block.height.write(writer)?;
best_block.block_hash.write(writer)?;
}
let per_peer_state = self.per_peer_state.write().unwrap();
let mut serializable_peer_count: u64 = 0;
{
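// Count serializable peers and resumable funded channels before writing the
// channels themselves.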
let mut number_of_funded_channels = 0;
for (_, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
if !peer_state.ok_to_remove(false) {
serializable_peer_count += 1;
}
number_of_funded_channels += peer_state.channel_by_id
.values()
.filter_map(Channel::as_funded)
.filter(|chan| chan.context.can_resume_on_restart())
.count();
}
(number_of_funded_channels as u64).write(writer)?;
for (_, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for channel in peer_state.channel_by_id
.values()
.filter_map(Channel::as_funded)
.filter(|channel| channel.context.can_resume_on_restart())
{
channel.write(writer)?;
}
}
}
{
let forward_htlcs = self.forward_htlcs.lock().unwrap();
(forward_htlcs.len() as u64).write(writer)?;
for (short_channel_id, pending_forwards) in forward_htlcs.iter() {
short_channel_id.write(writer)?;
(pending_forwards.len() as u64).write(writer)?;
for forward in pending_forwards {
forward.write(writer)?;
}
}
}
let mut decode_update_add_htlcs_opt = None;
let decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
if !decode_update_add_htlcs.is_empty() {
decode_update_add_htlcs_opt = Some(decode_update_add_htlcs);
}
let claimable_payments = self.claimable_payments.lock().unwrap();
let pending_outbound_payments = self.pending_outbound_payments.pending_outbound_payments.lock().unwrap();
let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
let mut htlc_onion_fields: Vec<&_> = Vec::new();
(claimable_payments.claimable_payments.len() as u64).write(writer)?;
for (payment_hash, payment) in claimable_payments.claimable_payments.iter() {
payment_hash.write(writer)?;
(payment.htlcs.len() as u64).write(writer)?;
for htlc in payment.htlcs.iter() {
htlc.write(writer)?;
}
htlc_purposes.push(&payment.purpose);
htlc_onion_fields.push(&payment.onion_fields);
}
let mut monitor_update_blocked_actions_per_peer = None;
let mut peer_states = Vec::new();
for (_, peer_state_mutex) in per_peer_state.iter() {
peer_states.push(peer_state_mutex.unsafe_well_ordered_double_lock_self());
}
let mut peer_storage_dir: Vec<(&PublicKey, &Vec<u8>)> = Vec::new();
serializable_peer_count.write(writer)?;
for ((peer_pubkey, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
if !peer_state.ok_to_remove(false) {
peer_pubkey.write(writer)?;
peer_state.latest_features.write(writer)?;
peer_storage_dir.push((peer_pubkey, &peer_state.peer_storage));
if !peer_state.monitor_update_blocked_actions.is_empty() {
monitor_update_blocked_actions_per_peer
.get_or_insert_with(Vec::new)
.push((*peer_pubkey, &peer_state.monitor_update_blocked_actions));
}
}
}
let mut events = self.pending_events.lock().unwrap();
let event_count = events.len();
for peer_state in peer_states.iter() {
for chan in peer_state.channel_by_id.values().filter_map(Channel::as_funded) {
if let Some(splice_funding_failed) = chan.maybe_splice_funding_failed() {
events.push_back((
events::Event::SpliceFailed {
channel_id: chan.context.channel_id(),
counterparty_node_id: chan.context.get_counterparty_node_id(),
user_channel_id: chan.context.get_user_id(),
abandoned_funding_txo: splice_funding_failed.funding_txo,
channel_type: splice_funding_failed.channel_type,
contributed_inputs: splice_funding_failed.contributed_inputs,
contributed_outputs: splice_funding_failed.contributed_outputs,
},
None,
));
}
}
}
let events_not_backwards_compatible = events.iter().any(|(_, action)| action.is_some());
if events_not_backwards_compatible {
0u64.write(writer)?;
} else {
(events.len() as u64).write(writer)?;
for (event, _) in events.iter() {
event.write(writer)?;
}
}
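// Legacy compatibility writes: an always-zero background event count, the
// highest-seen timestamp written twice (the first write fills a slot an
// obsolete serial number used to occupy), and an always-zero pending inbound
// payment count. The read side discards all of these but the timestamp.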
0u64.write(writer)?;
(self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?;
(self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?;
0u64.write(writer)?;
let mut num_pending_outbounds_compat: u64 = 0;
for (_, outbound) in pending_outbound_payments.iter() {
if !outbound.is_fulfilled() && !outbound.abandoned() {
num_pending_outbounds_compat += outbound.remaining_parts() as u64;
}
}
num_pending_outbounds_compat.write(writer)?;
for (_, outbound) in pending_outbound_payments.iter() {
match outbound {
PendingOutboundPayment::Legacy { session_privs } |
PendingOutboundPayment::Retryable { session_privs, .. } => {
for session_priv in session_privs.iter() {
session_priv.write(writer)?;
}
}
PendingOutboundPayment::AwaitingInvoice { .. } => {},
PendingOutboundPayment::AwaitingOffer { .. } => {},
PendingOutboundPayment::InvoiceReceived { .. } => {},
PendingOutboundPayment::StaticInvoiceReceived { .. } => {},
PendingOutboundPayment::Fulfilled { .. } => {},
PendingOutboundPayment::Abandoned { .. } => {},
}
}
let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = new_hash_map();
for (id, outbound) in pending_outbound_payments.iter() {
match outbound {
PendingOutboundPayment::Legacy { session_privs } |
PendingOutboundPayment::Retryable { session_privs, .. } => {
pending_outbound_payments_no_retry.insert(*id, session_privs.clone());
},
_ => {},
}
}
let mut pending_intercepted_htlcs = None;
let our_pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
if !our_pending_intercepts.is_empty() {
pending_intercepted_htlcs = Some(our_pending_intercepts);
}
let mut pending_claiming_payments = Some(&claimable_payments.pending_claiming_payments);
if pending_claiming_payments.as_ref().unwrap().is_empty() {
pending_claiming_payments = None;
}
let mut legacy_in_flight_monitor_updates: Option<HashMap<(&PublicKey, &OutPoint), &Vec<ChannelMonitorUpdate>>> = None;
let mut in_flight_monitor_updates: Option<HashMap<(&PublicKey, &ChannelId), &Vec<ChannelMonitorUpdate>>> = None;
for ((counterparty_id, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
for (channel_id, (funding_txo, updates)) in peer_state.in_flight_monitor_updates.iter() {
if !updates.is_empty() {
legacy_in_flight_monitor_updates.get_or_insert_with(|| new_hash_map())
.insert((counterparty_id, funding_txo), updates);
in_flight_monitor_updates.get_or_insert_with(|| new_hash_map())
.insert((counterparty_id, channel_id), updates);
}
}
}
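// In-flight monitor updates are written twice: keyed by funding outpoint
// (type 10) for older readers and by channel id (type 17) for current ones.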
write_tlv_fields!(writer, {
(1, pending_outbound_payments_no_retry, required),
(2, pending_intercepted_htlcs, option),
(3, pending_outbound_payments, required),
(4, pending_claiming_payments, option),
(5, self.our_network_pubkey, required),
(6, monitor_update_blocked_actions_per_peer, option),
(7, self.fake_scid_rand_bytes, required),
(8, if events_not_backwards_compatible { Some(&*events) } else { None }, option),
(9, htlc_purposes, required_vec),
(10, legacy_in_flight_monitor_updates, option),
(11, self.probing_cookie_secret, required),
(13, htlc_onion_fields, optional_vec),
(14, decode_update_add_htlcs_opt, option),
(15, self.inbound_payment_id_secret, required),
(17, in_flight_monitor_updates, option),
(19, peer_storage_dir, optional_vec),
(21, WithoutLength(&self.flow.writeable_async_receive_offer_cache()), required),
});
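// Drop the SpliceFailed events appended above; they were added only for this
// serialization pass.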
events.truncate(event_count);
Ok(())
}
}
impl Writeable for VecDeque<(Event, Option<EventCompletionAction>)> {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
(self.len() as u64).write(w)?;
for (event, action) in self.iter() {
event.write(w)?;
action.write(w)?;
#[cfg(debug_assertions)]
{
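// An event paired with a completion action must survive a write/read round
// trip, or the action would be lost on restart.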
let event_encoded = event.encode();
let event_read: Option<Event> =
MaybeReadable::read(&mut &event_encoded[..]).unwrap();
if action.is_some() {
assert!(event_read.is_some());
}
}
}
Ok(())
}
}
impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
let len: u64 = Readable::read(reader)?;
const MAX_ALLOC_SIZE: u64 = 1024 * 16;
let event_size = mem::size_of::<(events::Event, Option<EventCompletionAction>)>();
let mut events: Self =
VecDeque::with_capacity(cmp::min(MAX_ALLOC_SIZE / event_size as u64, len) as usize);
for _ in 0..len {
let ev_opt = MaybeReadable::read(reader)?;
let action = Readable::read(reader)?;
if let Some(ev) = ev_opt {
events.push_back((ev, action));
} else if action.is_some() {
return Err(DecodeError::InvalidValue);
}
}
Ok(events)
}
}
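/// Arguments for reading a [`ChannelManager`] back from persisted state.
///
/// The deserialized manager must be given every `ChannelMonitor` it was persisted with (or a
/// newer copy of each); a channel whose monitor is missing cannot be safely resumed and will
/// cause deserialization to fail.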
pub struct ChannelManagerReadArgs<
'a,
M: Deref,
T: Deref,
ES: Deref,
NS: Deref,
SP: Deref,
F: Deref,
R: Deref,
MR: Deref,
L: Deref + Clone,
> where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
pub entropy_source: ES,
pub node_signer: NS,
pub signer_provider: SP,
pub fee_estimator: F,
pub chain_monitor: M,
pub tx_broadcaster: T,
pub router: R,
pub message_router: MR,
pub logger: L,
pub config: UserConfig,
pub channel_monitors:
HashMap<ChannelId, &'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
}
impl<
'a,
M: Deref,
T: Deref,
ES: Deref,
NS: Deref,
SP: Deref,
F: Deref,
R: Deref,
MR: Deref,
L: Deref + Clone,
> ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
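/// Builds the read args, indexing the given monitors by channel id.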
pub fn new(
entropy_source: ES, node_signer: NS, signer_provider: SP, fee_estimator: F,
chain_monitor: M, tx_broadcaster: T, router: R, message_router: MR, logger: L,
config: UserConfig,
mut channel_monitors: Vec<&'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
) -> Self {
Self {
entropy_source,
node_signer,
signer_provider,
fee_estimator,
chain_monitor,
tx_broadcaster,
router,
message_router,
logger,
config,
channel_monitors: hash_map_from_iter(
channel_monitors.drain(..).map(|monitor| (monitor.channel_id(), monitor)),
),
}
}
}
impl<
'a,
M: Deref,
T: Deref,
ES: Deref,
NS: Deref,
SP: Deref,
F: Deref,
R: Deref,
MR: Deref,
L: Deref + Clone,
> ReadableArgs<ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>>
for (BlockHash, Arc<ChannelManager<M, T, ES, NS, SP, F, R, MR, L>>)
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
fn read<Reader: io::Read>(
reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>,
) -> Result<Self, DecodeError> {
let (blockhash, chan_manager) =
<(BlockHash, ChannelManager<M, T, ES, NS, SP, F, R, MR, L>)>::read(reader, args)?;
Ok((blockhash, Arc::new(chan_manager)))
}
}
impl<
'a,
M: Deref,
T: Deref,
ES: Deref,
NS: Deref,
SP: Deref,
F: Deref,
R: Deref,
MR: Deref,
L: Deref + Clone,
> ReadableArgs<ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>>
for (BlockHash, ChannelManager<M, T, ES, NS, SP, F, R, MR, L>)
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
fn read<Reader: io::Read>(
reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>,
) -> Result<Self, DecodeError> {
let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
let chain_hash: ChainHash = Readable::read(reader)?;
let best_block_height: u32 = Readable::read(reader)?;
let best_block_hash: BlockHash = Readable::read(reader)?;
let empty_peer_state = || PeerState {
channel_by_id: new_hash_map(),
inbound_channel_request_by_id: new_hash_map(),
latest_features: InitFeatures::empty(),
pending_msg_events: Vec::new(),
in_flight_monitor_updates: BTreeMap::new(),
monitor_update_blocked_actions: BTreeMap::new(),
actions_blocking_raa_monitor_updates: BTreeMap::new(),
closed_channel_monitor_update_ids: BTreeMap::new(),
peer_storage: Vec::new(),
is_connected: false,
};
let mut failed_htlcs = Vec::new();
let channel_count: u64 = Readable::read(reader)?;
let mut channel_id_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128));
let mut per_peer_state = hash_map_with_capacity(cmp::min(
channel_count as usize,
MAX_ALLOC_SIZE / mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>(),
));
let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
let mut channel_closures = VecDeque::new();
let mut close_background_events = Vec::new();
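// Read each funded channel and reconcile it against its ChannelMonitor,
// force-closing any channel the monitor shows to be stale.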
for _ in 0..channel_count {
let mut channel: FundedChannel<SP> = FundedChannel::read(
reader,
(
&args.entropy_source,
&args.signer_provider,
&provided_channel_type_features(&args.config),
),
)?;
let logger = WithChannelContext::from(&args.logger, &channel.context, None);
let channel_id = channel.context.channel_id();
channel_id_set.insert(channel_id);
if let Some(ref mut monitor) = args.channel_monitors.get_mut(&channel_id) {
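// The manager's copy of the channel is stale if it is behind the monitor on
// any commitment number or on the monitor update id.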
if channel.get_cur_holder_commitment_transaction_number()
> monitor.get_cur_holder_commitment_number()
|| channel.get_revoked_counterparty_commitment_transaction_number()
> monitor.get_min_seen_secret()
|| channel.get_cur_counterparty_commitment_transaction_number()
> monitor.get_cur_counterparty_commitment_number()
|| channel.context.get_latest_monitor_update_id()
< monitor.get_latest_update_id()
{
log_error!(
logger,
"A ChannelManager is stale compared to the current ChannelMonitor!"
);
log_error!(logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
if channel.context.get_latest_monitor_update_id()
< monitor.get_latest_update_id()
{
log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
&channel.context.channel_id(), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
}
if channel.get_cur_holder_commitment_transaction_number()
> monitor.get_cur_holder_commitment_number()
{
log_error!(logger, " The ChannelMonitor for channel {} is at holder commitment number {} but the ChannelManager is at holder commitment number {}.",
&channel.context.channel_id(), monitor.get_cur_holder_commitment_number(), channel.get_cur_holder_commitment_transaction_number());
}
if channel.get_revoked_counterparty_commitment_transaction_number()
> monitor.get_min_seen_secret()
{
log_error!(logger, " The ChannelMonitor for channel {} is at revoked counterparty transaction number {} but the ChannelManager is at revoked counterparty transaction number {}.",
&channel.context.channel_id(), monitor.get_min_seen_secret(), channel.get_revoked_counterparty_commitment_transaction_number());
}
if channel.get_cur_counterparty_commitment_transaction_number()
> monitor.get_cur_counterparty_commitment_number()
{
log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
&channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
}
let shutdown_result =
channel.force_shutdown(ClosureReason::OutdatedChannelManager);
if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
return Err(DecodeError::InvalidValue);
}
if let Some((counterparty_node_id, funding_txo, channel_id, mut update)) =
shutdown_result.monitor_update
{
let latest_update_id = monitor.get_latest_update_id().saturating_add(1);
update.update_id = latest_update_id;
per_peer_state
.entry(counterparty_node_id)
.or_insert_with(|| Mutex::new(empty_peer_state()))
.lock()
.unwrap()
.closed_channel_monitor_update_ids
.entry(channel_id)
.and_modify(|v| *v = cmp::max(latest_update_id, *v))
.or_insert(latest_update_id);
close_background_events.push(
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id,
funding_txo,
channel_id,
update,
},
);
}
for (source, hash, cp_id, chan_id) in shutdown_result.dropped_outbound_htlcs {
let reason = LocalHTLCFailureReason::ChannelClosed;
failed_htlcs.push((source, hash, cp_id, chan_id, reason, None));
}
channel_closures.push_back((
events::Event::ChannelClosed {
channel_id: channel.context.channel_id(),
user_channel_id: channel.context.get_user_id(),
reason: ClosureReason::OutdatedChannelManager,
counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
channel_capacity_sats: Some(channel.funding.get_value_satoshis()),
channel_funding_txo: channel.funding.get_funding_txo(),
last_local_balance_msat: Some(channel.funding.get_value_to_self_msat()),
},
None,
));
for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
let mut found_htlc = false;
for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() {
if *channel_htlc_source == monitor_htlc_source {
found_htlc = true;
break;
}
}
if !found_htlc {
let logger = WithChannelContext::from(
&args.logger,
&channel.context,
Some(*payment_hash),
);
log_info!(logger,
"Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
&channel.context.channel_id(), &payment_hash);
failed_htlcs.push((
channel_htlc_source.clone(),
*payment_hash,
channel.context.get_counterparty_node_id(),
channel.context.channel_id(),
LocalHTLCFailureReason::ChannelClosed,
None,
));
}
}
} else {
channel.on_startup_drop_completed_blocked_mon_updates_through(
&logger,
monitor.get_latest_update_id(),
);
log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {} with {} blocked updates",
&channel.context.channel_id(), channel.context.get_latest_monitor_update_id(),
monitor.get_latest_update_id(), channel.blocked_monitor_updates_pending());
if let Some(short_channel_id) = channel.funding.get_short_channel_id() {
short_to_chan_info.insert(
short_channel_id,
(
channel.context.get_counterparty_node_id(),
channel.context.channel_id(),
),
);
}
for short_channel_id in channel.context.historical_scids() {
let cp_id = channel.context.get_counterparty_node_id();
let chan_id = channel.context.channel_id();
short_to_chan_info.insert(*short_channel_id, (cp_id, chan_id));
}
per_peer_state
.entry(channel.context.get_counterparty_node_id())
.or_insert_with(|| Mutex::new(empty_peer_state()))
.get_mut()
.unwrap()
.channel_by_id
.insert(channel.context.channel_id(), Channel::from(channel));
}
} else if channel.is_awaiting_initial_mon_persist() {
channel_closures.push_back((
events::Event::ChannelClosed {
channel_id: channel.context.channel_id(),
user_channel_id: channel.context.get_user_id(),
reason: ClosureReason::DisconnectedPeer,
counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
channel_capacity_sats: Some(channel.funding.get_value_satoshis()),
channel_funding_txo: channel.funding.get_funding_txo(),
last_local_balance_msat: Some(channel.funding.get_value_to_self_msat()),
},
None,
));
} else {
log_error!(
logger,
"Missing ChannelMonitor for channel {} needed by ChannelManager.",
&channel.context.channel_id()
);
log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
log_error!(
logger,
" Without the ChannelMonitor we cannot continue without risking funds."
);
log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
return Err(DecodeError::InvalidValue);
}
}
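// Any monitor without a corresponding channel belongs to a closed channel;
// queue a force-close update for it if the monitor still accepts updates.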
for (channel_id, monitor) in args.channel_monitors.iter() {
if !channel_id_set.contains(channel_id) {
let mut should_queue_fc_update = false;
let counterparty_node_id = monitor.get_counterparty_node_id();
if !monitor.no_further_updates_allowed() || monitor.get_latest_update_id() > 1 {
should_queue_fc_update = !monitor.no_further_updates_allowed();
let mut latest_update_id = monitor.get_latest_update_id();
if should_queue_fc_update {
latest_update_id = latest_update_id.saturating_add(1);
}
per_peer_state
.entry(counterparty_node_id)
.or_insert_with(|| Mutex::new(empty_peer_state()))
.lock()
.unwrap()
.closed_channel_monitor_update_ids
.entry(monitor.channel_id())
.and_modify(|v| *v = cmp::max(latest_update_id, *v))
.or_insert(latest_update_id);
}
if !should_queue_fc_update {
continue;
}
let logger = WithChannelMonitor::from(&args.logger, monitor, None);
let channel_id = monitor.channel_id();
log_info!(
logger,
"Queueing monitor update to ensure missing channel {} is force closed",
&channel_id
);
let monitor_update = ChannelMonitorUpdate {
update_id: monitor.get_latest_update_id().saturating_add(1),
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed {
should_broadcast: true,
}],
channel_id: Some(monitor.channel_id()),
};
let funding_txo = monitor.get_funding_txo();
let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id,
funding_txo,
channel_id,
update: monitor_update,
};
close_background_events.push(update);
}
}
const MAX_ALLOC_SIZE: usize = 1024 * 64;
let forward_htlcs_count: u64 = Readable::read(reader)?;
let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128));
for _ in 0..forward_htlcs_count {
let short_channel_id = Readable::read(reader)?;
let pending_forwards_count: u64 = Readable::read(reader)?;
let mut pending_forwards = Vec::with_capacity(cmp::min(
pending_forwards_count as usize,
MAX_ALLOC_SIZE / mem::size_of::<HTLCForwardInfo>(),
));
for _ in 0..pending_forwards_count {
pending_forwards.push(Readable::read(reader)?);
}
forward_htlcs.insert(short_channel_id, pending_forwards);
}
let claimable_htlcs_count: u64 = Readable::read(reader)?;
let mut claimable_htlcs_list =
Vec::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
for _ in 0..claimable_htlcs_count {
let payment_hash = Readable::read(reader)?;
let previous_hops_len: u64 = Readable::read(reader)?;
let mut previous_hops = Vec::with_capacity(cmp::min(
previous_hops_len as usize,
MAX_ALLOC_SIZE / mem::size_of::<ClaimableHTLC>(),
));
for _ in 0..previous_hops_len {
previous_hops.push(<ClaimableHTLC as Readable>::read(reader)?);
}
claimable_htlcs_list.push((payment_hash, previous_hops));
}
let peer_count: u64 = Readable::read(reader)?;
for _ in 0..peer_count {
let peer_pubkey: PublicKey = Readable::read(reader)?;
let latest_features = Readable::read(reader)?;
if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) {
peer_state.get_mut().unwrap().latest_features = latest_features;
}
}
let event_count: u64 = Readable::read(reader)?;
let mut pending_events_read: VecDeque<(events::Event, Option<EventCompletionAction>)> =
VecDeque::with_capacity(cmp::min(
event_count as usize,
MAX_ALLOC_SIZE / mem::size_of::<(events::Event, Option<EventCompletionAction>)>(),
));
for _ in 0..event_count {
match MaybeReadable::read(reader)? {
Some(event) => pending_events_read.push_back((event, None)),
None => continue,
}
}
let background_event_count: u64 = Readable::read(reader)?;
for _ in 0..background_event_count {
match <u8 as Readable>::read(reader)? {
0 => {
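// Read and discard: legacy background events are regenerated from the
// monitors on startup instead.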
let _: OutPoint = Readable::read(reader)?;
let _: ChannelMonitorUpdate = Readable::read(reader)?;
},
_ => return Err(DecodeError::InvalidValue),
}
}
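// An obsolete node announcement serial occupies the slot before the
// timestamp; read and discard it.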
let _last_node_announcement_serial: u32 = Readable::read(reader)?;
let highest_seen_timestamp: u32 = Readable::read(reader)?;
let pending_inbound_payment_count: u64 = Readable::read(reader)?;
for _ in 0..pending_inbound_payment_count {
let payment_hash: PaymentHash = Readable::read(reader)?;
let logger = WithContext::from(&args.logger, None, None, Some(payment_hash));
let inbound: PendingInboundPayment = Readable::read(reader)?;
log_warn!(
logger,
"Ignoring deprecated pending inbound payment with payment hash {}: {:?}",
payment_hash,
inbound
);
}
let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?;
let mut pending_outbound_payments_compat: HashMap<PaymentId, PendingOutboundPayment> =
hash_map_with_capacity(cmp::min(
pending_outbound_payments_count_compat as usize,
MAX_ALLOC_SIZE / 32,
));
for _ in 0..pending_outbound_payments_count_compat {
let session_priv = Readable::read(reader)?;
let payment = PendingOutboundPayment::Legacy {
session_privs: hash_set_from_iter([session_priv]),
};
if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() {
return Err(DecodeError::InvalidValue);
};
}
let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> =
None;
let mut pending_outbound_payments = None;
let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> =
Some(new_hash_map());
let mut received_network_pubkey: Option<PublicKey> = None;
let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
let mut probing_cookie_secret: Option<[u8; 32]> = None;
let mut claimable_htlc_purposes = None;
let mut claimable_htlc_onion_fields = None;
let mut pending_claiming_payments = Some(new_hash_map());
let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> =
Some(Vec::new());
let mut events_override = None;
let mut legacy_in_flight_monitor_updates: Option<
HashMap<(PublicKey, OutPoint), Vec<ChannelMonitorUpdate>>,
> = None;
let mut in_flight_monitor_updates: Option<
HashMap<(PublicKey, ChannelId), Vec<ChannelMonitorUpdate>>,
> = None;
let mut decode_update_add_htlcs: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> = None;
let mut inbound_payment_id_secret = None;
let mut peer_storage_dir: Option<Vec<(PublicKey, Vec<u8>)>> = None;
let mut async_receive_offer_cache: AsyncReceiveOfferCache = AsyncReceiveOfferCache::new();
read_tlv_fields!(reader, {
(1, pending_outbound_payments_no_retry, option),
(2, pending_intercepted_htlcs, option),
(3, pending_outbound_payments, option),
(4, pending_claiming_payments, option),
(5, received_network_pubkey, option),
(6, monitor_update_blocked_actions_per_peer, option),
(7, fake_scid_rand_bytes, option),
(8, events_override, option),
(9, claimable_htlc_purposes, optional_vec),
(10, legacy_in_flight_monitor_updates, option),
(11, probing_cookie_secret, option),
(13, claimable_htlc_onion_fields, optional_vec),
(14, decode_update_add_htlcs, option),
(15, inbound_payment_id_secret, option),
(17, in_flight_monitor_updates, option),
(19, peer_storage_dir, optional_vec),
(21, async_receive_offer_cache, (default_value, async_receive_offer_cache)),
});
let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map());
let peer_storage_dir: Vec<(PublicKey, Vec<u8>)> = peer_storage_dir.unwrap_or_else(Vec::new);
if fake_scid_rand_bytes.is_none() {
fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
}
if probing_cookie_secret.is_none() {
probing_cookie_secret = Some(args.entropy_source.get_secure_random_bytes());
}
if inbound_payment_id_secret.is_none() {
inbound_payment_id_secret = Some(args.entropy_source.get_secure_random_bytes());
}
if let Some(events) = events_override {
pending_events_read = events;
}
if !channel_closures.is_empty() {
pending_events_read.append(&mut channel_closures);
}
if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
pending_outbound_payments = Some(pending_outbound_payments_compat);
} else if pending_outbound_payments.is_none() {
let mut outbounds = new_hash_map();
for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() {
outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
}
pending_outbound_payments = Some(outbounds);
}
let pending_outbounds = OutboundPayments::new(pending_outbound_payments.unwrap());
for (peer_pubkey, peer_storage) in peer_storage_dir {
if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) {
peer_state.get_mut().unwrap().peer_storage = peer_storage;
}
}
if let Some(legacy_in_flight_upds) = legacy_in_flight_monitor_updates {
if legacy_in_flight_upds.is_empty() {
return Err(DecodeError::InvalidValue);
}
if in_flight_monitor_updates.is_none() {
let in_flight_upds =
in_flight_monitor_updates.get_or_insert_with(|| new_hash_map());
for ((counterparty_node_id, funding_txo), updates) in legacy_in_flight_upds {
let channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
in_flight_upds.insert((counterparty_node_id, channel_id), updates);
}
} else {
if in_flight_monitor_updates.as_ref().unwrap().is_empty() {
return Err(DecodeError::InvalidValue);
}
}
}
let mut pending_background_events = Vec::new();
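// Replays in-flight ChannelMonitorUpdates that had not completed when the
// manager was written: updates the monitor already contains are marked
// complete, the rest are queued as background events.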
macro_rules! handle_in_flight_updates {
($counterparty_node_id: expr, $chan_in_flight_upds: expr, $monitor: expr,
$peer_state: expr, $logger: expr, $channel_info_log: expr
) => { {
let mut max_in_flight_update_id = 0;
let num_updates_completed = $chan_in_flight_upds
.iter()
.filter(|update| {
max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
update.update_id <= $monitor.get_latest_update_id()
})
.count();
if num_updates_completed > 0 {
log_debug!(
$logger,
"{} ChannelMonitorUpdates completed after ChannelManager was last serialized",
num_updates_completed,
);
}
let all_updates_completed = num_updates_completed == $chan_in_flight_upds.len();
let funding_txo = $monitor.get_funding_txo();
if all_updates_completed {
log_debug!($logger, "All monitor updates completed since the ChannelManager was last serialized");
pending_background_events.push(
BackgroundEvent::MonitorUpdatesComplete {
counterparty_node_id: $counterparty_node_id,
channel_id: $monitor.channel_id(),
highest_update_id_completed: max_in_flight_update_id,
});
} else {
$chan_in_flight_upds.retain(|update| {
let replay = update.update_id > $monitor.get_latest_update_id();
if replay {
log_debug!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
update.update_id, $channel_info_log, &$monitor.channel_id());
pending_background_events.push(
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id: $counterparty_node_id,
funding_txo: funding_txo,
channel_id: $monitor.channel_id(),
update: update.clone(),
}
);
}
replay
});
$peer_state.closed_channel_monitor_update_ids.entry($monitor.channel_id())
.and_modify(|v| *v = cmp::max(max_in_flight_update_id, *v))
.or_insert(max_in_flight_update_id);
}
if $peer_state.in_flight_monitor_updates.insert($monitor.channel_id(), (funding_txo, $chan_in_flight_upds)).is_some() {
log_error!($logger, "Duplicate in-flight monitor update set for the same channel!");
return Err(DecodeError::InvalidValue);
}
max_in_flight_update_id
} }
}
for (counterparty_id, peer_state_mtx) in per_peer_state.iter_mut() {
let mut peer_state_lock = peer_state_mtx.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for (chan_id, chan) in peer_state.channel_by_id.iter() {
if let Some(funded_chan) = chan.as_funded() {
let logger = WithChannelContext::from(&args.logger, &funded_chan.context, None);
let monitor = args
.channel_monitors
.get(chan_id)
.expect("We already checked for monitor presence when loading channels");
let mut max_in_flight_update_id = monitor.get_latest_update_id();
if let Some(in_flight_upds) = &mut in_flight_monitor_updates {
if let Some(mut chan_in_flight_upds) =
in_flight_upds.remove(&(*counterparty_id, *chan_id))
{
max_in_flight_update_id = cmp::max(
max_in_flight_update_id,
handle_in_flight_updates!(
*counterparty_id,
chan_in_flight_upds,
monitor,
peer_state,
logger,
""
),
);
}
}
if funded_chan.get_latest_unblocked_monitor_update_id()
> max_in_flight_update_id
{
log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
chan_id, monitor.get_latest_update_id(), max_in_flight_update_id);
log_error!(
logger,
" but the ChannelManager is at update_id {}.",
funded_chan.get_latest_unblocked_monitor_update_id()
);
log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
return Err(DecodeError::DangerousValue);
}
} else {
debug_assert!(false);
return Err(DecodeError::InvalidValue);
}
}
}
if let Some(in_flight_upds) = in_flight_monitor_updates {
for ((counterparty_id, channel_id), mut chan_in_flight_updates) in in_flight_upds {
let logger =
WithContext::from(&args.logger, Some(counterparty_id), Some(channel_id), None);
if let Some(monitor) = args.channel_monitors.get(&channel_id) {
let peer_state_mutex = per_peer_state
.entry(counterparty_id)
.or_insert_with(|| Mutex::new(empty_peer_state()));
let mut peer_state = peer_state_mutex.lock().unwrap();
handle_in_flight_updates!(
counterparty_id,
chan_in_flight_updates,
monitor,
peer_state,
logger,
"closed "
);
} else {
log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
log_error!(
logger,
" The ChannelMonitor for channel {} is missing.",
channel_id
);
log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
log_error!(
logger,
" Pending in-flight updates are: {:?}",
chan_in_flight_updates
);
return Err(DecodeError::InvalidValue);
}
}
}
pending_background_events.reserve(close_background_events.len());
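// Merge the regenerated force-close updates into the background events,
// skipping channels that already have a force-close in flight and bumping
// update ids past any updates replayed above.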
'each_bg_event: for mut new_event in close_background_events {
if let BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id,
funding_txo,
channel_id,
update,
} = &mut new_event
{
debug_assert_eq!(update.updates.len(), 1);
debug_assert!(matches!(
update.updates[0],
ChannelMonitorUpdateStep::ChannelForceClosed { .. }
));
let mut updated_id = false;
for pending_event in pending_background_events.iter() {
if let BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id: pending_cp,
funding_txo: pending_funding,
channel_id: pending_chan_id,
update: pending_update,
} = pending_event
{
let for_same_channel = counterparty_node_id == pending_cp
&& funding_txo == pending_funding
&& channel_id == pending_chan_id;
if for_same_channel {
debug_assert!(update.update_id >= pending_update.update_id);
if pending_update.updates.iter().any(|upd| {
matches!(upd, ChannelMonitorUpdateStep::ChannelForceClosed { .. })
}) {
continue 'each_bg_event;
}
update.update_id = pending_update.update_id.saturating_add(1);
updated_id = true;
}
}
}
let mut per_peer_state = per_peer_state
.get(counterparty_node_id)
.expect("If we have pending updates for a channel it must have an entry")
.lock()
.unwrap();
if updated_id {
per_peer_state
.closed_channel_monitor_update_ids
.entry(*channel_id)
.and_modify(|v| *v = cmp::max(update.update_id, *v))
.or_insert(update.update_id);
}
let in_flight_updates = &mut per_peer_state
.in_flight_monitor_updates
.entry(*channel_id)
.or_insert_with(|| (*funding_txo, Vec::new()))
.1;
debug_assert!(!in_flight_updates.iter().any(|upd| upd == update));
in_flight_updates.push(update.clone());
}
pending_background_events.push(new_event);
}
let mut pending_claims_to_replay = Vec::new();
{
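// First pass over the monitors: re-add outbound payments for closed channels
// so their HTLCs can still be resolved.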
for (channel_id, monitor) in args.channel_monitors.iter() {
let mut is_channel_closed = true;
let counterparty_node_id = monitor.get_counterparty_node_id();
if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state_lock = peer_state_mtx.lock().unwrap();
let peer_state = &mut *peer_state_lock;
is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id);
}
if is_channel_closed {
for (htlc_source, (htlc, _)) in monitor.get_all_current_outbound_htlcs() {
let logger = WithChannelMonitor::from(
&args.logger,
monitor,
Some(htlc.payment_hash),
);
if let HTLCSource::OutboundRoute {
payment_id, session_priv, path, ..
} = htlc_source
{
if path.hops.is_empty() {
log_error!(logger, "Got an empty path for a pending payment");
return Err(DecodeError::InvalidValue);
}
let mut session_priv_bytes = [0; 32];
session_priv_bytes[..].copy_from_slice(&session_priv[..]);
pending_outbounds.insert_from_monitor_on_startup(
payment_id,
htlc.payment_hash,
session_priv_bytes,
&path,
best_block_height,
&logger,
);
}
}
}
}
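// Second pass: replay preimages and failures already resolved on chain, and
// prune pending forwards destined for closed channels.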
for (channel_id, monitor) in args.channel_monitors.iter() {
let mut is_channel_closed = true;
let counterparty_node_id = monitor.get_counterparty_node_id();
if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state_lock = peer_state_mtx.lock().unwrap();
let peer_state = &mut *peer_state_lock;
is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id);
}
if is_channel_closed {
for (htlc_source, (htlc, preimage_opt)) in
monitor.get_all_current_outbound_htlcs()
{
let logger = WithChannelMonitor::from(
&args.logger,
monitor,
Some(htlc.payment_hash),
);
let htlc_id = SentHTLCId::from_source(&htlc_source);
match htlc_source {
HTLCSource::PreviousHopData(prev_hop_data) => {
let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
info.prev_funding_outpoint == prev_hop_data.outpoint
&& info.prev_htlc_id == prev_hop_data.htlc_id
};
decode_update_add_htlcs.retain(|src_outb_alias, update_add_htlcs| {
update_add_htlcs.retain(|update_add_htlc| {
let matches = *src_outb_alias == prev_hop_data.prev_outbound_scid_alias &&
update_add_htlc.htlc_id == prev_hop_data.htlc_id;
if matches {
log_info!(logger, "Removing pending to-decode HTLC with hash {} as it was forwarded to the closed channel {}",
&htlc.payment_hash, &monitor.channel_id());
}
!matches
});
!update_add_htlcs.is_empty()
});
forward_htlcs.retain(|_, forwards| {
forwards.retain(|forward| {
if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
if pending_forward_matches_htlc(&htlc_info) {
log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
&htlc.payment_hash, &monitor.channel_id());
false
} else { true }
} else { true }
});
!forwards.is_empty()
});
pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
if pending_forward_matches_htlc(&htlc_info) {
log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
&htlc.payment_hash, &monitor.channel_id());
pending_events_read.retain(|(event, _)| {
if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
intercepted_id != ev_id
} else { true }
});
false
} else { true }
});
},
HTLCSource::OutboundRoute {
payment_id,
session_priv,
path,
bolt12_invoice,
..
} => {
if let Some(preimage) = preimage_opt {
let pending_events = Mutex::new(pending_events_read);
let update = PaymentCompleteUpdate {
counterparty_node_id: monitor.get_counterparty_node_id(),
channel_funding_outpoint: monitor.get_funding_txo(),
channel_id: monitor.channel_id(),
htlc_id,
};
let mut compl_action = Some(
EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(update)
);
pending_outbounds.claim_htlc(
payment_id,
preimage,
bolt12_invoice,
session_priv,
path,
true,
&mut compl_action,
&pending_events,
&logger,
);
let have_action = if compl_action.is_some() {
let pending_events = pending_events.lock().unwrap();
pending_events.iter().any(|(_, act)| *act == compl_action)
} else {
false
};
if !have_action && compl_action.is_some() {
let mut peer_state = per_peer_state
.get(&counterparty_node_id)
.map(|state| state.lock().unwrap())
.expect("Channels originating a preimage must have peer state");
let update_id = peer_state
.closed_channel_monitor_update_ids
.get_mut(channel_id)
.expect("Channels originating a preimage must have a monitor");
*update_id = update_id.saturating_add(1);
pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id: monitor.get_counterparty_node_id(),
funding_txo: monitor.get_funding_txo(),
channel_id: monitor.channel_id(),
update: ChannelMonitorUpdate {
update_id: *update_id,
channel_id: Some(monitor.channel_id()),
updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete {
htlc: htlc_id,
}],
},
});
}
pending_events_read = pending_events.into_inner().unwrap();
}
},
}
}
for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() {
let logger =
WithChannelMonitor::from(&args.logger, monitor, Some(payment_hash));
log_info!(
logger,
"Failing HTLC with payment hash {} as it was resolved on-chain.",
payment_hash
);
let completion_action = Some(PaymentCompleteUpdate {
counterparty_node_id: monitor.get_counterparty_node_id(),
channel_funding_outpoint: monitor.get_funding_txo(),
channel_id: monitor.channel_id(),
htlc_id: SentHTLCId::from_source(&htlc_source),
});
failed_htlcs.push((
htlc_source,
payment_hash,
monitor.get_counterparty_node_id(),
monitor.channel_id(),
LocalHTLCFailureReason::OnChainTimeout,
completion_action,
));
}
}
let mut fail_read = false;
let outbound_claimed_htlcs_iter = monitor.get_all_current_outbound_htlcs()
.into_iter()
.filter_map(|(htlc_source, (htlc, preimage_opt))| {
if let HTLCSource::PreviousHopData(prev_hop) = &htlc_source {
if let Some(payment_preimage) = preimage_opt {
let inbound_edge_monitor = args.channel_monitors.get(&prev_hop.channel_id);
let inbound_edge_monitor = if let Some(monitor) = inbound_edge_monitor {
monitor
} else {
return None;
};
let inbound_edge_balances = inbound_edge_monitor.get_claimable_balances();
if inbound_edge_balances.is_empty() {
return None;
}
if prev_hop.counterparty_node_id.is_none() {
let htlc_payment_hash: PaymentHash = payment_preimage.into();
let logger = WithChannelMonitor::from(
&args.logger,
monitor,
Some(htlc_payment_hash),
);
let balance_could_incl_htlc = |bal| match bal {
&Balance::ClaimableOnChannelClose { .. } => {
true
},
&Balance::MaybePreimageClaimableHTLC { payment_hash, .. } => {
payment_hash == htlc_payment_hash
},
_ => false,
};
let htlc_may_be_in_balances =
inbound_edge_balances.iter().any(balance_could_incl_htlc);
if !htlc_may_be_in_balances {
return None;
}
if short_to_chan_info.get(&prev_hop.prev_outbound_scid_alias).is_none() {
log_error!(logger,
"We need to replay the HTLC claim for payment_hash {} (preimage {}) but cannot do so as the HTLC was forwarded prior to LDK 0.0.124.\
All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1",
htlc_payment_hash,
payment_preimage,
);
fail_read = true;
}
log_error!(logger,
"We need to replay the HTLC claim for payment_hash {} (preimage {}) but don't have all the required information to do so reliably.\
As long as the channel for the inbound edge of the forward remains open, this may work okay, but we may panic at runtime!\
All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1\
Continuing anyway, though panics may occur!",
htlc_payment_hash,
payment_preimage,
);
}
Some((htlc_source, payment_preimage, htlc.amount_msat,
is_channel_closed, monitor.get_counterparty_node_id(),
monitor.get_funding_txo(), monitor.channel_id()))
} else { None }
} else {
None
}
});
for tuple in outbound_claimed_htlcs_iter {
pending_claims_to_replay.push(tuple);
}
if fail_read {
return Err(DecodeError::InvalidValue);
}
}
}
let expanded_inbound_key = args.node_signer.get_expanded_key();
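// Reassemble claimable payments: newer serializations pair each HTLC set with
// an explicit purpose (and possibly onion fields); older ones require
// reconstructing the purpose from the HTLCs themselves.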
let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len());
if let Some(purposes) = claimable_htlc_purposes {
if purposes.len() != claimable_htlcs_list.len() {
return Err(DecodeError::InvalidValue);
}
if let Some(onion_fields) = claimable_htlc_onion_fields {
if onion_fields.len() != claimable_htlcs_list.len() {
return Err(DecodeError::InvalidValue);
}
for (purpose, (onion, (payment_hash, htlcs))) in purposes
.into_iter()
.zip(onion_fields.into_iter().zip(claimable_htlcs_list.into_iter()))
{
let claimable = ClaimablePayment { purpose, htlcs, onion_fields: onion };
let existing_payment = claimable_payments.insert(payment_hash, claimable);
if existing_payment.is_some() {
return Err(DecodeError::InvalidValue);
}
}
} else {
for (purpose, (payment_hash, htlcs)) in
purposes.into_iter().zip(claimable_htlcs_list.into_iter())
{
let claimable = ClaimablePayment { purpose, htlcs, onion_fields: None };
let existing_payment = claimable_payments.insert(payment_hash, claimable);
if existing_payment.is_some() {
return Err(DecodeError::InvalidValue);
}
}
}
} else {
for (payment_hash, htlcs) in claimable_htlcs_list.drain(..) {
if htlcs.is_empty() {
return Err(DecodeError::InvalidValue);
}
let purpose = match &htlcs[0].onion_payload {
OnionPayload::Invoice { _legacy_hop_data } => {
if let Some(hop_data) = _legacy_hop_data {
events::PaymentPurpose::Bolt11InvoicePayment {
payment_preimage: match inbound_payment::verify(
payment_hash,
&hop_data,
0,
&expanded_inbound_key,
&args.logger,
) {
Ok((payment_preimage, _)) => payment_preimage,
Err(()) => {
log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", &payment_hash);
return Err(DecodeError::InvalidValue);
},
},
payment_secret: hop_data.payment_secret,
}
} else {
return Err(DecodeError::InvalidValue);
}
},
OnionPayload::Spontaneous(payment_preimage) => {
events::PaymentPurpose::SpontaneousPayment(*payment_preimage)
},
};
claimable_payments
.insert(payment_hash, ClaimablePayment { purpose, htlcs, onion_fields: None });
}
}
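// Payments received on pre-0.0.124 channels lack the counterparty node id
// needed to claim them reliably; refuse to load if the inbound channel is
// already gone.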
for (payment_hash, payment) in claimable_payments.iter() {
for htlc in payment.htlcs.iter() {
if htlc.prev_hop.counterparty_node_id.is_some() {
continue;
}
if short_to_chan_info.get(&htlc.prev_hop.prev_outbound_scid_alias).is_some() {
log_error!(args.logger,
"We do not have the required information to claim a pending payment with payment hash {} reliably.\
As long as the channel for the inbound edge of the forward remains open, this may work okay, but we may panic at runtime!\
All HTLCs that were received by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1\
Continuing anyway, though panics may occur!",
payment_hash,
);
} else {
log_error!(args.logger,
"We do not have the required information to claim a pending payment with payment hash {}.\
All HTLCs that were received by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1",
payment_hash,
);
return Err(DecodeError::InvalidValue);
}
}
}
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&args.entropy_source.get_secure_random_bytes());
let our_network_pubkey = match args.node_signer.get_node_id(Recipient::Node) {
Ok(key) => key,
Err(()) => return Err(DecodeError::InvalidValue),
};
if let Some(network_pubkey) = received_network_pubkey {
if network_pubkey != our_network_pubkey {
log_error!(args.logger, "Key that was generated does not match the existing key.");
return Err(DecodeError::InvalidValue);
}
}
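// Ensure every funded channel has a unique outbound SCID alias, generating a
// fresh fake one where older serializations left zero.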
let mut outbound_scid_aliases = new_hash_set();
for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for (chan_id, chan) in peer_state.channel_by_id.iter_mut() {
if let Some(funded_chan) = chan.as_funded_mut() {
let logger = WithChannelContext::from(&args.logger, &funded_chan.context, None);
if funded_chan.context.outbound_scid_alias() == 0 {
let mut outbound_scid_alias;
loop {
outbound_scid_alias = fake_scid::Namespace::OutboundAlias
.get_fake_scid(
best_block_height,
&chain_hash,
fake_scid_rand_bytes.as_ref().unwrap(),
&args.entropy_source,
);
if outbound_scid_aliases.insert(outbound_scid_alias) {
break;
}
}
funded_chan.context.set_outbound_scid_alias(outbound_scid_alias);
} else if !outbound_scid_aliases
.insert(funded_chan.context.outbound_scid_alias())
{
log_error!(
logger,
"Got duplicate outbound SCID alias; {}",
funded_chan.context.outbound_scid_alias()
);
return Err(DecodeError::InvalidValue);
}
if funded_chan.context.is_usable() {
let alias = funded_chan.context.outbound_scid_alias();
let cp_id = funded_chan.context.get_counterparty_node_id();
if short_to_chan_info.insert(alias, (cp_id, *chan_id)).is_some() {
log_error!(
logger,
"Got duplicate outbound SCID alias; {}",
funded_chan.context.outbound_scid_alias()
);
return Err(DecodeError::InvalidValue);
}
}
} else {
debug_assert!(false);
return Err(DecodeError::InvalidValue);
}
}
}
let bounded_fee_estimator = LowerBoundedFeeEstimator::new(args.fee_estimator);
for (node_id, monitor_update_blocked_actions) in
monitor_update_blocked_actions_per_peer.unwrap()
{
if let Some(peer_state) = per_peer_state.get(&node_id) {
for (channel_id, actions) in monitor_update_blocked_actions.iter() {
let logger =
WithContext::from(&args.logger, Some(node_id), Some(*channel_id), None);
for action in actions.iter() {
if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
downstream_counterparty_and_funding_outpoint:
Some(EventUnblockedChannel {
counterparty_node_id: blocked_node_id,
funding_txo: _,
channel_id: blocked_channel_id,
blocking_action,
}),
..
} = action
{
if let Some(blocked_peer_state) = per_peer_state.get(blocked_node_id) {
log_trace!(logger,
"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
blocked_channel_id);
blocked_peer_state
.lock()
.unwrap()
.actions_blocking_raa_monitor_updates
.entry(*blocked_channel_id)
.or_insert_with(Vec::new)
.push(blocking_action.clone());
} else {
// The channel we were blocking has closed; its blocked monitor update can
// never complete, so there is nothing left to re-block.
}
}
if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
..
} = action
{
debug_assert!(false, "Non-event-generating channel freeing should not appear in our queue");
}
}
}
peer_state.lock().unwrap().monitor_update_blocked_actions =
monitor_update_blocked_actions;
} else {
for actions in monitor_update_blocked_actions.values() {
for action in actions.iter() {
if matches!(action, MonitorUpdateCompletionAction::PaymentClaimed { .. }) {
// A dangling PaymentClaimed action is tolerable here: the channel may
// simply have been closed and removed after the claim completed.
} else {
let logger = WithContext::from(&args.logger, Some(node_id), None, None);
log_error!(
logger,
"Got blocked actions {:?} without a per-peer-state for {}",
monitor_update_blocked_actions,
node_id
);
return Err(DecodeError::InvalidValue);
}
}
}
}
}
let best_block = BestBlock::new(best_block_hash, best_block_height);
let flow = OffersMessageFlow::new(
chain_hash,
best_block,
our_network_pubkey,
highest_seen_timestamp,
expanded_inbound_key,
args.node_signer.get_receive_auth_key(),
secp_ctx.clone(),
args.message_router,
args.logger.clone(),
)
.with_async_payments_offers_cache(async_receive_offer_cache);
let channel_manager = ChannelManager {
chain_hash,
fee_estimator: bounded_fee_estimator,
chain_monitor: args.chain_monitor,
tx_broadcaster: args.tx_broadcaster,
router: args.router,
flow,
best_block: RwLock::new(best_block),
inbound_payment_key: expanded_inbound_key,
pending_outbound_payments: pending_outbounds,
pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
forward_htlcs: Mutex::new(forward_htlcs),
decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs),
claimable_payments: Mutex::new(ClaimablePayments {
claimable_payments,
pending_claiming_payments: pending_claiming_payments.unwrap(),
}),
outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
short_to_chan_info: FairRwLock::new(short_to_chan_info),
fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(),
probing_cookie_secret: probing_cookie_secret.unwrap(),
inbound_payment_id_secret: inbound_payment_id_secret.unwrap(),
our_network_pubkey,
secp_ctx,
highest_seen_timestamp: AtomicUsize::new(highest_seen_timestamp as usize),
per_peer_state: FairRwLock::new(per_peer_state),
#[cfg(not(any(test, feature = "_externalize_tests")))]
monitor_update_type: AtomicUsize::new(0),
pending_events: Mutex::new(pending_events_read),
pending_events_processor: AtomicBool::new(false),
pending_htlc_forwards_processor: AtomicBool::new(false),
pending_background_events: Mutex::new(pending_background_events),
total_consistency_lock: RwLock::new(()),
background_events_processed_since_startup: AtomicBool::new(false),
event_persist_notifier: Notifier::new(),
needs_persist_flag: AtomicBool::new(false),
funding_batch_states: Mutex::new(BTreeMap::new()),
pending_broadcast_messages: Mutex::new(Vec::new()),
entropy_source: args.entropy_source,
node_signer: args.node_signer,
signer_provider: args.signer_provider,
last_days_feerates: Mutex::new(VecDeque::new()),
logger: args.logger,
config: RwLock::new(args.config),
#[cfg(feature = "_test_utils")]
testing_dnssec_proof_offer_resolution_override: Mutex::new(new_hash_map()),
};
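// Replay payment claims recorded in the `ChannelMonitor`s, deduplicating MPP
// claims whose parts span multiple monitors.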
let mut processed_claims: HashSet<Vec<MPPClaimHTLCSource>> = new_hash_set();
for (_, monitor) in args.channel_monitors.iter() {
for (payment_hash, (payment_preimage, payment_claims)) in monitor.get_stored_preimages()
{
if !payment_claims.is_empty() {
for payment_claim in payment_claims {
if processed_claims.contains(&payment_claim.mpp_parts) {
continue;
}
if payment_claim.mpp_parts.is_empty() {
return Err(DecodeError::InvalidValue);
}
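// If the payment is no longer claimable and a claim with the same payment id
// is already pending, this claim has presumably been replayed already.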
{
let payments = channel_manager.claimable_payments.lock().unwrap();
if !payments.claimable_payments.contains_key(&payment_hash) {
if let Some(payment) =
payments.pending_claiming_payments.get(&payment_hash)
{
if payment.payment_id
== payment_claim.claiming_payment.payment_id
{
continue;
}
}
}
}
let mut channels_without_preimage = payment_claim
.mpp_parts
.iter()
.map(|htlc_info| (htlc_info.counterparty_node_id, htlc_info.channel_id))
.collect::<Vec<_>>();
channels_without_preimage.sort_unstable();
channels_without_preimage.dedup();
let pending_claims = PendingMPPClaim {
channels_without_preimage,
channels_with_preimage: Vec::new(),
};
let pending_claim_ptr_opt = Some(Arc::new(Mutex::new(pending_claims)));
let claim_found = channel_manager
.claimable_payments
.lock()
.unwrap()
.begin_claiming_payment(
payment_hash,
&channel_manager.node_signer,
&channel_manager.logger,
&channel_manager.inbound_payment_id_secret,
true,
);
if claim_found.is_err() {
let mut claimable_payments =
channel_manager.claimable_payments.lock().unwrap();
match claimable_payments.pending_claiming_payments.entry(payment_hash) {
hash_map::Entry::Occupied(_) => {
debug_assert!(
false,
"Entry was added in begin_claiming_payment"
);
return Err(DecodeError::InvalidValue);
},
hash_map::Entry::Vacant(entry) => {
entry.insert(payment_claim.claiming_payment);
},
}
}
for part in payment_claim.mpp_parts.iter() {
let pending_mpp_claim = pending_claim_ptr_opt.as_ref().map(|ptr| {
(
part.counterparty_node_id,
part.channel_id,
PendingMPPClaimPointer(Arc::clone(&ptr)),
)
});
let pending_claim_ptr = pending_claim_ptr_opt.as_ref().map(|ptr| {
RAAMonitorUpdateBlockingAction::ClaimedMPPPayment {
pending_claim: PendingMPPClaimPointer(Arc::clone(&ptr)),
}
});
channel_manager.claim_mpp_part(
part.into(),
payment_preimage,
None,
None,
|_, _| {
(
Some(MonitorUpdateCompletionAction::PaymentClaimed {
payment_hash,
pending_mpp_claim,
}),
pending_claim_ptr,
)
},
);
}
processed_claims.insert(payment_claim.mpp_parts);
}
} else {
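// Legacy path: older monitors stored only the preimage without claim
// metadata, so re-claim any still-claimable HTLCs directly against the
// channels and monitors.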
let per_peer_state = channel_manager.per_peer_state.read().unwrap();
let mut claimable_payments = channel_manager.claimable_payments.lock().unwrap();
let payment = claimable_payments.claimable_payments.remove(&payment_hash);
mem::drop(claimable_payments);
if let Some(payment) = payment {
log_info!(channel_manager.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", &payment_hash);
let mut claimable_amt_msat = 0;
let mut receiver_node_id = Some(our_network_pubkey);
let phantom_shared_secret = payment.htlcs[0].prev_hop.phantom_shared_secret;
if phantom_shared_secret.is_some() {
let phantom_pubkey = channel_manager
.node_signer
.get_node_id(Recipient::PhantomNode)
.expect("Failed to get node_id for phantom node recipient");
receiver_node_id = Some(phantom_pubkey)
}
for claimable_htlc in &payment.htlcs {
claimable_amt_msat += claimable_htlc.value;
let previous_channel_id = claimable_htlc.prev_hop.channel_id;
let peer_node_id = monitor.get_counterparty_node_id();
{
let peer_state_mutex = per_peer_state.get(&peer_node_id).unwrap();
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let Some(channel) = peer_state
.channel_by_id
.get_mut(&previous_channel_id)
.and_then(Channel::as_funded_mut)
{
let logger = WithChannelContext::from(
&channel_manager.logger,
&channel.context,
Some(payment_hash),
);
channel
.claim_htlc_while_disconnected_dropping_mon_update_legacy(
claimable_htlc.prev_hop.htlc_id,
payment_preimage,
&&logger,
);
}
}
if let Some(previous_hop_monitor) =
args.channel_monitors.get(&claimable_htlc.prev_hop.channel_id)
{
previous_hop_monitor.provide_payment_preimage_unsafe_legacy(
&payment_hash,
&payment_preimage,
&channel_manager.tx_broadcaster,
&channel_manager.fee_estimator,
&channel_manager.logger,
);
}
}
let mut pending_events = channel_manager.pending_events.lock().unwrap();
let payment_id =
payment.inbound_payment_id(&inbound_payment_id_secret.unwrap());
let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect();
let sender_intended_total_msat =
payment.htlcs.first().map(|htlc| htlc.total_msat);
pending_events.push_back((
events::Event::PaymentClaimed {
receiver_node_id,
payment_hash,
purpose: payment.purpose,
amount_msat: claimable_amt_msat,
htlcs,
sender_intended_total_msat,
onion_fields: payment.onion_fields,
payment_id: Some(payment_id),
},
None,
));
}
}
}
}
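// Fail backwards any HTLCs collected above from channels that were closed
// during deserialization.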
for htlc_source in failed_htlcs {
let (source, hash, counterparty_id, channel_id, failure_reason, ev_action) =
htlc_source;
let receiver =
HTLCHandlingFailureType::Forward { node_id: Some(counterparty_id), channel_id };
let reason = HTLCFailReason::from_failure_code(failure_reason);
channel_manager
.fail_htlc_backwards_internal(&source, &hash, &reason, receiver, ev_action);
}
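// Replay claims upstream for HTLCs whose preimages were learned from a
// downstream channel or monitor.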
for (
source,
preimage,
downstream_value,
downstream_closed,
downstream_node_id,
downstream_funding,
downstream_channel_id,
) in pending_claims_to_replay
{
channel_manager.claim_funds_internal(
source,
preimage,
Some(downstream_value),
None,
downstream_closed,
downstream_node_id,
downstream_funding,
downstream_channel_id,
None,
None,
None,
);
}
Ok((best_block_hash.clone(), channel_manager))
}
}
#[cfg(test)]
mod tests {
use crate::events::{ClosureReason, Event, HTLCHandlingFailureType};
use crate::ln::channelmanager::{
create_recv_pending_htlc_info, inbound_payment, HTLCForwardInfo, InterceptId, PaymentId,
RecipientOnionFields,
};
use crate::ln::functional_test_utils::*;
use crate::ln::msgs::{self, BaseMessageHandler, ChannelMessageHandler, MessageSendEvent};
use crate::ln::onion_utils::AttributionData;
use crate::ln::onion_utils::{self, LocalHTLCFailureReason};
use crate::ln::outbound_payment::Retry;
use crate::ln::types::ChannelId;
use crate::prelude::*;
use crate::routing::router::{find_route, PaymentParameters, RouteParameters};
use crate::sign::EntropySource;
use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret};
use crate::util::config::{ChannelConfig, ChannelConfigUpdate};
use crate::util::errors::APIError;
use crate::util::ser::Writeable;
use crate::util::test_utils;
use bitcoin::secp256k1::ecdh::SharedSecret;
use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
use core::sync::atomic::Ordering;
#[test]
#[rustfmt::skip]
fn test_notify_limits() {
let chanmon_cfgs = create_chanmon_cfgs(3);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
assert!(nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
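// Opening a channel between nodes 0 and 1 should wake both of them but leave
// node 2 untouched.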
let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1);
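// Double the announced fees so the updates delivered below differ from the
// ones the nodes already have.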
chan.0.contents.fee_base_msat *= 2;
chan.1.contents.fee_base_msat *= 2;
let node_a_chan_info = nodes[0].node.list_channels_with_counterparty(
&nodes[1].node.get_our_node_id()).pop().unwrap();
let node_b_chan_info = nodes[1].node.list_channels_with_counterparty(
&nodes[0].node.get_our_node_id()).pop().unwrap();
assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
assert!(!nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
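// The polls above consumed the pending notifications, so the futures should
// now be pending again.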
assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
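// An update for a channel node 2 is not a party to should not require a
// persist.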
nodes[2].node.handle_channel_update(nodes[1].node.get_our_node_id(), &chan.0);
nodes[2].node.handle_channel_update(nodes[1].node.get_our_node_id(), &chan.1);
assert!(!nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
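// Likewise, updates delivered by a peer other than the channel counterparty
// are ignored.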
nodes[0].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.0);
nodes[0].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.1);
nodes[1].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.0);
nodes[1].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.1);
assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
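// Deliver each node its own update first (which should be ignored), then its
// counterparty's update (which should wake it and change its channel info).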
let as_node_one = nodes[0].node.get_our_node_id().serialize()[..] < nodes[1].node.get_our_node_id().serialize()[..];
let as_update = if as_node_one == (chan.0.contents.channel_flags & 1 == 0) { &chan.0 } else { &chan.1 };
let bs_update = if as_node_one == (chan.0.contents.channel_flags & 1 == 0) { &chan.1 } else { &chan.0 };
nodes[0].node.handle_channel_update(nodes[1].node.get_our_node_id(), &as_update);
nodes[1].node.handle_channel_update(nodes[0].node.get_our_node_id(), &bs_update);
assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
nodes[0].node.handle_channel_update(nodes[1].node.get_our_node_id(), &bs_update);
nodes[1].node.handle_channel_update(nodes[0].node.get_our_node_id(), &as_update);
assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
}
#[test]
#[rustfmt::skip]
fn test_keysend_dup_hash_partial_mpp() {
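// Test that a keysend payment reusing the payment hash of a pending partial
// MPP payment fails, while the MPP payment itself can still complete.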
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes(&nodes, 0, 1);
let (route, our_payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
let mut mpp_route = route.clone();
mpp_route.paths.push(mpp_route.paths[0].clone());
let payment_id = PaymentId([42; 32]);
let cur_height = CHAN_CONFIRM_DEPTH + 1;
let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
RecipientOnionFields::secret_only(payment_secret), payment_id, &mpp_route).unwrap();
nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &our_payment_hash,
RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
nodes[0].node.send_spontaneous_payment(
Some(payment_preimage), RecipientOnionFields::spontaneous_empty(),
PaymentId(payment_preimage.0), route.route_params.clone().unwrap(), Retry::Attempts(0)
).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let ev = events.drain(..).next().unwrap();
let payment_event = SendEvent::from_event(ev);
nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
expect_and_process_pending_htlcs(&nodes[1], true);
let events = nodes[1].node.get_and_clear_pending_events();
let fail = HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash };
expect_htlc_failure_conditions(events, &[fail]);
check_added_monitors!(nodes[1], 1);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
assert!(updates.update_fulfill_htlcs.is_empty());
assert_eq!(updates.update_fail_htlcs.len(), 1);
assert!(updates.update_fail_malformed_htlcs.is_empty());
assert!(updates.update_fee.is_none());
nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
expect_payment_failed!(nodes[0], our_payment_hash, true);
nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &our_payment_hash,
RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), true, None);
nodes[1].node.claim_funds(payment_preimage);
expect_payment_claimed!(nodes[1], our_payment_hash, 200_000);
check_added_monitors!(nodes[1], 2);
let mut bs_1st_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), bs_1st_updates.update_fulfill_htlcs.remove(0));
expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_1st_updates.commitment_signed);
check_added_monitors!(nodes[0], 1);
let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa);
check_added_monitors!(nodes[1], 1);
let mut bs_2nd_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_first_cs);
check_added_monitors!(nodes[1], 1);
let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), bs_2nd_updates.update_fulfill_htlcs.remove(0));
nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_2nd_updates.commitment_signed);
check_added_monitors!(nodes[0], 1);
let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa);
let as_second_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
check_added_monitors!(nodes[0], 1);
nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa);
check_added_monitors!(nodes[1], 1);
nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_second_updates.commitment_signed);
check_added_monitors!(nodes[1], 1);
let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_third_raa);
check_added_monitors!(nodes[0], 1);
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
match events[0] {
Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path, .. } => {
assert_eq!(payment_id, *actual_payment_id);
assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
assert_eq!(route.paths[0], *path);
},
_ => panic!("Unexpected event"),
}
match events[1] {
Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path, ..} => {
assert_eq!(payment_id, *actual_payment_id);
assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
assert_eq!(route.paths[0], *path);
},
_ => panic!("Unexpected event"),
}
}
#[test]
#[rustfmt::skip]
fn test_keysend_dup_payment_hash() {
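// Test payment-hash reuse across keysend and non-keysend payments: each
// payment whose hash collides with a pending one is failed back, while the
// original can still be claimed.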
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes(&nodes, 0, 1);
let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let expected_route = [&nodes[1]];
let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &expected_route, 100_000);
let route_params = RouteParameters::from_payment_params_and_value(
PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(),
TEST_FINAL_CLTV, false), 100_000);
nodes[0].node.send_spontaneous_payment(
Some(payment_preimage), RecipientOnionFields::spontaneous_empty(),
PaymentId(payment_preimage.0), route_params.clone(), Retry::Attempts(0)
).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let ev = events.drain(..).next().unwrap();
let payment_event = SendEvent::from_event(ev);
nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
expect_and_process_pending_htlcs(&nodes[1], true);
let events = nodes[1].node.get_and_clear_pending_events();
let fail = HTLCHandlingFailureType::Receive { payment_hash };
expect_htlc_failure_conditions(events, &[fail]);
check_added_monitors!(nodes[1], 1);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
assert!(updates.update_fulfill_htlcs.is_empty());
assert_eq!(updates.update_fail_htlcs.len(), 1);
assert!(updates.update_fail_malformed_htlcs.is_empty());
assert!(updates.update_fee.is_none());
nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
expect_payment_failed!(nodes[0], payment_hash, true);
claim_payment(&nodes[0], &expected_route, payment_preimage);
let payment_preimage = PaymentPreimage([42; 32]);
let route = find_route(
&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
).unwrap();
let payment_hash = nodes[0].node.send_spontaneous_payment(
Some(payment_preimage), RecipientOnionFields::spontaneous_empty(),
PaymentId(payment_preimage.0), route.route_params.clone().unwrap(), Retry::Attempts(0)
).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let event = events.pop().unwrap();
let path = vec![&nodes[1]];
pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
let payment_secret = PaymentSecret([43; 32]);
nodes[0].node.send_payment_with_route(route.clone(), payment_hash,
RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let ev = events.drain(..).next().unwrap();
let payment_event = SendEvent::from_event(ev);
nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
expect_and_process_pending_htlcs(&nodes[1], true);
let events = nodes[1].node.get_and_clear_pending_events();
let fail = HTLCHandlingFailureType::Receive { payment_hash };
expect_htlc_failure_conditions(events, &[fail]);
check_added_monitors!(nodes[1], 1);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
assert!(updates.update_fulfill_htlcs.is_empty());
assert_eq!(updates.update_fail_htlcs.len(), 1);
assert!(updates.update_fail_malformed_htlcs.is_empty());
assert!(updates.update_fee.is_none());
nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
expect_payment_failed!(nodes[0], payment_hash, true);
claim_payment(&nodes[0], &expected_route, payment_preimage);
let payment_id_1 = PaymentId([44; 32]);
let payment_hash = nodes[0].node.send_spontaneous_payment(
Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_1,
route.route_params.clone().unwrap(), Retry::Attempts(0)
).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let event = events.pop().unwrap();
let path = vec![&nodes[1]];
pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
let route_params = RouteParameters::from_payment_params_and_value(
PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV, false),
100_000
);
let payment_id_2 = PaymentId([45; 32]);
nodes[0].node.send_spontaneous_payment(
Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_2, route_params,
Retry::Attempts(0)
).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let ev = events.drain(..).next().unwrap();
let payment_event = SendEvent::from_event(ev);
nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
expect_and_process_pending_htlcs(&nodes[1], true);
let events = nodes[1].node.get_and_clear_pending_events();
let fail = HTLCHandlingFailureType::Receive { payment_hash };
expect_htlc_failure_conditions(events, &[fail]);
check_added_monitors!(nodes[1], 1);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
assert!(updates.update_fulfill_htlcs.is_empty());
assert_eq!(updates.update_fail_htlcs.len(), 1);
assert!(updates.update_fail_malformed_htlcs.is_empty());
assert!(updates.update_fee.is_none());
nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
expect_payment_failed!(nodes[0], payment_hash, true);
claim_payment(&nodes[0], &expected_route, payment_preimage);
}
#[test]
#[rustfmt::skip]
fn test_keysend_hash_mismatch() {
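// Test that the recipient fails a keysend HTLC whose payment hash does not
// match the SHA256 of the included preimage.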
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let payer_pubkey = nodes[0].node.get_our_node_id();
let payee_pubkey = nodes[1].node.get_our_node_id();
let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
let route_params = RouteParameters::from_payment_params_and_value(
PaymentParameters::for_keysend(payee_pubkey, 40, false), 10_000);
let network_graph = nodes[0].network_graph;
let first_hops = nodes[0].node.list_usable_channels();
let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
).unwrap();
let test_preimage = PaymentPreimage([42; 32]);
let mismatch_payment_hash = PaymentHash([43; 32]);
let session_privs = nodes[0].node.test_add_new_pending_payment(mismatch_payment_hash,
RecipientOnionFields::spontaneous_empty(), PaymentId(mismatch_payment_hash.0), &route).unwrap();
nodes[0].node.test_send_payment_internal(&route, mismatch_payment_hash,
RecipientOnionFields::spontaneous_empty(), Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap();
check_added_monitors!(nodes[0], 1);
let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
assert_eq!(updates.update_add_htlcs.len(), 1);
assert!(updates.update_fulfill_htlcs.is_empty());
assert!(updates.update_fail_htlcs.is_empty());
assert!(updates.update_fail_malformed_htlcs.is_empty());
assert!(updates.update_fee.is_none());
nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
commitment_signed_dance!(nodes[1], nodes[0], &updates.commitment_signed, false);
expect_and_process_pending_htlcs(&nodes[1], false);
expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash: mismatch_payment_hash }]);
check_added_monitors(&nodes[1], 1);
let _ = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Payment preimage didn't match payment hash", 1);
}
#[test]
#[rustfmt::skip]
fn test_multi_hop_missing_secret() {
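// Test that a multi-path payment without a payment secret is rejected before
// any HTLC is sent.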
let chanmon_cfgs = create_chanmon_cfgs(4);
let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
let (mut route, payment_hash, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
let path = route.paths[0].clone();
route.paths.push(path);
route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
route.paths[0].hops[0].short_channel_id = chan_1_id;
route.paths[0].hops[1].short_channel_id = chan_3_id;
route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
route.paths[1].hops[0].short_channel_id = chan_2_id;
route.paths[1].hops[1].short_channel_id = chan_4_id;
nodes[0].node.send_payment_with_route(route, payment_hash,
RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)).unwrap();
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
Event::PaymentFailed { reason, .. } => {
assert_eq!(reason.unwrap(), crate::events::PaymentFailureReason::UnexpectedError);
}
_ => panic!()
}
nodes[0].logger.assert_log_contains("lightning::ln::outbound_payment", "Payment secret is required for multi-path payments", 2);
assert!(nodes[0].node.list_recent_payments().is_empty());
}
#[test]
#[rustfmt::skip]
fn test_channel_update_cached() {
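// Test that the channel_update generated on force-close is cached while no
// peers are connected and only broadcast once a peer (re)connects.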
let chanmon_cfgs = create_chanmon_cfgs(3);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
let message = "Channel force-closed".to_owned();
nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), message.clone()).unwrap();
check_added_monitors!(nodes[0], 1);
let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
check_closed_event!(nodes[0], 1, reason, [nodes[1].node.get_our_node_id()], 100000);
let node_1_events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(node_1_events.len(), 0);
{
let pending_broadcast_messages = nodes[0].node.pending_broadcast_messages.lock().unwrap();
assert_eq!(pending_broadcast_messages.len(), 1);
}
nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
nodes[0].node.peer_disconnected(nodes[2].node.get_our_node_id());
nodes[2].node.peer_disconnected(nodes[0].node.get_our_node_id());
let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(node_0_events.len(), 0);
nodes[0].node.peer_connected(nodes[2].node.get_our_node_id(), &msgs::Init {
features: nodes[2].node.init_features(), networks: None, remote_network_address: None
}, true).unwrap();
nodes[2].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
features: nodes[0].node.init_features(), networks: None, remote_network_address: None
}, false).unwrap();
let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(node_0_events.len(), 1);
match &node_0_events[0] {
MessageSendEvent::BroadcastChannelUpdate { .. } => (),
_ => panic!("Unexpected event"),
}
{
let pending_broadcast_messages = nodes[0].node.pending_broadcast_messages.lock().unwrap();
assert_eq!(pending_broadcast_messages.len(), 0);
}
}
#[test]
#[rustfmt::skip]
fn test_drop_disconnected_peers_when_removing_channels() {
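// Test that disconnected peers with no remaining channels are removed from
// per_peer_state on the next timer tick.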
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
let chan_id = nodes[0].node.list_channels()[0].channel_id;
let message = "Channel force-closed".to_owned();
nodes[0]
.node
.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone())
.unwrap();
check_added_monitors!(nodes[0], 1);
let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
check_closed_event!(nodes[0], 1, reason, [nodes[1].node.get_our_node_id()], 1_000_000);
{
let nodes_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
assert_eq!(nodes_0_per_peer_state.len(), 1);
assert!(nodes_0_per_peer_state.get(&nodes[1].node.get_our_node_id()).is_some());
}
nodes[0].node.timer_tick_occurred();
{
assert_eq!(nodes[0].node.per_peer_state.read().unwrap().len(), 0);
}
}
#[test]
#[rustfmt::skip]
fn test_drop_peers_when_removing_unfunded_channels() {
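// Test that closing an unfunded channel on disconnect also drops the peer
// immediately.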
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1, "Unexpected events {:?}", events);
match events[0] {
Event::FundingGenerationReady { .. } => {}
_ => panic!("Unexpected event {:?}", events),
}
nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer, [nodes[1].node.get_our_node_id()], 1_000_000);
check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer, [nodes[0].node.get_our_node_id()], 1_000_000);
assert_eq!(nodes[0].node.per_peer_state.read().unwrap().len(), 0);
assert_eq!(nodes[1].node.per_peer_state.read().unwrap().len(), 0);
}
#[test]
#[rustfmt::skip]
fn bad_inbound_payment_hash() {
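// Test that inbound_payment::verify fails for a payment hash that doesn't
// match the one we generated for the payment secret.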
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[0]);
let payment_data = msgs::FinalOnionHopData {
payment_secret,
total_msat: 100_000,
};
let mut bad_payment_hash = payment_hash.clone();
bad_payment_hash.0[0] += 1;
match inbound_payment::verify(bad_payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger) {
Ok(_) => panic!("Unexpected ok"),
Err(()) => {
nodes[0].logger.assert_log_contains("lightning::ln::inbound_payment", "Failing HTLC with user-generated payment_hash", 1);
}
}
assert!(inbound_payment::verify(payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger).is_ok());
}
fn check_not_connected_to_peer_error<T>(
res_err: Result<T, APIError>, expected_public_key: PublicKey,
) {
let expected_message = format!("Not connected to node: {}", expected_public_key);
check_api_error_message(expected_message, res_err)
}
#[rustfmt::skip]
fn check_unknown_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
let expected_message = format!("Can't find a peer matching the passed counterparty node_id {}", expected_public_key);
check_api_error_message(expected_message, res_err)
}
#[rustfmt::skip]
fn check_channel_unavailable_error<T>(res_err: Result<T, APIError>, expected_channel_id: ChannelId, peer_node_id: PublicKey) {
let expected_message = format!("Channel with id {} not found for the passed counterparty node_id {}", expected_channel_id, peer_node_id);
check_api_error_message(expected_message, res_err)
}
fn check_api_misuse_error<T>(res_err: Result<T, APIError>) {
let expected_message = "No such channel awaiting to be accepted.".to_string();
check_api_error_message(expected_message, res_err)
}
fn check_api_error_message<T>(expected_err_message: String, res_err: Result<T, APIError>) {
match res_err {
Err(APIError::APIMisuseError { err }) => {
assert_eq!(err, expected_err_message);
},
Err(APIError::ChannelUnavailable { err }) => {
assert_eq!(err, expected_err_message);
},
Ok(_) => panic!("Unexpected Ok"),
Err(_) => panic!("Unexpected Error"),
}
}
#[test]
#[rustfmt::skip]
fn test_api_calls_with_unknown_counterparty_node() {
let chanmon_cfg = create_chanmon_cfgs(2);
let node_cfg = create_node_cfgs(2, &chanmon_cfg);
let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
let nodes = create_network(2, &node_cfg, &node_chanmgr);
let channel_id = ChannelId::from_bytes([4; 32]);
let unknown_public_key = PublicKey::from_secret_key(&Secp256k1::signing_only(), &SecretKey::from_slice(&[42; 32]).unwrap());
let intercept_id = InterceptId([0; 32]);
let error_message = "Channel force-closed";
check_not_connected_to_peer_error(nodes[0].node.create_channel(unknown_public_key, 1_000_000, 500_000_000, 42, None, None), unknown_public_key);
check_unknown_peer_error(nodes[0].node.accept_inbound_channel(&channel_id, &unknown_public_key, 42, None), unknown_public_key);
check_unknown_peer_error(nodes[0].node.close_channel(&channel_id, &unknown_public_key), unknown_public_key);
check_unknown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unknown_public_key, error_message.to_string()), unknown_public_key);
check_unknown_peer_error(nodes[0].node.forward_intercepted_htlc(intercept_id, &channel_id, unknown_public_key, 1_000_000), unknown_public_key);
check_unknown_peer_error(nodes[0].node.update_channel_config(&unknown_public_key, &[channel_id], &ChannelConfig::default()), unknown_public_key);
}
#[test]
#[rustfmt::skip]
fn test_api_calls_with_unavailable_channel() {
let chanmon_cfg = create_chanmon_cfgs(2);
let node_cfg = create_node_cfgs(2, &chanmon_cfg);
let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
let nodes = create_network(2, &node_cfg, &node_chanmgr);
let counterparty_node_id = nodes[1].node.get_our_node_id();
let channel_id = ChannelId::from_bytes([4; 32]);
let error_message = "Channel force-closed";
check_api_misuse_error(nodes[0].node.accept_inbound_channel(&channel_id, &counterparty_node_id, 42, None));
check_channel_unavailable_error(nodes[0].node.close_channel(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id, error_message.to_string()), channel_id, counterparty_node_id);
check_channel_unavailable_error(nodes[0].node.forward_intercepted_htlc(InterceptId([0; 32]), &channel_id, counterparty_node_id, 1_000_000), channel_id, counterparty_node_id);
check_channel_unavailable_error(nodes[0].node.update_channel_config(&counterparty_node_id, &[channel_id], &ChannelConfig::default()), channel_id, counterparty_node_id);
}
#[test]
#[rustfmt::skip]
fn test_connection_limiting() {
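// Test the limits on inbound unfunded channels per peer and on connected
// peers without channels.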
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
let mut funding_tx = None;
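// Open the maximum number of inbound unfunded channels from node 0, funding
// (but not confirming) the first one.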
for idx in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
if idx == 0 {
nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel);
let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
funding_tx = Some(tx.clone());
nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx).unwrap();
let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg);
check_added_monitors!(nodes[1], 1);
expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed);
check_added_monitors!(nodes[0], 1);
expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
}
open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
}
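// One further inbound channel from the same peer should be rejected.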
open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(
&nodes[0].keys_manager);
nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
open_channel_msg.common_fields.temporary_channel_id);
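// Fill the limit on connected peers without channels; one more connection
// attempt should be refused.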
let mut peer_pks = Vec::with_capacity(super::MAX_NO_CHANNEL_PEERS);
for _ in 1..super::MAX_NO_CHANNEL_PEERS {
let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
peer_pks.push(random_pk);
nodes[1].node.peer_connected(random_pk, &msgs::Init {
features: nodes[0].node.init_features(), networks: None, remote_network_address: None
}, true).unwrap();
}
let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
nodes[1].node.peer_connected(last_random_pk, &msgs::Init {
features: nodes[0].node.init_features(), networks: None, remote_network_address: None
}, true).unwrap_err();
nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
let chan_closed_events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(chan_closed_events.len(), super::MAX_UNFUNDED_CHANS_PER_PEER - 1);
for ev in chan_closed_events {
if let Event::ChannelClosed { .. } = ev { } else { panic!(); }
}
nodes[1].node.peer_connected(last_random_pk, &msgs::Init {
features: nodes[0].node.init_features(), networks: None, remote_network_address: None
}, true).unwrap();
nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
features: nodes[0].node.init_features(), networks: None, remote_network_address: None
}, true).unwrap_err();
nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
features: nodes[0].node.init_features(), networks: None, remote_network_address: None
}, false).unwrap();
nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
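// There is a separate limit on peers with pending unfunded channels; fill it
// and check that the next inbound open_channel is rejected.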
assert!(peer_pks.len() > super::MAX_UNFUNDED_CHANNEL_PEERS - 1);
for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
nodes[1].node.handle_open_channel(peer_pks[i], &open_channel_msg);
get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]);
open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
}
nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
open_channel_msg.common_fields.temporary_channel_id);
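// Outbound channels we initiate ourselves are not subject to the inbound
// unfunded-channel limits.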
nodes[1].node.create_channel(last_random_pk, 100_000, 0, 42, None, None).unwrap();
get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, last_random_pk);
mine_transaction(&nodes[1], funding_tx.as_ref().unwrap());
nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
features: nodes[0].node.init_features(), networks: None, remote_network_address: None
}, true).unwrap();
get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
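// Once node 0's channel is confirmed it no longer counts against the
// unfunded-channel limits, so a previously-rejected open_channel now
// succeeds.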
nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
}
#[test]
#[rustfmt::skip]
fn reject_excessively_underpaying_htlcs() {
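// Test that an HTLC received for less than the sender intended, beyond the
// fee we're allowed to skim, is rejected, while one short by exactly the
// skimmed fee is accepted.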
let chanmon_cfg = create_chanmon_cfgs(1);
let node_cfg = create_node_cfgs(1, &chanmon_cfg);
let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
let node = create_network(1, &node_cfg, &node_chanmgr);
let sender_intended_amt_msat = 100;
let extra_fee_msat = 10;
let hop_data = onion_utils::Hop::Receive {
hop_data: msgs::InboundOnionReceivePayload {
sender_intended_htlc_amt_msat: 100,
cltv_expiry_height: 42,
payment_metadata: None,
keysend_preimage: None,
payment_data: Some(msgs::FinalOnionHopData {
payment_secret: PaymentSecret([0; 32]),
total_msat: sender_intended_amt_msat,
}),
custom_tlvs: Vec::new(),
},
shared_secret: SharedSecret::from_bytes([0; 32]),
};
let current_height: u32 = node[0].node.best_block.read().unwrap().height;
if let Err(crate::ln::channelmanager::InboundHTLCErr { reason, .. }) =
create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat),
current_height)
{
assert_eq!(reason, LocalHTLCFailureReason::FinalIncorrectHTLCAmount);
} else { panic!(); }
let hop_data = onion_utils::Hop::Receive {
hop_data: msgs::InboundOnionReceivePayload {
sender_intended_htlc_amt_msat: 100,
cltv_expiry_height: 42,
payment_metadata: None,
keysend_preimage: None,
payment_data: Some(msgs::FinalOnionHopData {
payment_secret: PaymentSecret([0; 32]),
total_msat: sender_intended_amt_msat,
}),
custom_tlvs: Vec::new(),
},
shared_secret: SharedSecret::from_bytes([0; 32]),
};
let current_height: u32 = node[0].node.best_block.read().unwrap().height;
assert!(create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat),
current_height).is_ok());
}
#[test]
#[rustfmt::skip]
fn test_final_incorrect_cltv() {
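// Test that a received HTLC whose CLTV expiry exceeds the onion's stated
// expiry height is accepted.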
let chanmon_cfg = create_chanmon_cfgs(1);
let node_cfg = create_node_cfgs(1, &chanmon_cfg);
let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
let node = create_network(1, &node_cfg, &node_chanmgr);
let current_height: u32 = node[0].node.best_block.read().unwrap().height;
let result = create_recv_pending_htlc_info(onion_utils::Hop::Receive {
hop_data: msgs::InboundOnionReceivePayload {
sender_intended_htlc_amt_msat: 100,
cltv_expiry_height: TEST_FINAL_CLTV,
payment_metadata: None,
keysend_preimage: None,
payment_data: Some(msgs::FinalOnionHopData {
payment_secret: PaymentSecret([0; 32]),
total_msat: 100,
}),
custom_tlvs: Vec::new(),
},
shared_secret: SharedSecret::from_bytes([0; 32]),
}, [0; 32], PaymentHash([0; 32]), 100, TEST_FINAL_CLTV + 1, None, true, None, current_height);
assert!(result.is_ok());
}
#[test]
#[rustfmt::skip]
fn test_update_channel_config() {
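// Test full and partial channel config updates: a no-op update emits no
// ChannelUpdate, changed values do, and a batch containing an unknown
// channel id fails without applying anything.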
let chanmon_cfg = create_chanmon_cfgs(2);
let node_cfg = create_node_cfgs(2, &chanmon_cfg);
let mut user_config = test_default_channel_config();
let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config.clone()), Some(user_config.clone())]);
let nodes = create_network(2, &node_cfg, &node_chanmgr);
let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
let channel = &nodes[0].node.list_channels()[0];
nodes[0].node.update_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &user_config.channel_config).unwrap();
let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 0);
user_config.channel_config.forwarding_fee_base_msat += 10;
nodes[0].node.update_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &user_config.channel_config).unwrap();
assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_base_msat, user_config.channel_config.forwarding_fee_base_msat);
let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
match &events[0] {
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
_ => panic!("expected BroadcastChannelUpdate event"),
}
nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate::default()).unwrap();
let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 0);
let new_cltv_expiry_delta = user_config.channel_config.cltv_expiry_delta + 6;
nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate {
cltv_expiry_delta: Some(new_cltv_expiry_delta),
..Default::default()
}).unwrap();
assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().cltv_expiry_delta, new_cltv_expiry_delta);
let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
match &events[0] {
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
_ => panic!("expected BroadcastChannelUpdate event"),
}
let new_fee = user_config.channel_config.forwarding_fee_proportional_millionths + 100;
nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate {
forwarding_fee_proportional_millionths: Some(new_fee),
accept_underpaying_htlcs: Some(true),
..Default::default()
}).unwrap();
assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().cltv_expiry_delta, new_cltv_expiry_delta);
assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, new_fee);
assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().accept_underpaying_htlcs, true);
let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
match &events[0] {
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
_ => panic!("expected BroadcastChannelUpdate event"),
}
let bad_channel_id = ChannelId::v1_from_funding_txid(&[10; 32], 10);
let current_fee = nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths;
let new_fee = current_fee + 100;
assert!(
matches!(
nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id, bad_channel_id], &ChannelConfigUpdate {
forwarding_fee_proportional_millionths: Some(new_fee),
..Default::default()
}),
Err(APIError::ChannelUnavailable { err: _ }),
)
);
assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, current_fee);
let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 0);
}
#[test]
#[rustfmt::skip]
fn test_payment_display() {
let payment_id = PaymentId([42; 32]);
assert_eq!(format!("{}", &payment_id), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a");
let payment_hash = PaymentHash([42; 32]);
assert_eq!(format!("{}", &payment_hash), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a");
let payment_preimage = PaymentPreimage([42; 32]);
assert_eq!(format!("{}", &payment_preimage), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a");
}
#[test]
#[rustfmt::skip]
fn test_trigger_lnd_force_close() {
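// Test that after force-closing while disconnected we respond to the peer's
// channel_reestablish with commitment numbers set to 0, the same flow lnd
// uses to trigger a remote force-close, causing the peer to broadcast its
// latest state.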
let chanmon_cfg = create_chanmon_cfgs(2);
let node_cfg = create_node_cfgs(2, &chanmon_cfg);
let user_config = test_default_channel_config();
let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config.clone()), Some(user_config)]);
let nodes = create_network(2, &node_cfg, &node_chanmgr);
let message = "Channel force-closed".to_owned();
let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
nodes[0]
.node
.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone())
.unwrap();
check_closed_broadcast(&nodes[0], 1, false);
check_added_monitors(&nodes[0], 1);
let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
check_closed_event!(nodes[0], 1, reason, [nodes[1].node.get_our_node_id()], 100000);
{
let txn = nodes[0].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 1);
check_spends!(txn[0], funding_tx);
}
nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init {
features: nodes[1].node.init_features(), networks: None, remote_network_address: None
}, true).unwrap();
nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
features: nodes[0].node.init_features(), networks: None, remote_network_address: None
}, false).unwrap();
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
let channel_reestablish = get_event_msg!(
nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()
);
nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &channel_reestablish);
let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 2);
if let MessageSendEvent::SendChannelReestablish { node_id, msg } = &msg_events[0] {
assert_eq!(*node_id, nodes[1].node.get_our_node_id());
assert_eq!(msg.next_local_commitment_number, 0);
assert_eq!(msg.next_remote_commitment_number, 0);
nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &msg);
} else { panic!() };
check_closed_broadcast(&nodes[1], 1, true);
check_added_monitors(&nodes[1], 1);
let expected_close_reason = ClosureReason::ProcessingError {
err: "Peer sent an invalid channel_reestablish to force close in a non-standard way".to_string()
};
check_closed_event!(nodes[1], 1, expected_close_reason, [nodes[0].node.get_our_node_id()], 100000);
{
let txn = nodes[1].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 1);
check_spends!(txn[0], funding_tx);
}
}
#[test]
#[rustfmt::skip]
fn test_malformed_forward_htlcs_ser() {
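// Test that both FailHTLC and FailMalformedHTLC entries in forward_htlcs
// survive a ChannelManager serialization round-trip.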
let chanmon_cfg = create_chanmon_cfgs(1);
let node_cfg = create_node_cfgs(1, &chanmon_cfg);
let persister;
let chain_monitor;
let chanmgrs = create_node_chanmgrs(1, &node_cfg, &[None]);
let deserialized_chanmgr;
let mut nodes = create_network(1, &node_cfg, &chanmgrs);
let dummy_failed_htlc = |htlc_id| {
HTLCForwardInfo::FailHTLC { htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42], attribution_data: Some(AttributionData::new()) } }
};
let dummy_malformed_htlc = |htlc_id| {
HTLCForwardInfo::FailMalformedHTLC {
htlc_id,
failure_code: LocalHTLCFailureReason::InvalidOnionPayload.failure_code(),
sha256_of_onion: [0; 32],
}
};
let dummy_htlcs_1: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| {
if htlc_id % 2 == 0 {
dummy_failed_htlc(htlc_id)
} else {
dummy_malformed_htlc(htlc_id)
}
}).collect();
let dummy_htlcs_2: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| {
if htlc_id % 2 == 1 {
dummy_failed_htlc(htlc_id)
} else {
dummy_malformed_htlc(htlc_id)
}
}).collect();
let (scid_1, scid_2) = (42, 43);
let mut forward_htlcs = new_hash_map();
forward_htlcs.insert(scid_1, dummy_htlcs_1.clone());
forward_htlcs.insert(scid_2, dummy_htlcs_2.clone());
let mut chanmgr_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
*chanmgr_fwd_htlcs = forward_htlcs.clone();
core::mem::drop(chanmgr_fwd_htlcs);
reload_node!(nodes[0], nodes[0].node.encode(), &[], persister, chain_monitor, deserialized_chanmgr);
let mut deserialized_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
for scid in [scid_1, scid_2].iter() {
let deserialized_htlcs = deserialized_fwd_htlcs.remove(scid).unwrap();
assert_eq!(forward_htlcs.remove(scid).unwrap(), deserialized_htlcs);
}
assert!(deserialized_fwd_htlcs.is_empty());
core::mem::drop(deserialized_fwd_htlcs);
}
}
#[cfg(ldk_bench)]
pub mod bench {
use crate::chain::chainmonitor::{ChainMonitor, Persist};
use crate::chain::Listen;
use crate::events::Event;
use crate::ln::channelmanager::{
BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentId, PaymentPreimage,
RecipientOnionFields, Retry,
};
use crate::ln::functional_test_utils::*;
use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, Init, MessageSendEvent};
use crate::routing::gossip::NetworkGraph;
use crate::routing::router::{PaymentParameters, RouteParameters};
use crate::sign::{InMemorySigner, KeysManager, NodeSigner};
use crate::util::config::{MaxDustHTLCExposure, UserConfig};
use crate::util::test_utils;
use bitcoin::amount::Amount;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::Hash;
use bitcoin::locktime::absolute::LockTime;
use bitcoin::transaction::Version;
use bitcoin::{Transaction, TxOut};
use crate::sync::{Arc, RwLock};
use criterion::Criterion;
type Manager<'a, P> = ChannelManager<
&'a ChainMonitor<
InMemorySigner,
&'a test_utils::TestChainSource,
&'a test_utils::TestBroadcaster,
&'a test_utils::TestFeeEstimator,
&'a test_utils::TestLogger,
&'a P,
&'a KeysManager,
>,
&'a test_utils::TestBroadcaster,
&'a KeysManager,
&'a KeysManager,
&'a KeysManager,
&'a test_utils::TestFeeEstimator,
&'a test_utils::TestRouter<'a>,
&'a test_utils::TestMessageRouter<'a>,
&'a test_utils::TestLogger,
>;
struct ANodeHolder<'node_cfg, 'chan_mon_cfg: 'node_cfg, P: Persist<InMemorySigner>> {
node: &'node_cfg Manager<'chan_mon_cfg, P>,
}
impl<'node_cfg, 'chan_mon_cfg: 'node_cfg, P: Persist<InMemorySigner>> NodeHolder
for ANodeHolder<'node_cfg, 'chan_mon_cfg, P>
{
type CM = Manager<'chan_mon_cfg, P>;
#[inline]
#[rustfmt::skip]
fn node(&self) -> &Manager<'chan_mon_cfg, P> { self.node }
#[inline]
#[rustfmt::skip]
fn chain_monitor(&self) -> Option<&test_utils::TestChainMonitor> { None }
}
#[rustfmt::skip]
pub fn bench_sends(bench: &mut Criterion) {
bench_two_sends(bench, "bench_sends", test_utils::TestPersister::new(), test_utils::TestPersister::new());
}
#[rustfmt::skip]
pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Criterion, bench_name: &str, persister_a: P, persister_b: P) {
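// Set up two nodes with a funded, usable channel and benchmark sending
// payments back and forth between them.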
let network = bitcoin::Network::Testnet;
let genesis_block = bitcoin::constants::genesis_block(network);
let tx_broadcaster = test_utils::TestBroadcaster::new(network);
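// 253 sat/kW is LDK's feerate floor (FEERATE_FLOOR_SATS_PER_KW), just over 1 sat/vbyte.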
let fee_estimator = test_utils::TestFeeEstimator::new(253);
let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
let scorer = RwLock::new(test_utils::TestScorer::new());
let entropy = test_utils::TestKeysInterface::new(&[0u8; 32], network);
let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &logger_a, &scorer);
let message_router = test_utils::TestMessageRouter::new_default(Arc::new(NetworkGraph::new(network, &logger_a)), &entropy);
let mut config: UserConfig = Default::default();
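// Cap dust exposure at ~5M sats at the benchmark feerate and let the channel become usable
// after a single confirmation.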
config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253);
config.channel_handshake_config.minimum_depth = 1;
let seed_a = [1u8; 32];
let keys_manager_a = KeysManager::new(&seed_a, 42, 42, true);
let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a, &keys_manager_a, keys_manager_a.get_peer_storage_key());
let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &router, &message_router, &logger_a, &keys_manager_a, &keys_manager_a, &keys_manager_a, config.clone(), ChainParameters {
network,
best_block: BestBlock::from_network(network),
}, genesis_block.header.time);
let node_a_holder = ANodeHolder { node: &node_a };
let logger_b = test_utils::TestLogger::with_id("node b".to_owned());
let seed_b = [2u8; 32];
let keys_manager_b = KeysManager::new(&seed_b, 42, 42, true);
let chain_monitor_b = ChainMonitor::new(None, &tx_broadcaster, &logger_b, &fee_estimator, &persister_b, &keys_manager_b, keys_manager_b.get_peer_storage_key());
let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &router, &message_router, &logger_b, &keys_manager_b, &keys_manager_b, &keys_manager_b, config.clone(), ChainParameters {
network,
best_block: BestBlock::from_network(network),
}, genesis_block.header.time);
let node_b_holder = ANodeHolder { node: &node_b };
node_a.peer_connected(node_b.get_our_node_id(), &Init {
features: node_b.init_features(), networks: None, remote_network_address: None
}, true).unwrap();
node_b.peer_connected(node_a.get_our_node_id(), &Init {
features: node_a.init_features(), networks: None, remote_network_address: None
}, false).unwrap();
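// Walk through the full channel open by hand: open/accept, funding created/signed, a single
// confirmation, then the channel_ready exchange.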
node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None, None).unwrap();
node_b.handle_open_channel(node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
node_a.handle_accept_channel(node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
let tx;
if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) {
tx = Transaction { version: Version::TWO, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
value: Amount::from_sat(8_000_000), script_pubkey: output_script,
}]};
node_a.funding_transaction_generated(temporary_channel_id, node_b.get_our_node_id(), tx.clone()).unwrap();
} else { panic!(); }
node_b.handle_funding_created(node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingCreated, node_b.get_our_node_id()));
let events_b = node_b.get_and_clear_pending_events();
assert_eq!(events_b.len(), 1);
match events_b[0] {
Event::ChannelPending{ ref counterparty_node_id, .. } => {
assert_eq!(*counterparty_node_id, node_a.get_our_node_id());
},
_ => panic!("Unexpected event"),
}
node_a.handle_funding_signed(node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingSigned, node_a.get_our_node_id()));
let events_a = node_a.get_and_clear_pending_events();
assert_eq!(events_a.len(), 1);
match events_a[0] {
Event::ChannelPending{ ref counterparty_node_id, .. } => {
assert_eq!(*counterparty_node_id, node_b.get_our_node_id());
},
_ => panic!("Unexpected event"),
}
assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
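// Connect one dummy block containing the funding transaction; with minimum_depth = 1 that is
// enough for both nodes to consider the channel confirmed.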
let block = create_dummy_block(BestBlock::from_network(network).block_hash, 42, vec![tx]);
Listen::block_connected(&node_a, &block, 1);
Listen::block_connected(&node_b, &block, 1);
node_a.handle_channel_ready(node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendChannelReady, node_a.get_our_node_id()));
let msg_events = node_a.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 2);
match msg_events[0] {
MessageSendEvent::SendChannelReady { ref msg, .. } => {
node_b.handle_channel_ready(node_a.get_our_node_id(), msg);
get_event_msg!(node_b_holder, MessageSendEvent::SendChannelUpdate, node_a.get_our_node_id());
},
_ => panic!(),
}
match msg_events[1] {
MessageSendEvent::SendChannelUpdate { .. } => {},
_ => panic!(),
}
let events_a = node_a.get_and_clear_pending_events();
assert_eq!(events_a.len(), 1);
match events_a[0] {
Event::ChannelReady{ ref counterparty_node_id, .. } => {
assert_eq!(*counterparty_node_id, node_b.get_our_node_id());
},
_ => panic!("Unexpected event"),
}
let events_b = node_b.get_and_clear_pending_events();
assert_eq!(events_b.len(), 1);
match events_b[0] {
Event::ChannelReady{ ref counterparty_node_id, .. } => {
assert_eq!(*counterparty_node_id, node_a.get_our_node_id());
},
_ => panic!("Unexpected event"),
}
let mut payment_count: u64 = 0;
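// Hand-rolled payment loop: derive a unique preimage from a counter, then drive the HTLC add,
// commitment dance, claim, and fulfill between the two nodes without the test harness.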
macro_rules! send_payment {
($node_a: expr, $node_b: expr) => {
let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id(), TEST_FINAL_CLTV)
.with_bolt11_features($node_b.bolt11_invoice_features()).unwrap();
let mut payment_preimage = PaymentPreimage([0; 32]);
payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes());
payment_count += 1;
let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
$node_a.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
PaymentId(payment_hash.0),
RouteParameters::from_payment_params_and_value(payment_params, 10_000),
Retry::Attempts(0)).unwrap();
let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
$node_b.handle_update_add_htlc($node_a.get_our_node_id(), &payment_event.msgs[0]);
$node_b.handle_commitment_signed_batch_test($node_a.get_our_node_id(), &payment_event.commitment_msg);
let (raa, cs) = get_revoke_commit_msgs(&ANodeHolder { node: &$node_b }, &$node_a.get_our_node_id());
$node_a.handle_revoke_and_ack($node_b.get_our_node_id(), &raa);
$node_a.handle_commitment_signed_batch_test($node_b.get_our_node_id(), &cs);
$node_b.handle_revoke_and_ack($node_a.get_our_node_id(), &get_event_msg!(ANodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));
$node_b.process_pending_htlc_forwards();
expect_payment_claimable!(ANodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
$node_b.claim_funds(payment_preimage);
expect_payment_claimed!(ANodeHolder { node: &$node_b }, payment_hash, 10_000);
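// Relay the fulfill and its commitment update back to the sender and settle the final
// commitment dance.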
match $node_b.get_and_clear_pending_msg_events().pop().unwrap() {
MessageSendEvent::UpdateHTLCs { node_id, mut updates, .. } => {
assert_eq!(node_id, $node_a.get_our_node_id());
let fulfill = updates.update_fulfill_htlcs.remove(0);
$node_a.handle_update_fulfill_htlc($node_b.get_our_node_id(), fulfill);
$node_a.handle_commitment_signed_batch_test($node_b.get_our_node_id(), &updates.commitment_signed);
},
_ => panic!("Failed to generate claim event"),
}
let (raa, cs) = get_revoke_commit_msgs(&ANodeHolder { node: &$node_a }, &$node_b.get_our_node_id());
$node_b.handle_revoke_and_ack($node_a.get_our_node_id(), &raa);
$node_b.handle_commitment_signed_batch_test($node_a.get_our_node_id(), &cs);
$node_a.handle_revoke_and_ack($node_b.get_our_node_id(), &get_event_msg!(ANodeHolder { node: &$node_b }, MessageSendEvent::SendRevokeAndACK, $node_a.get_our_node_id()));
expect_payment_sent!(ANodeHolder { node: &$node_a }, payment_preimage);
}
}
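// Each iteration sends one payment in each direction, keeping the channel balanced.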
bench.bench_function(bench_name, |b| b.iter(|| {
send_payment!(node_a, node_b);
send_payment!(node_b, node_a);
}));
}
}