use bitcoin::block::Header;
use bitcoin::transaction::{Transaction, TxIn};
use bitcoin::constants::ChainHash;
use bitcoin::key::constants::SECRET_KEY_SIZE;
use bitcoin::network::Network;
use bitcoin::hashes::{Hash, HashEngine, HmacEngine};
use bitcoin::hashes::hmac::Hmac;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hash_types::{BlockHash, Txid};
use bitcoin::secp256k1::{SecretKey,PublicKey};
use bitcoin::secp256k1::Secp256k1;
use bitcoin::{secp256k1, Sequence, Weight};
use crate::events::FundingInfo;
use crate::blinded_path::message::{AsyncPaymentsContext, MessageContext, OffersContext};
use crate::blinded_path::NodeIdLookUp;
use crate::blinded_path::message::{BlindedMessagePath, MessageForwardNode};
use crate::blinded_path::payment::{BlindedPaymentPath, Bolt12OfferContext, Bolt12RefundContext, PaymentConstraints, PaymentContext, UnauthenticatedReceiveTlvs};
use crate::chain;
use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{Balance, ChannelMonitor, ChannelMonitorUpdate, WithChannelMonitor, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::events::{self, Event, EventHandler, EventsProvider, InboundChannelFunds, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination, PaymentFailureReason, ReplayEvent};
use crate::ln::inbound_payment;
use crate::ln::types::ChannelId;
use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret};
use crate::ln::channel::{self, Channel, ChannelPhase, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext, InteractivelyFunded as _};
#[cfg(any(dual_funding, splicing))]
use crate::ln::channel::InboundV2Channel;
use crate::ln::channel_state::ChannelDetails;
use crate::types::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
#[cfg(any(feature = "_test_utils", test))]
use crate::types::features::Bolt11InvoiceFeatures;
use crate::routing::router::{BlindedTail, InFlightHtlcs, Path, Payee, PaymentParameters, RouteParameters, Router};
#[cfg(test)]
use crate::routing::router::Route;
use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundHTLCErr, NextPacketDetails};
use crate::ln::msgs;
use crate::ln::onion_utils;
use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING};
use crate::ln::msgs::{ChannelMessageHandler, CommitmentUpdate, DecodeError, LightningError};
#[cfg(test)]
use crate::ln::outbound_payment;
use crate::ln::outbound_payment::{OutboundPayments, PendingOutboundPayment, RetryableInvoiceRequest, SendAlongPathArgs, StaleExpiration};
use crate::offers::invoice::{Bolt12Invoice, DEFAULT_RELATIVE_EXPIRY, DerivedSigningPubkey, ExplicitSigningPubkey, InvoiceBuilder, UnsignedBolt12Invoice};
use crate::offers::invoice_error::InvoiceError;
use crate::offers::invoice_request::{InvoiceRequest, InvoiceRequestBuilder};
use crate::offers::nonce::Nonce;
use crate::offers::offer::{Offer, OfferBuilder};
use crate::offers::parse::Bolt12SemanticError;
use crate::offers::refund::{Refund, RefundBuilder};
use crate::offers::signer;
#[cfg(async_payments)]
use crate::offers::static_invoice::StaticInvoice;
use crate::onion_message::async_payments::{AsyncPaymentsMessage, HeldHtlcAvailable, ReleaseHeldHtlc, AsyncPaymentsMessageHandler};
use crate::onion_message::dns_resolution::HumanReadableName;
use crate::onion_message::messenger::{Destination, MessageRouter, Responder, ResponseInstruction, MessageSendInstructions};
use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
use crate::sign::ecdsa::EcdsaChannelSigner;
use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
use crate::util::wakers::{Future, Notifier};
use crate::util::scid_utils::fake_scid;
use crate::util::string::UntrustedString;
use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
use crate::util::ser::TransactionU16LenLimited;
use crate::util::logger::{Level, Logger, WithContext};
use crate::util::errors::APIError;
#[cfg(feature = "dnssec")]
use crate::blinded_path::message::DNSResolverContext;
#[cfg(feature = "dnssec")]
use crate::onion_message::dns_resolution::{DNSResolverMessage, DNSResolverMessageHandler, DNSSECQuery, DNSSECProof, OMNameResolver};
#[cfg(not(c_bindings))]
use {
crate::offers::offer::DerivedMetadata,
crate::onion_message::messenger::DefaultMessageRouter,
crate::routing::router::DefaultRouter,
crate::routing::gossip::NetworkGraph,
crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters},
crate::sign::KeysManager,
};
#[cfg(c_bindings)]
use {
crate::offers::offer::OfferWithDerivedMetadataBuilder,
crate::offers::refund::RefundMaybeWithDerivedMetadataBuilder,
};
use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, CreationError, Currency, Description, InvoiceBuilder as Bolt11InvoiceBuilder, SignOrCreationError, DEFAULT_EXPIRY_TIME};
use alloc::collections::{btree_map, BTreeMap};
use crate::io;
use crate::prelude::*;
use core::{cmp, mem};
use core::borrow::Borrow;
use core::cell::RefCell;
use crate::io::Read;
use crate::sync::{Arc, Mutex, RwLock, RwLockReadGuard, FairRwLock, LockTestExt, LockHeldState};
use core::sync::atomic::{AtomicUsize, AtomicBool, Ordering};
use core::time::Duration;
use core::ops::Deref;
use bitcoin::hex::impl_fmt_traits;
pub use crate::ln::outbound_payment::{Bolt12PaymentError, ProbeSendFailure, Retry, RetryableSendFailure, RecipientOnionFields};
#[cfg(test)]
pub(crate) use crate::ln::outbound_payment::PaymentSendFailure;
use crate::ln::script::ShutdownScript;
/// Information about where a received HTLC's onion has indicated the HTLC should go next.
#[derive(Clone)] #[cfg_attr(test, derive(Debug, PartialEq))]
pub enum PendingHTLCRouting {
/// An HTLC which should be forwarded on to another node.
Forward {
/// The onion to include in the forwarded HTLC, telling the next hop what to do with it.
onion_packet: msgs::OnionPacket,
/// The short channel id of the channel to forward over, plus blinded-path metadata if the
/// HTLC is being forwarded within a blinded path.
short_channel_id: u64, blinded: Option<BlindedForward>,
},
/// The onion indicates this HTLC is a payment to us, carrying a `payment_data` field.
Receive {
/// The payment secret and total amount data from the final onion hop.
payment_data: msgs::FinalOnionHopData,
/// Additional metadata included by the sender, if any.
payment_metadata: Option<Vec<u8>>,
/// Context for the payment, present when it was received over a blinded payment path.
payment_context: Option<PaymentContext>,
/// CLTV expiry of the inbound HTLC.
incoming_cltv_expiry: u32,
/// Shared secret for a second-layer "phantom node" onion, if this payment was received as
/// a phantom payment — NOTE(review): inferred from the field name; confirm at the decode site.
phantom_shared_secret: Option<[u8; 32]>,
/// Custom (type, value) TLV records the sender included in the onion.
custom_tlvs: Vec<(u64, Vec<u8>)>,
/// Set when the HTLC arrived over a blinded path and thus must be failed back with a
/// blinded-path-specific error (see `blinded_failure` below).
requires_blinded_error: bool,
},
/// The onion indicates this is a spontaneous ("keysend") payment carrying its own preimage.
ReceiveKeysend {
/// Final-hop payment data, if present (not required for keysend).
payment_data: Option<msgs::FinalOnionHopData>,
/// The payment preimage the sender provided in the onion.
payment_preimage: PaymentPreimage,
/// Additional metadata included by the sender, if any.
payment_metadata: Option<Vec<u8>>,
/// CLTV expiry of the inbound HTLC.
incoming_cltv_expiry: u32,
/// Custom (type, value) TLV records the sender included in the onion.
custom_tlvs: Vec<(u64, Vec<u8>)>,
/// Set when the HTLC arrived over a blinded path and thus must be failed back with a
/// blinded-path-specific error (see `blinded_failure` below).
requires_blinded_error: bool,
/// Whether the payment secret in use was created by us (rather than by the sender) —
/// NOTE(review): inferred from the field name; confirm at the construction site.
has_recipient_created_payment_secret: bool,
},
}
/// Metadata for an HTLC being forwarded within a blinded path.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct BlindedForward {
/// The inbound blinding point for this forward — NOTE(review): presumably used to decrypt
/// this hop's blinded TLVs; confirm at the onion-decode call site.
pub inbound_blinding_point: PublicKey,
/// How to fail this HTLC backwards if the forward fails (as the introduction node or as a
/// node inside the blinded path).
pub failure: BlindedFailure,
/// An override blinding point to set on the outbound HTLC, if any.
pub next_blinding_override: Option<PublicKey>,
}
impl PendingHTLCRouting {
    /// Returns how this HTLC must be failed backwards when it is part of a blinded path, or
    /// `None` if no blinded-path handling applies.
    fn blinded_failure(&self) -> Option<BlindedFailure> {
        match self {
            // Forwards carry their failure mode explicitly in the blinded metadata.
            Self::Forward { blinded: Some(BlindedForward { failure, .. }), .. } => Some(*failure),
            // Receives only flag whether a blinded-specific error is required.
            Self::Receive { requires_blinded_error: true, .. }
                | Self::ReceiveKeysend { requires_blinded_error: true, .. } =>
                Some(BlindedFailure::FromBlindedNode),
            _ => None,
        }
    }
}
/// Full details of an incoming HTLC whose onion has been decoded, including where it goes next.
#[derive(Clone)] #[cfg_attr(test, derive(Debug, PartialEq))]
pub struct PendingHTLCInfo {
/// Further routing details based on the onion contents.
pub routing: PendingHTLCRouting,
/// The onion shared secret for the inbound HTLC.
pub incoming_shared_secret: [u8; 32],
/// The hash of the payment preimage.
pub payment_hash: PaymentHash,
/// Amount received in the inbound HTLC, when known.
pub incoming_amt_msat: Option<u64>,
/// Amount to put in the outbound HTLC (or to claim, when this HTLC is for us).
pub outgoing_amt_msat: u64,
/// CLTV value to set on the outbound HTLC.
pub outgoing_cltv_value: u32,
/// A fee taken off the top of this HTLC, if any — NOTE(review): inferred from the name;
/// confirm against the interception/JIT-channel handling.
pub skimmed_fee_msat: Option<u64>,
}
/// The wire message used to fail an HTLC backwards: either an (encrypted) failure or a
/// malformed-onion failure.
#[derive(Clone)] pub(super) enum HTLCFailureMsg {
Relay(msgs::UpdateFailHTLC),
Malformed(msgs::UpdateFailMalformedHTLC),
}
/// Outcome of processing an inbound HTLC: accept/forward it, or fail it back.
#[derive(Clone)] pub(super) enum PendingHTLCStatus {
Forward(PendingHTLCInfo),
Fail(HTLCFailureMsg),
}
/// An inbound HTLC pending forwarding, together with the previous-hop identifiers needed to fail
/// it backwards if the forward cannot complete.
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
pub(super) struct PendingAddHTLCInfo {
pub(super) forward_info: PendingHTLCInfo,
// Identifiers for the HTLC on the inbound (previous-hop) channel:
prev_short_channel_id: u64,
prev_htlc_id: u64,
// `None` presumably only for objects deserialized from versions which did not record this —
// NOTE(review): confirm against the (de)serialization code.
prev_counterparty_node_id: Option<PublicKey>,
prev_channel_id: ChannelId,
prev_funding_outpoint: OutPoint,
prev_user_channel_id: u128,
}
/// A work item queued against a channel: add a forwarded HTLC, or relay a failure for an HTLC we
/// previously accepted.
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
pub(super) enum HTLCForwardInfo {
/// Forward a new HTLC out over this channel.
AddHTLC(PendingAddHTLCInfo),
/// Fail an HTLC backwards with an encrypted onion error packet.
FailHTLC {
htlc_id: u64,
err_packet: msgs::OnionErrorPacket,
},
/// Fail an HTLC backwards as malformed, with the failure code and the SHA-256 of the onion we
/// could not process.
FailMalformedHTLC {
htlc_id: u64,
failure_code: u16,
sha256_of_onion: [u8; 32],
},
}
/// Which role we play in a blinded path when failing back an HTLC received over it; the role
/// determines the kind of error we may send.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub enum BlindedFailure {
/// We are the introduction node of the blinded path.
FromIntroductionNode,
/// We are a node within the blinded path (after the introduction point).
FromBlindedNode,
}
/// Identifies the previous hop an HTLC came in from, with everything needed to claim it or fail
/// it backwards on that channel.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub(crate) struct HTLCPreviousHopData {
// Short channel id of the inbound channel.
short_channel_id: u64,
// `None` presumably for HTLCs serialized before this field existed —
// NOTE(review): confirm against deserialization defaults.
user_channel_id: Option<u128>,
htlc_id: u64,
// Onion shared secret of the inbound HTLC, needed to encrypt failures back to the sender.
incoming_packet_shared_secret: [u8; 32],
// Set when this HTLC was received as a phantom-node payment.
phantom_shared_secret: Option<[u8; 32]>,
// Set when the HTLC arrived over a blinded path.
blinded_failure: Option<BlindedFailure>,
channel_id: ChannelId,
// Funding outpoint of the inbound channel.
outpoint: OutPoint,
counterparty_node_id: Option<PublicKey>,
}
/// The final-hop payload variant for a payment to us.
#[derive(PartialEq, Eq)]
enum OnionPayload {
/// An HTLC paying an invoice.
Invoice {
// Retained for serialization compatibility only — NOTE(review): inferred from the leading
// underscore (unused at runtime in this chunk); confirm against the serialization code.
_legacy_hop_data: Option<msgs::FinalOnionHopData>,
},
/// A spontaneous ("keysend") payment carrying its own preimage.
Spontaneous(PaymentPreimage),
}
/// One HTLC of a payment to us, held until the payment can be claimed or failed as a whole.
#[derive(PartialEq, Eq)]
struct ClaimableHTLC {
prev_hop: HTLCPreviousHopData,
cltv_expiry: u32,
// The amount we actually received; may differ from `sender_intended_value` when a fee was
// skimmed (see `counterparty_skimmed_fee_msat`).
value: u64,
sender_intended_value: u64,
onion_payload: OnionPayload,
// Timer ticks this HTLC has been waiting — presumably used to time out incomplete multi-part
// payments; NOTE(review): confirm at the timer handler.
timer_ticks: u8,
// Total value received across all parts, set once the full payment has arrived.
total_value_received: Option<u64>,
// The total amount the sender intended to send across all parts of the payment.
total_msat: u64,
counterparty_skimmed_fee_msat: Option<u64>,
}
impl From<&ClaimableHTLC> for events::ClaimedHTLC {
    /// Builds the event-level summary of a claimed HTLC, defaulting missing optional fields
    /// (`user_channel_id`, skimmed fee) to zero.
    fn from(htlc: &ClaimableHTLC) -> Self {
        let hop = &htlc.prev_hop;
        events::ClaimedHTLC {
            channel_id: hop.channel_id,
            user_channel_id: hop.user_channel_id.unwrap_or(0),
            cltv_expiry: htlc.cltv_expiry,
            value_msat: htlc.value,
            counterparty_skimmed_fee_msat: htlc.counterparty_skimmed_fee_msat.unwrap_or(0),
        }
    }
}
impl PartialOrd for ClaimableHTLC {
// Delegates to the total order defined by `Ord` below.
fn partial_cmp(&self, other: &ClaimableHTLC) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl Ord for ClaimableHTLC {
    /// Orders HTLCs by their source: the previous hop's (channel id, HTLC id) pair, which
    /// uniquely identifies the inbound HTLC.
    fn cmp(&self, other: &ClaimableHTLC) -> cmp::Ordering {
        let our_key = (self.prev_hop.channel_id, self.prev_hop.htlc_id);
        let their_key = (other.prev_hop.channel_id, other.prev_hop.htlc_id);
        let ordering = our_key.cmp(&their_key);
        if ordering == cmp::Ordering::Equal {
            // Two HTLCs with the same source must be the same HTLC.
            debug_assert!(self == other, "ClaimableHTLCs from the same source should be identical");
        }
        ordering
    }
}
/// A trait for authenticating data with an HMAC keyed by our [`inbound_payment::ExpandedKey`],
/// so we can later verify that the data was constructed by us.
pub trait Verification {
/// Constructs an HMAC over the data using the given [`Nonce`] and expanded key.
fn hmac_for_offer_payment(
&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
) -> Hmac<Sha256>;
/// Verifies an HMAC previously produced by [`Self::hmac_for_offer_payment`].
fn verify_for_offer_payment(
&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
) -> Result<(), ()>;
}
impl Verification for PaymentHash {
/// HMACs this payment hash, delegating to [`signer::hmac_for_payment_hash`].
fn hmac_for_offer_payment(
&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
) -> Hmac<Sha256> {
signer::hmac_for_payment_hash(*self, nonce, expanded_key)
}
/// Verifies the HMAC, delegating to [`signer::verify_payment_hash`].
fn verify_for_offer_payment(
&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
) -> Result<(), ()> {
signer::verify_payment_hash(*self, hmac, nonce, expanded_key)
}
}
impl Verification for UnauthenticatedReceiveTlvs {
/// HMACs these receive TLVs, delegating to [`signer::hmac_for_payment_tlvs`].
fn hmac_for_offer_payment(
&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
) -> Hmac<Sha256> {
signer::hmac_for_payment_tlvs(self, nonce, expanded_key)
}
/// Verifies the HMAC, delegating to [`signer::verify_payment_tlvs`].
fn verify_for_offer_payment(
&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
) -> Result<(), ()> {
signer::verify_payment_tlvs(self, hmac, nonce, expanded_key)
}
}
/// A 32-byte identifier for a payment — NOTE(review): presumably provided by/returned to the
/// sender to identify and deduplicate outbound payments; confirm against `send_payment`.
#[derive(Hash, Copy, Clone, PartialEq, Eq)]
pub struct PaymentId(pub [u8; Self::LENGTH]);
impl PaymentId {
/// Number of bytes in a [`PaymentId`].
pub const LENGTH: usize = 32;
/// Constructs an HMAC over this payment id for use in an async-payments context, delegating
/// to [`signer::hmac_for_async_payment_id`].
#[cfg(async_payments)]
pub fn hmac_for_async_payment(
&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
) -> Hmac<Sha256> {
signer::hmac_for_async_payment_id(*self, nonce, expanded_key)
}
/// Verifies an HMAC produced by [`Self::hmac_for_async_payment`].
#[cfg(async_payments)]
pub fn verify_for_async_payment(
&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
) -> Result<(), ()> {
signer::verify_async_payment_id(*self, hmac, nonce, expanded_key)
}
}
impl Verification for PaymentId {
/// HMACs this payment id, delegating to [`signer::hmac_for_offer_payment_id`].
fn hmac_for_offer_payment(
&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
) -> Hmac<Sha256> {
signer::hmac_for_offer_payment_id(*self, nonce, expanded_key)
}
/// Verifies the HMAC, delegating to [`signer::verify_offer_payment_id`].
fn verify_for_offer_payment(
&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
) -> Result<(), ()> {
signer::verify_offer_payment_id(*self, hmac, nonce, expanded_key)
}
}
impl PaymentId {
    /// Derives a deterministic [`PaymentId`] for an inbound payment by HMAC'ing, under `key`,
    /// the (channel id, HTLC id) pair of every HTLC making up the payment.
    ///
    /// Callers must supply `htlcs` sorted by (channel id, HTLC id) so the id is stable; this is
    /// debug-asserted below.
    fn for_inbound_from_htlcs<I: Iterator<Item=(ChannelId, u64)>>(key: &[u8; 32], htlcs: I) -> PaymentId {
        let mut engine = HmacEngine::new(key);
        let mut last_pair = None;
        for (channel_id, htlc_id) in htlcs {
            engine.input(&channel_id.0);
            engine.input(&htlc_id.to_le_bytes());
            if let Some(previous) = last_pair {
                debug_assert!(previous < (channel_id, htlc_id), "HTLCs should be sorted");
            }
            last_pair = Some((channel_id, htlc_id));
        }
        PaymentId(Hmac::<Sha256>::from_engine(engine).to_byte_array())
    }
}
impl Borrow<[u8]> for PaymentId {
// Allows `PaymentId`-keyed collections to be queried with a plain byte slice.
fn borrow(&self) -> &[u8] {
&self.0[..]
}
}
// Hex formatting-trait implementations for `PaymentId`, generated by the rust-bitcoin helper
// macro over its 32 raw bytes.
impl_fmt_traits! {
impl fmt_traits for PaymentId {
const LENGTH: usize = 32;
}
}
impl Writeable for PaymentId {
// Serialized as the raw 32 bytes.
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
self.0.write(w)
}
}
impl Readable for PaymentId {
fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
let buf: [u8; 32] = Readable::read(r)?;
Ok(PaymentId(buf))
}
}
/// A 32-byte identifier for an intercepted HTLC.
#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
pub struct InterceptId(pub [u8; 32]);
impl Writeable for InterceptId {
// Serialized as the raw 32 bytes.
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
self.0.write(w)
}
}
impl Readable for InterceptId {
fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
let buf: [u8; 32] = Readable::read(r)?;
Ok(InterceptId(buf))
}
}
/// A compact, unique identifier for an HTLC we sent: the previous-hop ids for a forwarded HTLC,
/// or the onion session key for a payment we originated.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub(crate) enum SentHTLCId {
PreviousHopData { short_channel_id: u64, htlc_id: u64 },
OutboundRoute { session_priv: [u8; SECRET_KEY_SIZE] },
}
impl SentHTLCId {
pub(crate) fn from_source(source: &HTLCSource) -> Self {
match source {
HTLCSource::PreviousHopData(hop_data) => Self::PreviousHopData {
short_channel_id: hop_data.short_channel_id,
htlc_id: hop_data.htlc_id,
},
HTLCSource::OutboundRoute { session_priv, .. } =>
Self::OutboundRoute { session_priv: session_priv.secret_bytes() },
}
}
}
// TLV-based (de)serialization for `SentHTLCId`.
impl_writeable_tlv_based_enum!(SentHTLCId,
(0, PreviousHopData) => {
(0, short_channel_id, required),
(2, htlc_id, required),
},
(2, OutboundRoute) => {
(0, session_priv, required),
},
);
/// The source of an HTLC we offered: either an HTLC we are forwarding (tracking its previous
/// hop) or a payment we originated (tracking its route and onion session key).
#[allow(clippy::derive_hash_xor_eq)] #[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum HTLCSource {
PreviousHopData(HTLCPreviousHopData),
OutboundRoute {
path: Path,
/// The ephemeral key used for this HTLC's onion.
session_priv: SecretKey,
/// The msat amount of the first hop's HTLC — used in `possibly_matches_output` to match
/// this source against commitment-transaction outputs.
first_hop_htlc_msat: u64,
payment_id: PaymentId,
},
}
// Manual `Hash`, presumably because `SecretKey` lacks a `Hash` impl (it is hashed via its byte
// slice below) — NOTE(review): confirm. A discriminant byte is hashed first so the two variants
// cannot collide.
#[allow(clippy::derive_hash_xor_eq)] impl core::hash::Hash for HTLCSource {
fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
match self {
HTLCSource::PreviousHopData(prev_hop_data) => {
// Variant tag, then the inner data (which derives `Hash`).
0u8.hash(hasher);
prev_hop_data.hash(hasher);
},
HTLCSource::OutboundRoute { path, session_priv, payment_id, first_hop_htlc_msat } => {
// Variant tag, then each field; the secret key is hashed as raw bytes.
1u8.hash(hasher);
path.hash(hasher);
session_priv[..].hash(hasher);
payment_id.hash(hasher);
first_hop_htlc_msat.hash(hasher);
},
}
}
}
impl HTLCSource {
/// A fixed dummy outbound source for test vectors.
#[cfg(all(ldk_test_vectors, test))]
pub fn dummy() -> Self {
// The hard-coded session key is only usable when signature grinding is disabled —
// NOTE(review): exact reason inferred from the assertion; confirm.
assert!(cfg!(not(feature = "grind_signatures")));
HTLCSource::OutboundRoute {
path: Path { hops: Vec::new(), blinded_tail: None },
session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
first_hop_htlc_msat: 0,
payment_id: PaymentId([2; 32]),
}
}
/// Debug-only sanity check: whether this source could correspond to the given
/// commitment-transaction HTLC, comparing by amount. Non-outbound sources always match.
#[cfg(debug_assertions)]
pub(crate) fn possibly_matches_output(&self, htlc: &super::chan_utils::HTLCOutputInCommitment) -> bool {
if let HTLCSource::OutboundRoute { first_hop_htlc_msat, .. } = self {
*first_hop_htlc_msat == htlc.amount_msat
} else {
true
}
}
}
/// The failure code to use when failing an HTLC, per the BOLT 4 onion failure codes (0x2000 is
/// the NODE flag, 0x4000 the PERM flag).
#[derive(Clone, Copy)]
pub enum FailureCode {
    /// `temporary_node_failure` (NODE | 2).
    TemporaryNodeFailure,
    /// `required_node_feature_missing` (PERM | NODE | 3).
    RequiredNodeFeatureMissing,
    /// `incorrect_or_unknown_payment_details` (PERM | 15).
    IncorrectOrUnknownPaymentDetails,
    /// `invalid_onion_payload` (PERM | 22), optionally carrying the offending TLV (type, offset).
    InvalidOnionPayload(Option<(u64, u16)>),
}
// We implement `From` rather than a hand-written `Into` (clippy::from_over_into): the standard
// library's blanket impl then provides `Into<u16>` for free, so existing `.into()` call sites
// continue to work unchanged.
impl From<FailureCode> for u16 {
    /// Maps each variant to its BOLT 4 wire failure code.
    fn from(code: FailureCode) -> u16 {
        match code {
            FailureCode::TemporaryNodeFailure => 0x2000 | 2,
            FailureCode::RequiredNodeFeatureMissing => 0x4000 | 0x2000 | 3,
            FailureCode::IncorrectOrUnknownPaymentDetails => 0x4000 | 15,
            FailureCode::InvalidOnionPayload(_) => 0x4000 | 22,
        }
    }
}
/// An error produced while handling a peer message, carrying the wire-level error action and,
/// when the error forced a channel closure, the shutdown result to process.
struct MsgHandleErrInternal {
err: msgs::LightningError,
// Whether handling this error closed a channel (set by `from_finish_shutdown`).
closes_channel: bool,
shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
}
impl MsgHandleErrInternal {
/// An error which sends an `error` message to the peer but closes no channel.
#[inline]
fn send_err_msg_no_close(err: String, channel_id: ChannelId) -> Self {
Self {
err: LightningError {
err: err.clone(),
action: msgs::ErrorAction::SendErrorMessage {
msg: msgs::ErrorMessage {
channel_id,
data: err
},
},
},
closes_channel: false,
shutdown_finish: None,
}
}
/// Wraps an existing [`msgs::LightningError`] without closing any channel.
#[inline]
fn from_no_close(err: msgs::LightningError) -> Self {
Self { err, closes_channel: false, shutdown_finish: None }
}
/// An error which closed a channel: records the shutdown result and sends an `error` message,
/// disconnecting the peer when a monitor update still needs to complete.
#[inline]
fn from_finish_shutdown(err: String, channel_id: ChannelId, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
// With a pending monitor update, prefer disconnection — NOTE(review): rationale inferred;
// confirm against the disconnect handling.
let action = if shutdown_res.monitor_update.is_some() {
msgs::ErrorAction::DisconnectPeer { msg: Some(err_msg) }
} else {
msgs::ErrorAction::SendErrorMessage { msg: err_msg }
};
Self {
err: LightningError { err, action },
closes_channel: true,
shutdown_finish: Some((shutdown_res, channel_update)),
}
}
/// Maps a [`ChannelError`] to the matching wire action (warning, ignore, or error message),
/// without performing any channel closure here.
#[inline]
fn from_chan_no_close(err: ChannelError, channel_id: ChannelId) -> Self {
Self {
err: match err {
ChannelError::Warn(msg) => LightningError {
err: msg.clone(),
action: msgs::ErrorAction::SendWarningMessage {
msg: msgs::WarningMessage {
channel_id,
data: msg
},
log_level: Level::Warn,
},
},
ChannelError::Ignore(msg) => LightningError {
err: msg,
action: msgs::ErrorAction::IgnoreError,
},
ChannelError::Close((msg, _reason)) => LightningError {
err: msg.clone(),
action: msgs::ErrorAction::SendErrorMessage {
msg: msgs::ErrorMessage {
channel_id,
data: msg
},
},
},
},
closes_channel: false,
shutdown_finish: None,
}
}
/// Whether handling this error closed a channel.
fn closes_channel(&self) -> bool {
self.closes_channel
}
}
/// Minimum time, in milliseconds, an HTLC sits pending before being relayed onward —
/// NOTE(review): usage not visible in this chunk; confirm at the forwarding timer.
pub(super) const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;
/// The order in which a paired `commitment_signed` and `revoke_and_ack` should be sent to a
/// peer.
#[derive(Clone, PartialEq, Debug)]
pub(super) enum RAACommitmentOrder {
/// Send the `commitment_signed` first.
CommitmentFirst,
/// Send the `revoke_and_ack` first.
RevokeAndACKFirst,
}
/// Summary of a payment currently being claimed, retained for generating the resulting event.
#[derive(Clone, Debug, PartialEq, Eq)]
struct ClaimingPayment {
// Sum of the values of all constituent HTLCs.
amount_msat: u64,
payment_purpose: events::PaymentPurpose,
// Our node id, or the phantom-node id if any HTLC arrived as a phantom payment.
receiver_node_id: PublicKey,
htlcs: Vec<events::ClaimedHTLC>,
// Total the sender intended to pay (from the first HTLC's `total_msat`), if any HTLCs exist.
sender_intended_value: Option<u64>,
onion_fields: Option<RecipientOnionFields>,
payment_id: Option<PaymentId>,
}
// TLV serialization for `ClaimingPayment`; odd-numbered types are optional for compatibility
// with older serializations.
impl_writeable_tlv_based!(ClaimingPayment, {
(0, amount_msat, required),
(2, payment_purpose, required),
(4, receiver_node_id, required),
(5, htlcs, optional_vec),
(7, sender_intended_value, option),
(9, onion_fields, option),
(11, payment_id, option),
});
/// A payment to us which is ready to be claimed, with all HTLC parts received so far.
struct ClaimablePayment {
purpose: events::PaymentPurpose,
onion_fields: Option<RecipientOnionFields>,
htlcs: Vec<ClaimableHTLC>,
}
impl ClaimablePayment {
    /// Derives the deterministic [`PaymentId`] for this payment from the (channel id, HTLC id)
    /// pairs of its HTLCs, keyed by the given secret.
    fn inbound_payment_id(&self, secret: &[u8; 32]) -> PaymentId {
        let htlc_sources = self
            .htlcs
            .iter()
            .map(|part| (part.prev_hop.channel_id, part.prev_hop.htlc_id));
        PaymentId::for_inbound_from_htlcs(secret, htlc_sources)
    }
}
/// How channel funding was provided: a full transaction which we checked, or a bare outpoint
/// provided without its transaction.
enum FundingType {
Checked(Transaction),
Unchecked(OutPoint),
}
impl FundingType {
    /// The funding transaction's txid.
    fn txid(&self) -> Txid {
        match self {
            Self::Checked(tx) => tx.compute_txid(),
            Self::Unchecked(outpoint) => outpoint.txid,
        }
    }
    /// The full funding transaction when we have it, or an empty placeholder transaction for
    /// `Unchecked` funding.
    fn transaction_or_dummy(&self) -> Transaction {
        if let Self::Checked(tx) = self {
            tx.clone()
        } else {
            // Placeholder: version-2, no inputs/outputs, lock time zero.
            Transaction {
                version: bitcoin::transaction::Version::TWO,
                lock_time: bitcoin::absolute::LockTime::ZERO,
                input: Vec::new(),
                output: Vec::new(),
            }
        }
    }
    /// Whether the funding was supplied as a bare outpoint (`Unchecked`), i.e. without a
    /// transaction for us to broadcast.
    fn is_manual_broadcast(&self) -> bool {
        matches!(self, Self::Unchecked(_))
    }
}
/// All payments to us: those still claimable (keyed by payment hash) and those for which a claim
/// is already in flight.
struct ClaimablePayments {
claimable_payments: HashMap<PaymentHash, ClaimablePayment>,
pending_claiming_payments: HashMap<PaymentHash, ClaimingPayment>,
}
impl ClaimablePayments {
/// Moves the payment with `payment_hash` from `claimable_payments` into
/// `pending_claiming_payments`, returning its HTLCs and the new [`ClaimingPayment`] record.
///
/// Returns `Err` with the HTLCs to fail back when the payment carries unknown even custom
/// TLVs the caller did not accept, or `Err(Vec::new())` when the payment is unknown.
fn begin_claiming_payment<L: Deref, S: Deref>(
&mut self, payment_hash: PaymentHash, node_signer: &S, logger: &L,
inbound_payment_id_secret: &[u8; 32], custom_tlvs_known: bool,
) -> Result<(Vec<ClaimableHTLC>, ClaimingPayment), Vec<ClaimableHTLC>>
where L::Target: Logger, S::Target: NodeSigner,
{
match self.claimable_payments.remove(&payment_hash) {
Some(payment) => {
let mut receiver_node_id = node_signer.get_node_id(Recipient::Node)
.expect("Failed to get node_id for node recipient");
// If any part was received as a phantom-node payment, report the phantom pubkey as
// the receiver instead of our own node id.
for htlc in payment.htlcs.iter() {
if htlc.prev_hop.phantom_shared_secret.is_some() {
let phantom_pubkey = node_signer.get_node_id(Recipient::PhantomNode)
.expect("Failed to get node_id for phantom node recipient");
receiver_node_id = phantom_pubkey;
break;
}
}
// Even-typed custom TLVs are "required to understand": reject the payment unless the
// caller explicitly indicated they know these TLVs.
if let Some(RecipientOnionFields { custom_tlvs, .. }) = &payment.onion_fields {
if !custom_tlvs_known && custom_tlvs.iter().any(|(typ, _)| typ % 2 == 0) {
log_info!(logger, "Rejecting payment with payment hash {} as we cannot accept payment with unknown even TLVs: {}",
&payment_hash, log_iter!(custom_tlvs.iter().map(|(typ, _)| typ).filter(|typ| *typ % 2 == 0)));
return Err(payment.htlcs);
}
}
let payment_id = payment.inbound_payment_id(inbound_payment_id_secret);
// There should never already be a pending claim for this hash; if there is, keep the
// existing entry and log loudly (debug-asserting in tests).
let claiming_payment = self.pending_claiming_payments
.entry(payment_hash)
.and_modify(|_| {
debug_assert!(false, "Shouldn't get a duplicate pending claim event ever");
log_error!(logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug",
&payment_hash);
})
.or_insert_with(|| {
let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect();
// All MPP parts carry the same `total_msat`, so take it from the first.
let sender_intended_value = payment.htlcs.first().map(|htlc| htlc.total_msat);
ClaimingPayment {
amount_msat: payment.htlcs.iter().map(|source| source.value).sum(),
payment_purpose: payment.purpose,
receiver_node_id,
htlcs,
sender_intended_value,
onion_fields: payment.onion_fields,
payment_id: Some(payment_id),
}
}).clone();
Ok((payment.htlcs, claiming_payment))
},
None => Err(Vec::new())
}
}
}
/// Events processed in the background, mostly channel monitor updates regenerated when we
/// deserialized on startup.
#[derive(Debug)]
enum BackgroundEvent {
/// A regenerated monitor update for a channel which was already closed when we started up.
ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelId, ChannelMonitorUpdate)),
/// A regenerated monitor update for a channel we still track.
MonitorUpdateRegeneratedOnStartup {
counterparty_node_id: PublicKey,
funding_txo: OutPoint,
channel_id: ChannelId,
update: ChannelMonitorUpdate
},
/// Indicates all regenerated monitor updates for the given channel have been applied.
MonitorUpdatesComplete {
counterparty_node_id: PublicKey,
channel_id: ChannelId,
},
}
/// Identifies a channel (and the blocking action holding it) to unblock once an event has been
/// handled.
#[derive(Debug)]
pub(crate) struct EventUnblockedChannel {
counterparty_node_id: PublicKey,
funding_txo: OutPoint,
channel_id: ChannelId,
blocking_action: RAAMonitorUpdateBlockingAction,
}
impl Writeable for EventUnblockedChannel {
// Fields are written in declaration order; `MaybeReadable` below reads them back in the same
// order.
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
self.counterparty_node_id.write(writer)?;
self.funding_txo.write(writer)?;
self.channel_id.write(writer)?;
self.blocking_action.write(writer)
}
}
impl MaybeReadable for EventUnblockedChannel {
    /// Reads the fields back in the order `Writeable` wrote them. If the inner blocking action
    /// reads as `None` (e.g. an unrecognized variant), the whole record is dropped as `None`.
    fn read<R: Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
        let counterparty_node_id = Readable::read(reader)?;
        let funding_txo = Readable::read(reader)?;
        let channel_id = Readable::read(reader)?;
        let maybe_action = RAAMonitorUpdateBlockingAction::read(reader)?;
        Ok(maybe_action.map(|blocking_action| EventUnblockedChannel {
            counterparty_node_id,
            funding_txo,
            channel_id,
            blocking_action,
        }))
    }
}
/// An action to take once a channel monitor update completes.
#[derive(Debug)]
pub(crate) enum MonitorUpdateCompletionAction {
/// Surface the payment-claimed state, optionally noting the MPP part this channel contributed.
PaymentClaimed {
payment_hash: PaymentHash,
pending_mpp_claim: Option<(PublicKey, ChannelId, u64, PendingMPPClaimPointer)>,
},
/// Emit an event and, optionally, unblock a downstream channel held by a blocking action.
EmitEventAndFreeOtherChannel {
event: events::Event,
downstream_counterparty_and_funding_outpoint: Option<EventUnblockedChannel>,
},
/// Immediately release the given blocking action on the identified downstream channel.
FreeOtherChannelImmediately {
downstream_counterparty_node_id: PublicKey,
downstream_funding_outpoint: OutPoint,
blocking_action: RAAMonitorUpdateBlockingAction,
downstream_channel_id: ChannelId,
},
}
// TLV (de)serialization for `MonitorUpdateCompletionAction`. `pending_mpp_claim` holds
// runtime-only state: the deliberately out-of-range type number with a static `None` value
// appears intended to keep it from ever being written, so it always resurfaces as `None` on
// read. The `downstream_channel_id` default reconstructs a v1 channel id from the funding
// outpoint for data written before the field existed.
impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
(0, PaymentClaimed) => {
(0, payment_hash, required),
(9999999999, pending_mpp_claim, (static_value, None)),
},
(1, FreeOtherChannelImmediately) => {
(0, downstream_counterparty_node_id, required),
(2, downstream_funding_outpoint, required),
(4, blocking_action, upgradable_required),
(5, downstream_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(downstream_funding_outpoint.0.unwrap()))),
},
(2, EmitEventAndFreeOtherChannel) => {
(0, event, upgradable_required),
(1, downstream_counterparty_and_funding_outpoint, upgradable_option),
},
);
/// An action to take once a user-visible event has been handled.
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum EventCompletionAction {
/// Release a channel monitor update which was held back pending the event's handling.
ReleaseRAAChannelMonitorUpdate {
counterparty_node_id: PublicKey,
channel_funding_outpoint: OutPoint,
channel_id: ChannelId,
},
}
// TLV (de)serialization for `EventCompletionAction`; `channel_id` defaults to the v1 id derived
// from the funding outpoint for data written before the field existed.
impl_writeable_tlv_based_enum!(EventCompletionAction,
(0, ReleaseRAAChannelMonitorUpdate) => {
(0, channel_funding_outpoint, required),
(2, counterparty_node_id, required),
(3, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(channel_funding_outpoint.0.unwrap()))),
}
);
/// Identifies an HTLC being claimed on a specific channel; like [`MPPClaimHTLCSource`] but with
/// an optional counterparty (which may be unknown for old serialized data).
struct HTLCClaimSource {
counterparty_node_id: Option<PublicKey>,
funding_txo: OutPoint,
channel_id: ChannelId,
htlc_id: u64,
}
impl From<&MPPClaimHTLCSource> for HTLCClaimSource {
    /// Converts an MPP claim source into the more general claim-source form; the counterparty
    /// is always known here, so it is wrapped in `Some`.
    fn from(src: &MPPClaimHTLCSource) -> HTLCClaimSource {
        HTLCClaimSource {
            counterparty_node_id: Some(src.counterparty_node_id),
            funding_txo: src.funding_txo,
            channel_id: src.channel_id,
            htlc_id: src.htlc_id,
        }
    }
}
/// Identifies one HTLC part of a multi-part payment being claimed, by channel and HTLC id.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
struct MPPClaimHTLCSource {
counterparty_node_id: PublicKey,
funding_txo: OutPoint,
channel_id: ChannelId,
htlc_id: u64,
}
// TLV (de)serialization for `MPPClaimHTLCSource`.
impl_writeable_tlv_based!(MPPClaimHTLCSource, {
(0, counterparty_node_id, required),
(2, funding_txo, required),
(4, channel_id, required),
(6, htlc_id, required),
});
/// Tracks progress of a multi-part payment claim: which channels still need the preimage and
/// which already have it.
#[derive(Debug)]
pub(crate) struct PendingMPPClaim {
channels_without_preimage: Vec<MPPClaimHTLCSource>,
channels_with_preimage: Vec<MPPClaimHTLCSource>,
}
/// The full set of MPP parts and the claiming-payment record for a payment being claimed.
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct PaymentClaimDetails {
mpp_parts: Vec<MPPClaimHTLCSource>,
claiming_payment: ClaimingPayment,
}
// TLV (de)serialization for `PaymentClaimDetails`.
impl_writeable_tlv_based!(PaymentClaimDetails, {
(0, mpp_parts, required_vec),
(2, claiming_payment, required),
});
/// A shared, clonable handle to the mutable state of an in-progress MPP claim.
#[derive(Clone)]
pub(crate) struct PendingMPPClaimPointer(Arc<Mutex<PendingMPPClaim>>);
// Pointer identity, not structural equality: two handles are equal iff they share the same
// underlying allocation.
impl PartialEq for PendingMPPClaimPointer {
fn eq(&self, o: &Self) -> bool { Arc::ptr_eq(&self.0, &o.0) }
}
impl Eq for PendingMPPClaimPointer {}
impl core::fmt::Debug for PendingMPPClaimPointer {
// Formats the inner claim state; note this takes the mutex (and unwraps poisoning).
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
self.0.lock().unwrap().fmt(f)
}
}
/// A reason an RAA-triggered channel monitor update is being held back.
#[derive(Clone, PartialEq, Eq, Debug)]
pub(crate) enum RAAMonitorUpdateBlockingAction {
/// Blocked on an inbound claim of a payment which was forwarded over the given channel/HTLC.
ForwardedPaymentInboundClaim {
channel_id: ChannelId,
htlc_id: u64,
},
/// Blocked on completion of an in-progress multi-part payment claim.
ClaimedMPPPayment {
pending_claim: PendingMPPClaimPointer,
}
}
impl RAAMonitorUpdateBlockingAction {
fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
Self::ForwardedPaymentInboundClaim {
channel_id: prev_hop.channel_id,
htlc_id: prev_hop.htlc_id,
}
}
}
// TLV (de)serialization for `RAAMonitorUpdateBlockingAction`. `ClaimedMPPPayment` holds
// runtime-only state (an `Arc`'d mutex) and is listed as an unread variant, i.e. it is not
// (de)serialized through this macro.
impl_writeable_tlv_based_enum_upgradable!(RAAMonitorUpdateBlockingAction,
(0, ForwardedPaymentInboundClaim) => { (0, channel_id, required), (2, htlc_id, required) },
unread_variants: ClaimedMPPPayment
);
impl Readable for Option<RAAMonitorUpdateBlockingAction> {
    /// Reads an optional blocking action. `MaybeReadable::read` already yields
    /// `Result<Option<_>, DecodeError>`, which is exactly our return type, so we delegate
    /// directly.
    fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
        RAAMonitorUpdateBlockingAction::read(reader)
    }
}
/// Per-peer state, covering all channels with that peer and pending messages/actions for them.
pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
pub(super) channel_by_id: HashMap<ChannelId, ChannelPhase<SP>>,
// Inbound channels awaiting user acceptance.
pub(super) inbound_channel_request_by_id: HashMap<ChannelId, InboundChannelRequest>,
// Features the peer advertised in its latest `init` message.
latest_features: InitFeatures,
pub(super) pending_msg_events: Vec<MessageSendEvent>,
// Monitor updates sent but not yet confirmed complete, per funding outpoint.
in_flight_monitor_updates: BTreeMap<OutPoint, Vec<ChannelMonitorUpdate>>,
monitor_update_blocked_actions: BTreeMap<ChannelId, Vec<MonitorUpdateCompletionAction>>,
actions_blocking_raa_monitor_updates: BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
// Latest monitor update id per closed channel — NOTE(review): inferred from name; confirm.
closed_channel_monitor_update_ids: BTreeMap<ChannelId, u64>,
pub is_connected: bool,
}
impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
    /// Whether this peer's state may be dropped: no in-flight monitor updates, no channels we
    /// must keep (funded or outbound-unfunded), no blocked actions, and no closed-channel
    /// monitor ids. When `require_disconnected` is set, the peer must also be disconnected.
    fn ok_to_remove(&self, require_disconnected: bool) -> bool {
        if require_disconnected && self.is_connected {
            return false;
        }
        let updates_in_flight = self
            .in_flight_monitor_updates
            .values()
            .any(|updates| !updates.is_empty());
        if updates_in_flight {
            return false;
        }
        // Funded and outbound-unfunded channels pin the peer state; inbound-unfunded ones do
        // not.
        let has_retained_channel = self.channel_by_id.values().any(|phase| match phase {
            ChannelPhase::Funded(_)
                | ChannelPhase::UnfundedOutboundV1(_)
                | ChannelPhase::UnfundedOutboundV2(_) => true,
            ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedInboundV2(_) => false,
        });
        !has_retained_channel
            && self.monitor_update_blocked_actions.is_empty()
            && self.closed_channel_monitor_update_ids.is_empty()
    }
    /// Total number of channels with this peer, including not-yet-accepted inbound requests.
    fn total_channel_count(&self) -> usize {
        self.channel_by_id.len() + self.inbound_channel_request_by_id.len()
    }
    /// Whether we know of the given channel id for this peer, accepted or pending.
    fn has_channel(&self, channel_id: &ChannelId) -> bool {
        self.channel_by_id.contains_key(channel_id)
            || self.inbound_channel_request_by_id.contains_key(channel_id)
    }
}
/// An owned `open_channel` message of either protocol version.
#[derive(Clone)]
pub(super) enum OpenChannelMessage {
V1(msgs::OpenChannel),
#[cfg(dual_funding)]
V2(msgs::OpenChannelV2),
}
/// A borrowed `open_channel` message of either protocol version.
pub(super) enum OpenChannelMessageRef<'a> {
V1(&'a msgs::OpenChannel),
#[cfg(dual_funding)]
V2(&'a msgs::OpenChannelV2),
}
/// An inbound channel open awaiting user acceptance, with the ticks left before it expires.
pub(super) struct InboundChannelRequest {
pub open_channel_msg: OpenChannelMessage,
pub ticks_remaining: i32,
}
/// Number of timer ticks an unaccepted inbound channel request is kept before being dropped.
const UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS: i32 = 2;
/// Number of blocks of feerate history tracked — NOTE(review): usage not visible in this chunk;
/// confirm at the fee-tracking code.
pub(super) const FEERATE_TRACKING_BLOCKS: usize = 144;
/// State for an inbound payment we expect to receive, keyed off a generated payment secret.
#[derive(Debug)]
struct PendingInboundPayment {
payment_secret: PaymentSecret,
// Expiry timestamp — NOTE(review): units (seconds?) not visible here; confirm.
expiry_time: u64,
user_payment_id: u64,
// `None` for payments where the payer supplies the preimage (we only know the hash).
payment_preimage: Option<PaymentPreimage>,
// Minimum value we will accept for this payment, if any.
min_value_msat: Option<u64>,
}
/// A [`ChannelManager`] with every generic parameter pinned to an `Arc`-wrapped default:
/// [`KeysManager`] for entropy/signing, [`DefaultRouter`] with a [`ProbabilisticScorer`], and
/// [`DefaultMessageRouter`]. Useful when you don't need custom implementations.
#[cfg(not(c_bindings))]
pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
Arc<M>,
Arc<T>,
Arc<KeysManager>,
Arc<KeysManager>,
Arc<KeysManager>,
Arc<F>,
Arc<DefaultRouter<
Arc<NetworkGraph<Arc<L>>>,
Arc<L>,
Arc<KeysManager>,
Arc<RwLock<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
ProbabilisticScoringFeeParameters,
ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
>>,
Arc<DefaultMessageRouter<
Arc<NetworkGraph<Arc<L>>>,
Arc<L>,
Arc<KeysManager>,
>>,
Arc<L>
>;
/// The reference-based counterpart of [`SimpleArcChannelManager`]: the same defaults
/// ([`KeysManager`], [`DefaultRouter`], [`DefaultMessageRouter`]) held by plain references
/// rather than `Arc`s.
#[cfg(not(c_bindings))]
pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, 'i, M, T, F, L> =
ChannelManager<
&'a M,
&'b T,
&'c KeysManager,
&'c KeysManager,
&'c KeysManager,
&'d F,
&'e DefaultRouter<
&'f NetworkGraph<&'g L>,
&'g L,
&'c KeysManager,
&'h RwLock<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>,
ProbabilisticScoringFeeParameters,
ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>
>,
&'i DefaultMessageRouter<
&'f NetworkGraph<&'g L>,
&'g L,
&'c KeysManager,
>,
&'g L
>;
/// Lets code be generic over any concrete [`ChannelManager`] without spelling out all nine of
/// its type parameters: each parameter appears below as a (target trait, `Deref` handle) pair of
/// associated types, and [`Self::get_cm`] returns the manager itself.
pub trait AChannelManager {
type Watch: chain::Watch<Self::Signer> + ?Sized;
type M: Deref<Target = Self::Watch>;
type Broadcaster: BroadcasterInterface + ?Sized;
type T: Deref<Target = Self::Broadcaster>;
type EntropySource: EntropySource + ?Sized;
type ES: Deref<Target = Self::EntropySource>;
type NodeSigner: NodeSigner + ?Sized;
type NS: Deref<Target = Self::NodeSigner>;
type Signer: EcdsaChannelSigner + Sized;
type SignerProvider: SignerProvider<EcdsaSigner= Self::Signer> + ?Sized;
type SP: Deref<Target = Self::SignerProvider>;
type FeeEstimator: FeeEstimator + ?Sized;
type F: Deref<Target = Self::FeeEstimator>;
type Router: Router + ?Sized;
type R: Deref<Target = Self::Router>;
type MessageRouter: MessageRouter + ?Sized;
type MR: Deref<Target = Self::MessageRouter>;
type Logger: Logger + ?Sized;
type L: Deref<Target = Self::Logger>;
/// Returns the concrete [`ChannelManager`].
fn get_cm(&self) -> &ChannelManager<Self::M, Self::T, Self::ES, Self::NS, Self::SP, Self::F, Self::R, Self::MR, Self::L>;
}
// The blanket implementation: every `ChannelManager` is trivially an `AChannelManager`, with
// each associated type mapped straight from the corresponding generic parameter.
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> AChannelManager
for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
type Watch = M::Target;
type M = M;
type Broadcaster = T::Target;
type T = T;
type EntropySource = ES::Target;
type ES = ES;
type NodeSigner = NS::Target;
type NS = NS;
type Signer = <SP::Target as SignerProvider>::EcdsaSigner;
type SignerProvider = SP::Target;
type SP = SP;
type FeeEstimator = F::Target;
type F = F;
type Router = R::Target;
type R = R;
type MessageRouter = MR::Target;
type MR = MR;
type Logger = L::Target;
type L = L;
fn get_cm(&self) -> &ChannelManager<M, T, ES, NS, SP, F, R, MR, L> { self }
}
/// The central channel-management state machine, generic over its chain monitor (`M`),
/// transaction broadcaster (`T`), entropy source (`ES`), node signer (`NS`), signer provider
/// (`SP`), fee estimator (`F`), payment router (`R`), onion-message router (`MR`), and logger
/// (`L`). Several fields carry `#[cfg(test)]`/`#[cfg(feature = "_test_utils")]` attribute pairs
/// solely to widen their visibility for tests; the field itself is the same in both cases.
pub struct ChannelManager<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
// Configuration applied to newly-created channels unless overridden per-call.
default_configuration: UserConfig,
// Genesis-block hash identifying which chain this manager operates on.
chain_hash: ChainHash,
fee_estimator: LowerBoundedFeeEstimator<F>,
chain_monitor: M,
tx_broadcaster: T,
#[cfg(fuzzing)]
pub router: R,
#[cfg(not(fuzzing))]
router: R,
message_router: MR,
// Most-recently-seen chain tip.
#[cfg(test)]
pub(super) best_block: RwLock<BestBlock>,
#[cfg(not(test))]
best_block: RwLock<BestBlock>,
secp_ctx: Secp256k1<secp256k1::All>,
pending_outbound_payments: OutboundPayments,
// HTLCs awaiting forwarding, keyed by (short channel id) u64.
#[cfg(test)]
pub(super) forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
#[cfg(not(test))]
forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
pending_intercepted_htlcs: Mutex<HashMap<InterceptId, PendingAddHTLCInfo>>,
decode_update_add_htlcs: Mutex<HashMap<u64, Vec<msgs::UpdateAddHTLC>>>,
claimable_payments: Mutex<ClaimablePayments>,
// Set of outbound SCID aliases currently in use; see create_and_insert_outbound_scid_alias.
outbound_scid_aliases: Mutex<HashSet<u64>>,
// Maps a channel funding outpoint to the counterparty node that owns the channel.
#[cfg(not(test))]
outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,
#[cfg(test)]
pub(crate) outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,
// Maps a short channel id (real or alias) to the (counterparty, channel id) pair.
#[cfg(test)]
pub(super) short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, ChannelId)>>,
#[cfg(not(test))]
short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, ChannelId)>>,
our_network_pubkey: PublicKey,
inbound_payment_key: inbound_payment::ExpandedKey,
// Random secrets drawn once at construction; see ChannelManager::new.
fake_scid_rand_bytes: [u8; 32],
probing_cookie_secret: [u8; 32],
inbound_payment_id_secret: [u8; 32],
highest_seen_timestamp: AtomicUsize,
// Per-peer state, including each peer's channels, behind a fairness-preserving RwLock.
#[cfg(not(any(test, feature = "_test_utils")))]
per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<SP>>>>,
#[cfg(any(test, feature = "_test_utils"))]
pub(super) per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<SP>>>>,
// Events queued for the user's event handler, each with an optional completion action.
#[cfg(not(any(test, feature = "_test_utils")))]
pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
#[cfg(any(test, feature = "_test_utils"))]
pub(crate) pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
// Guards against concurrent event processing; see process_events_body!.
pending_events_processor: AtomicBool,
pending_background_events: Mutex<Vec<BackgroundEvent>>,
// Read-held during normal operation; write-held to serialize the whole manager.
total_consistency_lock: RwLock<()>,
// Per-funding-txid batch state: (channel id, counterparty, initial-monitor-done) triples.
funding_batch_states: Mutex<BTreeMap<Txid, Vec<(ChannelId, PublicKey, bool)>>>,
background_events_processed_since_startup: AtomicBool,
event_persist_notifier: Notifier,
needs_persist_flag: AtomicBool,
#[cfg(not(any(test, feature = "_test_utils")))]
pending_offers_messages: Mutex<Vec<(OffersMessage, MessageSendInstructions)>>,
#[cfg(any(test, feature = "_test_utils"))]
pub(crate) pending_offers_messages: Mutex<Vec<(OffersMessage, MessageSendInstructions)>>,
pending_async_payments_messages: Mutex<Vec<(AsyncPaymentsMessage, MessageSendInstructions)>>,
pending_broadcast_messages: Mutex<Vec<MessageSendEvent>>,
last_days_feerates: Mutex<VecDeque<(u32, u32)>>,
#[cfg(feature = "dnssec")]
hrn_resolver: OMNameResolver,
#[cfg(feature = "dnssec")]
pending_dns_onion_messages: Mutex<Vec<(DNSResolverMessage, MessageSendInstructions)>>,
#[cfg(feature = "_test_utils")]
pub testing_dnssec_proof_offer_resolution_override: Mutex<HashMap<HumanReadableName, Offer>>,
#[cfg(test)]
pub(super) entropy_source: ES,
#[cfg(not(test))]
entropy_source: ES,
node_signer: NS,
#[cfg(test)]
pub(super) signer_provider: SP,
#[cfg(not(test))]
signer_provider: SP,
logger: L,
}
/// Chain-related parameters used to construct a new [`ChannelManager`].
#[derive(Clone, Copy, PartialEq)]
pub struct ChainParameters {
/// The network (e.g. mainnet, testnet) the manager operates on.
pub network: Network,
/// The chain tip as of construction.
pub best_block: BestBlock,
}
/// Whether an operation requires the manager to be re-persisted and/or the event notifier to
/// fire, in decreasing order of strength. `#[must_use]` because dropping one of these without
/// acting on it silently loses a persistence requirement.
#[derive(Copy, Clone, PartialEq)]
#[must_use]
enum NotifyOption {
/// State changed: set the persist flag and wake the notifier.
DoPersist,
/// No persistence needed, but events may be pending: wake the notifier only.
SkipPersistHandleEvents,
/// Nothing changed: neither persist nor notify.
SkipPersistNoEvents,
}
/// RAII guard that, on drop, runs `should_persist` and notifies/flags persistence accordingly
/// (see the `Drop` impl below), while holding a read lock on the manager's
/// `total_consistency_lock` for its whole lifetime.
struct PersistenceNotifierGuard<'a, F: FnMut() -> NotifyOption> {
event_persist_notifier: &'a Notifier,
needs_persist_flag: &'a AtomicBool,
// Closure evaluated at drop time to decide whether to persist/notify.
should_persist: F,
// Declared last so it is released only after the drop logic above has run its course
// through the struct's other fields.
_read_guard: RwLockReadGuard<'a, ()>,
}
/// Constructors for [`PersistenceNotifierGuard`]. All of them take the consistency read lock
/// *before* doing anything else, so the lock is held for the full scope of the caller's work.
impl<'a> PersistenceNotifierGuard<'a, fn() -> NotifyOption> { fn notify_on_drop<C: AChannelManager>(cm: &'a C) -> PersistenceNotifierGuard<'a, impl FnMut() -> NotifyOption> {
// Unconditional persistence: always report DoPersist when dropped.
Self::optionally_notify(cm, || -> NotifyOption { NotifyOption::DoPersist })
}
// Builds a guard whose drop-time decision combines the caller's `persist_check` with the
// result of processing any queued background events (the stronger option wins).
fn optionally_notify<F: FnMut() -> NotifyOption, C: AChannelManager>(cm: &'a C, mut persist_check: F)
-> PersistenceNotifierGuard<'a, impl FnMut() -> NotifyOption> {
let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
// Background events are processed while the read lock is held; their outcome can
// upgrade the caller's NotifyOption below.
let force_notify = cm.get_cm().process_background_events();
PersistenceNotifierGuard {
event_persist_notifier: &cm.get_cm().event_persist_notifier,
needs_persist_flag: &cm.get_cm().needs_persist_flag,
should_persist: move || {
// Take the strongest of the two options: DoPersist beats
// SkipPersistHandleEvents beats SkipPersistNoEvents.
let notify = persist_check();
match (notify, force_notify) {
(NotifyOption::DoPersist, _) => NotifyOption::DoPersist,
(_, NotifyOption::DoPersist) => NotifyOption::DoPersist,
(NotifyOption::SkipPersistHandleEvents, _) => NotifyOption::SkipPersistHandleEvents,
(_, NotifyOption::SkipPersistHandleEvents) => NotifyOption::SkipPersistHandleEvents,
_ => NotifyOption::SkipPersistNoEvents,
}
},
_read_guard: read_guard,
}
}
// Like `optionally_notify`, but does NOT process background events first; the caller's
// `persist_check` alone decides the drop-time action.
fn optionally_notify_skipping_background_events<F: Fn() -> NotifyOption, C: AChannelManager>
(cm: &'a C, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
PersistenceNotifierGuard {
event_persist_notifier: &cm.get_cm().event_persist_notifier,
needs_persist_flag: &cm.get_cm().needs_persist_flag,
should_persist: persist_check,
_read_guard: read_guard,
}
}
}
impl<'a, F: FnMut() -> NotifyOption> Drop for PersistenceNotifierGuard<'a, F> {
	/// On guard drop, evaluate the persistence decision exactly once and act on it:
	/// `DoPersist` sets the persist flag and wakes the notifier, `SkipPersistHandleEvents`
	/// only wakes the notifier, and `SkipPersistNoEvents` does nothing.
	fn drop(&mut self) {
		let decision = (self.should_persist)();
		if decision == NotifyOption::DoPersist {
			// Persistence implies notification as well, handled by the shared branch below.
			self.needs_persist_flag.store(true, Ordering::Release);
		}
		if decision != NotifyOption::SkipPersistNoEvents {
			self.event_persist_notifier.notify();
		}
	}
}
/// Blocks a counterparty has to respond before we force-close: 6 * 24 (nominally one day at
/// ~6 blocks/hour).
pub const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
/// Upper bound we will accept for the counterparty's to_self_delay: nominally one week.
pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;
/// Minimum CLTV delta we require on forwarded HTLCs: 6*7 blocks.
pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6*7;
/// CLTV expiries further than this from the tip (14 days of blocks) are rejected as too far out.
pub(super) const CLTV_FAR_FAR_AWAY: u32 = 14 * 24 * 6;
/// Minimum final-hop CLTV delta, derived from the fail-back buffer plus a small margin.
pub const MIN_FINAL_CLTV_EXPIRY_DELTA: u16 = HTLC_FAIL_BACK_BUFFER as u16 + 3;
// Compile-time sanity checks: these underflow (and thus fail to build) if the buffer constants
// ever grow past what MIN_CLTV_EXPIRY_DELTA can absorb.
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
/// Timer ticks an MPP payment part may sit unclaimed before we time the payment out.
pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
/// Timer ticks a peer must stay disconnected before we gossip the channel as disabled.
pub(crate) const DISABLE_GOSSIP_TICKS: u8 = 10;
/// Timer ticks a peer must stay connected before we re-enable the channel in gossip.
pub(crate) const ENABLE_GOSSIP_TICKS: u8 = 5;
/// Maximum unfunded inbound channels we allow any single peer to have open at once.
const MAX_UNFUNDED_CHANS_PER_PEER: usize = 4;
/// Maximum number of peers that may each have an unfunded inbound channel pending.
const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50;
/// Maximum connected peers with no funded channels before we reject new connections.
const MAX_NO_CHANNEL_PEERS: usize = 250;
/// Longest relative expiry considered "short-lived" for BOLT12 invoice purposes: 24 hours.
pub const MAX_SHORT_LIVED_RELATIVE_EXPIRY: Duration = Duration::from_secs(60 * 60 * 24);
/// The status of a recent outbound payment as tracked by the manager.
#[derive(Debug, PartialEq)]
pub enum RecentPaymentDetails {
/// A BOLT12 invoice has been requested but not yet received, so no hash is known yet.
AwaitingInvoice {
/// Identifier chosen by/for the payer to track this payment.
payment_id: PaymentId,
},
/// The payment is in flight.
Pending {
payment_id: PaymentId,
/// Hash committing to the payment preimage.
payment_hash: PaymentHash,
/// Total amount, in millisatoshis, across all parts of the payment.
total_msat: u64,
},
/// The payment succeeded. The hash is `None` when it is no longer known.
Fulfilled {
payment_id: PaymentId,
payment_hash: Option<PaymentHash>,
},
/// The payment failed and has been given up on.
Abandoned {
payment_id: PaymentId,
payment_hash: PaymentHash,
},
}
/// Route hints for receiving to a phantom node: the real channels to advertise, the fake SCID
/// for the phantom hop, and the pubkey of the real node the hints route through.
#[derive(Clone)]
pub struct PhantomRouteHints {
/// Real channels whose details should be included in the invoice's route hints.
pub channels: Vec<ChannelDetails>,
/// The fake short channel id for the final hop to the phantom node.
pub phantom_scid: u64,
/// The node id of the real node the phantom sits behind.
pub real_node_pubkey: PublicKey,
}
// Maps an internal Result into the external one, performing any required channel force-close
// and queueing outbound error/warning messages to the offending peer. Must be called without
// `pending_events` or `per_peer_state` held (asserted below), since it takes both.
macro_rules! handle_error {
($self: ident, $internal: expr, $counterparty_node_id: expr) => { {
// Lock-order safety: we take these locks below, so the caller must not hold them.
debug_assert_ne!($self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
debug_assert_ne!($self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
match $internal {
Ok(msg) => Ok(msg),
Err(MsgHandleErrInternal { err, shutdown_finish, .. }) => {
let mut msg_event = None;
// A populated shutdown_finish means the error requires force-closing the channel.
if let Some((shutdown_res, update_option)) = shutdown_finish {
let counterparty_node_id = shutdown_res.counterparty_node_id;
let channel_id = shutdown_res.channel_id;
let logger = WithContext::from(
&$self.logger, Some(counterparty_node_id), Some(channel_id), None
);
log_error!(logger, "Force-closing channel: {}", err.err);
$self.finish_close_channel(shutdown_res);
// Broadcast a final channel_update (disabling the channel) if one was provided.
if let Some(update) = update_option {
let mut pending_broadcast_messages = $self.pending_broadcast_messages.lock().unwrap();
pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
} else {
log_error!($self.logger, "Got non-closing error: {}", err.err);
}
// IgnoreError suppresses any outbound message; everything else is relayed to the peer.
if let msgs::ErrorAction::IgnoreError = err.action {
} else {
msg_event = Some(events::MessageSendEvent::HandleError {
node_id: $counterparty_node_id,
action: err.action.clone()
});
}
if let Some(msg_event) = msg_event {
// Queue the message on the peer's state if the peer is still known to us.
let per_peer_state = $self.per_peer_state.read().unwrap();
if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) {
let mut peer_state = peer_state_mutex.lock().unwrap();
peer_state.pending_msg_events.push(msg_event);
}
}
Err(err)
},
}
} };
}
// Performs bookkeeping for a channel being closed while the relevant peer-state lock is
// already held: applies any final monitor update, records the closed channel's latest monitor
// update id, and removes the channel from the outpoint/SCID lookup maps.
macro_rules! locked_close_channel {
($self: ident, $peer_state: expr, $channel_context: expr, $shutdown_res_mut: expr) => {{
// Apply the shutdown's final monitor update (if any) without releasing the peer lock;
// completion actions are deferred, hence REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER.
if let Some((_, funding_txo, _, update)) = $shutdown_res_mut.monitor_update.take() {
handle_new_monitor_update!($self, funding_txo, update, $peer_state,
$channel_context, REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER);
}
let update_id = $channel_context.get_latest_monitor_update_id();
// Track the final update id for channels which got far enough to have a monitor
// (confirmed funding, zero-conf, or more than the initial update).
if $channel_context.get_funding_tx_confirmation_height().is_some() || $channel_context.minimum_depth() == Some(0) || update_id > 1 {
let chan_id = $channel_context.channel_id();
$peer_state.closed_channel_monitor_update_ids.insert(chan_id, update_id);
}
if let Some(outpoint) = $channel_context.get_funding_txo() {
$self.outpoint_to_peer.lock().unwrap().remove(&outpoint);
}
let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
if let Some(short_id) = $channel_context.get_short_channel_id() {
short_to_chan_info.remove(&short_id);
} else {
// Channel never got a real SCID, so its outbound alias must still be reserved.
let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel_context.outbound_scid_alias());
debug_assert!(alias_removed);
}
// The alias mapping is removed in either case.
short_to_chan_info.remove(&$channel_context.outbound_scid_alias());
}}
}
// Converts a ChannelError into a (drop_channel, MsgHandleErrInternal) pair. Warn/Ignore leave
// the channel alive; Close force-shuts the channel down (including `locked_close_channel!`
// bookkeeping). The FUNDED/UNFUNDED variants differ only in whether a broadcastable
// channel_update is attached, and the phase-dispatch variant picks between them.
macro_rules! convert_chan_phase_err {
($self: ident, $peer_state: expr, $err: expr, $channel: expr, $channel_id: expr, MANUAL_CHANNEL_UPDATE, $channel_update: expr) => {
match $err {
ChannelError::Warn(msg) => {
(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), *$channel_id))
},
ChannelError::Ignore(msg) => {
(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), *$channel_id))
},
ChannelError::Close((msg, reason)) => {
let logger = WithChannelContext::from(&$self.logger, &$channel.context, None);
log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
let mut shutdown_res = $channel.context.force_shutdown(true, reason);
locked_close_channel!($self, $peer_state, &$channel.context, &mut shutdown_res);
let err =
MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $channel_update);
// `true` tells the caller to remove the channel from its map.
(true, err)
},
}
};
($self: ident, $peer_state: expr, $err: expr, $channel: expr, $channel_id: expr, FUNDED_CHANNEL) => {
// Funded channels can produce a broadcastable channel_update for the close.
convert_chan_phase_err!($self, $peer_state, $err, $channel, $channel_id, MANUAL_CHANNEL_UPDATE, { $self.get_channel_update_for_broadcast($channel).ok() })
};
($self: ident, $peer_state: expr, $err: expr, $channel: expr, $channel_id: expr, UNFUNDED_CHANNEL) => {
// Unfunded channels were never announced, so there is no update to broadcast.
convert_chan_phase_err!($self, $peer_state, $err, $channel, $channel_id, MANUAL_CHANNEL_UPDATE, None)
};
($self: ident, $peer_state: expr, $err: expr, $channel_phase: expr, $channel_id: expr) => {
match $channel_phase {
ChannelPhase::Funded(channel) => {
convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, FUNDED_CHANNEL)
},
ChannelPhase::UnfundedOutboundV1(channel) => {
convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, UNFUNDED_CHANNEL)
},
ChannelPhase::UnfundedInboundV1(channel) => {
convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, UNFUNDED_CHANNEL)
},
ChannelPhase::UnfundedOutboundV2(channel) => {
convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, UNFUNDED_CHANNEL)
},
ChannelPhase::UnfundedInboundV2(channel) => {
convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, UNFUNDED_CHANNEL)
},
}
};
}
// Unwraps a channel-operation Result inside a loop: on Ok yields the value; on Err converts
// the error via `convert_chan_phase_err!`, removes the channel map entry if required, and
// `break`s out of the enclosing loop with the converted error.
macro_rules! break_chan_phase_entry {
($self: ident, $peer_state: expr, $res: expr, $entry: expr) => {
match $res {
Ok(res) => res,
Err(e) => {
let key = *$entry.key();
let (drop, res) = convert_chan_phase_err!($self, $peer_state, e, $entry.get_mut(), &key);
if drop {
$entry.remove_entry();
}
break Err(res);
}
}
}
}
// Identical to `break_chan_phase_entry!` except it `return`s the converted error from the
// enclosing function instead of breaking out of a loop.
macro_rules! try_chan_phase_entry {
($self: ident, $peer_state: expr, $res: expr, $entry: expr) => {
match $res {
Ok(res) => res,
Err(e) => {
let key = *$entry.key();
let (drop, res) = convert_chan_phase_err!($self, $peer_state, e, $entry.get_mut(), &key);
if drop {
$entry.remove_entry();
}
return Err(res);
}
}
}
}
// Removes a channel from its map entry, runs the `locked_close_channel!` bookkeeping for it,
// and evaluates to the removed channel.
macro_rules! remove_channel_phase {
($self: ident, $peer_state: expr, $entry: expr, $shutdown_res_mut: expr) => {
{
let channel = $entry.remove_entry().1;
locked_close_channel!($self, $peer_state, &channel.context(), $shutdown_res_mut);
channel
}
}
}
// Queues a channel_ready message to the counterparty and registers the channel's outbound SCID
// alias (and real SCID, once known) in the short-channel-id lookup map, asserting that no
// different channel already claims either id.
macro_rules! send_channel_ready {
($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{
$pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
node_id: $channel.context.get_counterparty_node_id(),
msg: $channel_ready_msg,
});
// Insertions are idempotent for this channel; colliding with a *different* channel is a bug.
let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
let outbound_alias_insert = short_to_chan_info.insert($channel.context.outbound_scid_alias(), ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()));
assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()),
"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
if let Some(real_scid) = $channel.context.get_short_channel_id() {
let scid_insert = short_to_chan_info.insert(real_scid, ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()));
assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()),
"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
}
}}
}
// Pushes a FundingTxBroadcastSafe event for the channel, at most once per channel (guarded by
// the channel context's emitted flag, which is set after pushing).
macro_rules! emit_funding_tx_broadcast_safe_event {
($locked_events: expr, $channel: expr, $funding_txo: expr) => {
if !$channel.context.funding_tx_broadcast_safe_event_emitted() {
$locked_events.push_back((events::Event::FundingTxBroadcastSafe {
channel_id: $channel.context.channel_id(),
user_channel_id: $channel.context.get_user_id(),
funding_txo: $funding_txo,
counterparty_node_id: $channel.context.get_counterparty_node_id(),
former_temporary_channel_id: $channel.context.temporary_channel_id()
.expect("Unreachable: FundingTxBroadcastSafe event feature added to channel establishment process in LDK v0.0.124 where this should never be None."),
}, None));
$channel.context.set_funding_tx_broadcast_safe_event_emitted();
}
}
}
// Pushes a ChannelPending event for the channel, at most once per channel (the context's
// should-emit flag gates entry and is cleared after the push).
macro_rules! emit_channel_pending_event {
($locked_events: expr, $channel: expr) => {
if $channel.context.should_emit_channel_pending_event() {
$locked_events.push_back((events::Event::ChannelPending {
channel_id: $channel.context.channel_id(),
former_temporary_channel_id: $channel.context.temporary_channel_id(),
counterparty_node_id: $channel.context.get_counterparty_node_id(),
user_channel_id: $channel.context.get_user_id(),
funding_txo: $channel.context.get_funding_txo().unwrap().into_bitcoin_outpoint(),
channel_type: Some($channel.context.get_channel_type().clone()),
}, None));
$channel.context.set_channel_pending_event_emitted();
}
}
}
// Pushes a ChannelReady event for the channel, at most once, asserting that the ChannelPending
// event was already emitted first (Pending must always precede Ready).
macro_rules! emit_channel_ready_event {
($locked_events: expr, $channel: expr) => {
if $channel.context.should_emit_channel_ready_event() {
debug_assert!($channel.context.channel_pending_event_emitted());
$locked_events.push_back((events::Event::ChannelReady {
channel_id: $channel.context.channel_id(),
user_channel_id: $channel.context.get_user_id(),
counterparty_node_id: $channel.context.get_counterparty_node_id(),
channel_type: $channel.context.get_channel_type().clone(),
}, None));
$channel.context.set_channel_ready_event_emitted();
}
}
}
// Resumes a channel whose pending ChannelMonitor update(s) just completed: replays the queued
// channel messages, handles batch-funding completion, then (after explicitly dropping the
// peer-state and per-peer-state locks) runs blocked actions, forwards/decodes HTLCs, and fails
// back any HTLCs the resumption produced. Lock-drop ordering here is deliberate and load-bearing.
macro_rules! handle_monitor_update_completion {
($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
let mut updates = $chan.monitor_updating_restored(&&logger,
&$self.node_signer, $self.chain_hash, &$self.default_configuration,
$self.best_block.read().unwrap().height);
let counterparty_node_id = $chan.context.get_counterparty_node_id();
// If the channel just became ready and is usable, also send the peer a channel_update.
let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
if let Ok(msg) = $self.get_channel_update_for_unicast($chan) {
Some(events::MessageSendEvent::SendChannelUpdate {
node_id: counterparty_node_id,
msg,
})
} else { None }
} else { None };
// Collect actions that were blocked on this monitor update; processed after locks drop.
let update_actions = $peer_state.monitor_update_blocked_actions
.remove(&$chan.context.channel_id()).unwrap_or(Vec::new());
let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption(
&mut $peer_state.pending_msg_events, $chan, updates.raa,
updates.commitment_update, updates.order, updates.accepted_htlcs, updates.pending_update_adds,
updates.funding_broadcastable, updates.channel_ready,
updates.announcement_sigs, updates.tx_signatures);
if let Some(upd) = channel_update {
$peer_state.pending_msg_events.push(upd);
}
let channel_id = $chan.context.channel_id();
let unbroadcasted_batch_funding_txid = $chan.context.unbroadcasted_batch_funding_txid();
// Everything below must run without the peer locks held.
core::mem::drop($peer_state_lock);
core::mem::drop($per_peer_state_lock);
// If this channel is part of a batch funding, mark it complete and, once every channel
// in the batch has completed its initial monitor update, broadcast the funding tx.
if let Some(txid) = unbroadcasted_batch_funding_txid {
let mut funding_batch_states = $self.funding_batch_states.lock().unwrap();
let mut batch_completed = false;
if let Some(batch_state) = funding_batch_states.get_mut(&txid) {
let channel_state = batch_state.iter_mut().find(|(chan_id, pubkey, _)| (
*chan_id == channel_id &&
*pubkey == counterparty_node_id
));
if let Some(channel_state) = channel_state {
channel_state.2 = true;
} else {
debug_assert!(false, "Missing channel batch state for channel which completed initial monitor update");
}
batch_completed = batch_state.iter().all(|(_, _, completed)| *completed);
} else {
debug_assert!(false, "Missing batch state for channel which completed initial monitor update");
}
if batch_completed {
let removed_batch_state = funding_batch_states.remove(&txid).into_iter().flatten();
let per_peer_state = $self.per_peer_state.read().unwrap();
let mut batch_funding_tx = None;
for (channel_id, counterparty_node_id, _) in removed_batch_state {
if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state = peer_state_mutex.lock().unwrap();
if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
batch_funding_tx = batch_funding_tx.or_else(|| chan.context.unbroadcasted_funding());
chan.set_batch_ready();
let mut pending_events = $self.pending_events.lock().unwrap();
emit_channel_pending_event!(pending_events, chan);
}
}
}
if let Some(tx) = batch_funding_tx {
log_info!($self.logger, "Broadcasting batch funding transaction with txid {}", tx.compute_txid());
$self.tx_broadcaster.broadcast_transactions(&[&tx]);
}
}
}
$self.handle_monitor_update_completion_actions(update_actions);
if let Some(forwards) = htlc_forwards {
$self.forward_htlcs(&mut [forwards][..]);
}
if let Some(decode) = decode_update_add_htlcs {
$self.push_decode_update_add_htlcs(decode);
}
$self.finalize_claims(updates.finalized_claimed_htlcs);
// HTLCs the resumption decided to fail are failed backwards toward their origin.
for failure in updates.failed_htlcs.drain(..) {
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
$self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
}
} }
}
// Applies a new ChannelMonitor update and dispatches on the persister's status. The `_internal`
// arm handles the three statuses (UnrecoverableError panics, InProgress defers, Completed runs
// `$completed` and evaluates to true). The `_internal_outer` arm additionally de-duplicates the
// update against the in-flight list and, before startup background-event processing has run,
// queues the update as a background event instead of applying it. The remaining public arms
// wrap these for: the initial monitor, close-time updates applied under the peer lock,
// post-close updates, and the common funded-channel case.
macro_rules! handle_new_monitor_update {
($self: ident, $update_res: expr, $logger: expr, $channel_id: expr, _internal, $completed: expr) => { {
debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire));
match $update_res {
ChannelMonitorUpdateStatus::UnrecoverableError => {
// Persistence is unrecoverably broken; continuing could lose funds, so abort.
let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
log_error!($logger, "{}", err_str);
panic!("{}", err_str);
},
ChannelMonitorUpdateStatus::InProgress => {
log_debug!($logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
$channel_id);
false
},
ChannelMonitorUpdateStatus::Completed => {
$completed;
true
},
}
} };
($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, INITIAL_MONITOR) => {
let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
handle_new_monitor_update!($self, $update_res, logger, $chan.context.channel_id(), _internal,
handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
};
(
$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $logger: expr,
$chan_id: expr, $counterparty_node_id: expr, $in_flight_updates: ident, $update_idx: ident,
_internal_outer, $completed: expr
) => { {
// Track the update in the per-channel in-flight list, reusing an identical entry if
// the same update was already queued (idempotent on replay).
$in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
.or_insert_with(Vec::new);
$update_idx = $in_flight_updates.iter().position(|upd| upd == &$update)
.unwrap_or_else(|| {
$in_flight_updates.push($update);
$in_flight_updates.len() - 1
});
if $self.background_events_processed_since_startup.load(Ordering::Acquire) {
let update_res = $self.chain_monitor.update_channel($funding_txo, &$in_flight_updates[$update_idx]);
handle_new_monitor_update!($self, update_res, $logger, $chan_id, _internal, $completed)
} else {
// Startup replay hasn't finished; regenerate the update as a background event.
let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id: $counterparty_node_id,
funding_txo: $funding_txo,
channel_id: $chan_id,
update: $in_flight_updates[$update_idx].clone(),
};
$self.pending_background_events.lock().unwrap().push(event);
false
}
} };
(
$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $chan_context: expr,
REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER
) => { {
// Close-time variant: applied while the peer lock stays held; on completion we only
// drop the in-flight entry, leaving blocked actions for later processing.
let logger = WithChannelContext::from(&$self.logger, &$chan_context, None);
let chan_id = $chan_context.channel_id();
let counterparty_node_id = $chan_context.get_counterparty_node_id();
let in_flight_updates;
let idx;
handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
counterparty_node_id, in_flight_updates, idx, _internal_outer,
{
let _ = in_flight_updates.remove(idx);
})
} };
(
$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
$per_peer_state_lock: expr, $counterparty_node_id: expr, $channel_id: expr, POST_CHANNEL_CLOSE
) => { {
// Post-close variant: once the in-flight list empties, run any blocked actions after
// dropping both peer locks.
let logger = WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None);
let in_flight_updates;
let idx;
handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger,
$channel_id, $counterparty_node_id, in_flight_updates, idx, _internal_outer,
{
let _ = in_flight_updates.remove(idx);
if in_flight_updates.is_empty() {
let update_actions = $peer_state.monitor_update_blocked_actions
.remove(&$channel_id).unwrap_or(Vec::new());
mem::drop($peer_state_lock);
mem::drop($per_peer_state_lock);
$self.handle_monitor_update_completion_actions(update_actions);
}
})
} };
(
$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
$per_peer_state_lock: expr, $chan: expr
) => { {
// Common funded-channel variant: resume the channel when no updates remain in flight
// and no blocked monitor updates are pending.
let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
let chan_id = $chan.context.channel_id();
let counterparty_node_id = $chan.context.get_counterparty_node_id();
let in_flight_updates;
let idx;
handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
counterparty_node_id, in_flight_updates, idx, _internal_outer,
{
let _ = in_flight_updates.remove(idx);
if in_flight_updates.is_empty() && $chan.blocked_monitor_updates_pending() == 0 {
handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
}
})
} };
}
// Event-processing loop shared by the EventsProvider impls. Guards against re-entrancy with
// the `pending_events_processor` flag, processes background and monitor events under the
// consistency read lock, hands each pending event to `$handle_event`, and repeats until the
// queue is empty or a handler returns Err (which stops processing so the event is retried).
macro_rules! process_events_body {
($self: expr, $event_to_handle: expr, $handle_event: expr) => {
let mut handling_failed = false;
let mut processed_all_events = false;
while !handling_failed && !processed_all_events {
// Only one thread may process events at a time; bail if another is already at it.
if $self.pending_events_processor.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_err() {
return;
}
let mut result;
{
// Background/monitor events are processed under the consistency read lock, which
// is released before user event handlers run.
let _read_guard = $self.total_consistency_lock.read().unwrap();
result = $self.process_background_events();
if $self.process_pending_monitor_events() {
result = NotifyOption::DoPersist;
}
}
// Snapshot the queue; events are only drained after being handled successfully.
let pending_events = $self.pending_events.lock().unwrap().clone();
if !pending_events.is_empty() {
result = NotifyOption::DoPersist;
}
let mut post_event_actions = Vec::new();
let mut num_handled_events = 0;
for (event, action_opt) in pending_events {
log_trace!($self.logger, "Handling event {:?}...", event);
$event_to_handle = event;
let event_handling_result = $handle_event;
log_trace!($self.logger, "Done handling event, result: {:?}", event_handling_result);
match event_handling_result {
Ok(()) => {
if let Some(action) = action_opt {
post_event_actions.push(action);
}
num_handled_events += 1;
}
Err(_e) => {
// Stop here; unhandled events stay queued and will be replayed.
handling_failed = true;
break;
}
}
}
{
// Drain exactly the events that were handled, then release the processor flag.
let mut pending_events = $self.pending_events.lock().unwrap();
pending_events.drain(..num_handled_events);
processed_all_events = pending_events.is_empty();
$self.pending_events_processor.store(false, Ordering::Release);
}
if !post_event_actions.is_empty() {
$self.handle_post_event_actions(post_event_actions);
// Post-event actions may have queued new events; loop again to pick them up.
processed_all_events = false;
}
match result {
NotifyOption::DoPersist => {
$self.needs_persist_flag.store(true, Ordering::Release);
$self.event_persist_notifier.notify();
},
NotifyOption::SkipPersistHandleEvents =>
$self.event_persist_notifier.notify(),
NotifyOption::SkipPersistNoEvents => {},
}
}
}
}
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
/// Constructs a new `ChannelManager` from its dependencies, the user's default channel
/// `config`, the chain `params` (network + current tip), and the current unix timestamp
/// (seconds) used to seed `highest_seen_timestamp`.
pub fn new(
fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, message_router: MR, logger: L,
entropy_source: ES, node_signer: NS, signer_provider: SP, config: UserConfig,
params: ChainParameters, current_timestamp: u32,
) -> Self {
let mut secp_ctx = Secp256k1::new();
// Re-randomize the secp context against side-channel attacks using our entropy source.
secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
let expanded_inbound_key = node_signer.get_inbound_payment_key();
ChannelManager {
default_configuration: config.clone(),
chain_hash: ChainHash::using_genesis_block(params.network),
fee_estimator: LowerBoundedFeeEstimator::new(fee_est),
chain_monitor,
tx_broadcaster,
router,
message_router,
best_block: RwLock::new(params.best_block),
outbound_scid_aliases: Mutex::new(new_hash_set()),
pending_outbound_payments: OutboundPayments::new(new_hash_map()),
forward_htlcs: Mutex::new(new_hash_map()),
decode_update_add_htlcs: Mutex::new(new_hash_map()),
claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: new_hash_map(), pending_claiming_payments: new_hash_map() }),
pending_intercepted_htlcs: Mutex::new(new_hash_map()),
outpoint_to_peer: Mutex::new(new_hash_map()),
short_to_chan_info: FairRwLock::new(new_hash_map()),
our_network_pubkey: node_signer.get_node_id(Recipient::Node).unwrap(),
secp_ctx,
inbound_payment_key: expanded_inbound_key,
// Three independent 32-byte secrets, each drawn separately from the entropy source.
fake_scid_rand_bytes: entropy_source.get_secure_random_bytes(),
probing_cookie_secret: entropy_source.get_secure_random_bytes(),
inbound_payment_id_secret: entropy_source.get_secure_random_bytes(),
highest_seen_timestamp: AtomicUsize::new(current_timestamp as usize),
per_peer_state: FairRwLock::new(new_hash_map()),
pending_events: Mutex::new(VecDeque::new()),
pending_events_processor: AtomicBool::new(false),
pending_background_events: Mutex::new(Vec::new()),
total_consistency_lock: RwLock::new(()),
background_events_processed_since_startup: AtomicBool::new(false),
event_persist_notifier: Notifier::new(),
needs_persist_flag: AtomicBool::new(false),
funding_batch_states: Mutex::new(BTreeMap::new()),
pending_offers_messages: Mutex::new(Vec::new()),
pending_async_payments_messages: Mutex::new(Vec::new()),
pending_broadcast_messages: Mutex::new(Vec::new()),
last_days_feerates: Mutex::new(VecDeque::new()),
entropy_source,
node_signer,
signer_provider,
logger,
#[cfg(feature = "dnssec")]
hrn_resolver: OMNameResolver::new(current_timestamp, params.best_block.height),
#[cfg(feature = "dnssec")]
pending_dns_onion_messages: Mutex::new(Vec::new()),
#[cfg(feature = "_test_utils")]
testing_dnssec_proof_offer_resolution_override: Mutex::new(new_hash_map()),
}
}
/// Returns the [`UserConfig`] this manager applies to new channels by default.
pub fn get_current_default_configuration(&self) -> &UserConfig {
&self.default_configuration
}
/// Test-only public wrapper around [`Self::create_and_insert_outbound_scid_alias`].
#[cfg(test)]
pub fn create_and_insert_outbound_scid_alias_for_test(&self) -> u64 {
self.create_and_insert_outbound_scid_alias()
}
/// Generates a fresh, nonzero outbound SCID alias, inserts it into
/// `self.outbound_scid_aliases`, and returns it. Panics if ~1M draws fail to find an
/// unused alias, which indicates a broken RNG or an absurd number of live channels.
fn create_and_insert_outbound_scid_alias(&self) -> u64 {
let height = self.best_block.read().unwrap().height;
let mut outbound_scid_alias = 0;
let mut i = 0;
loop {
// Under fuzzing, aliases are deterministic (simple increment) for reproducibility.
if cfg!(fuzzing) { outbound_scid_alias += 1;
} else {
outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
}
// `insert` returning true means the alias was not previously reserved.
if outbound_scid_alias != 0 && self.outbound_scid_aliases.lock().unwrap().insert(outbound_scid_alias) {
break;
}
i += 1;
if i > 1_000_000 { panic!("Your RNG is busted or we ran out of possible outbound SCID aliases (which should never happen before we run out of memory to store channels"); }
}
outbound_scid_alias
}
/// Creates a new outbound channel to the given peer, queueing an `open_channel` message for
/// delivery and returning the channel's temporary [`ChannelId`].
///
/// * `their_network_key` — the counterparty's node id; we must already be connected to them.
/// * `channel_value_satoshis` — total channel capacity; must be at least 1000 sats.
/// * `push_msat` — millisatoshis gifted to the counterparty at open.
/// * `user_channel_id` — opaque caller-chosen id echoed back in events.
/// * `temporary_channel_id` — optional caller-chosen temporary id; must not collide with an
///   existing channel to this peer.
/// * `override_config` — per-channel config overriding `self.default_configuration`.
///
/// Errors with [`APIError::APIMisuseError`] on a too-small value, an unknown peer, a
/// duplicate temporary id, or (under fuzzing) an RNG-induced id collision.
pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u128, temporary_channel_id: Option<ChannelId>, override_config: Option<UserConfig>) -> Result<ChannelId, APIError> {
	if channel_value_satoshis < 1000 {
		return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
	}
	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
	// The guard above holds the consistency read lock, so try_write must fail. (The stray
	// `&` previously applied to the whole `is_err()` expression, asserting on a needless
	// `&bool` borrow; it only compiled via `Not for &bool`.)
	debug_assert!(self.total_consistency_lock.try_write().is_err());
	let per_peer_state = self.per_peer_state.read().unwrap();
	let peer_state_mutex = per_peer_state.get(&their_network_key)
		.ok_or_else(|| APIError::APIMisuseError{ err: format!("Not connected to node: {}", their_network_key) })?;
	let mut peer_state = peer_state_mutex.lock().unwrap();
	// Reject a caller-supplied temporary id that would collide with an existing channel.
	if let Some(temporary_channel_id) = temporary_channel_id {
		if peer_state.channel_by_id.contains_key(&temporary_channel_id) {
			return Err(APIError::APIMisuseError{ err: format!("Channel with temporary channel ID {} already exists!", temporary_channel_id)});
		}
	}
	let mut channel = {
		let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
		let their_features = &peer_state.latest_features;
		let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
		match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key,
			their_features, channel_value_satoshis, push_msat, user_channel_id, config,
			self.best_block.read().unwrap().height, outbound_scid_alias, temporary_channel_id, &*self.logger)
		{
			Ok(res) => res,
			Err(e) => {
				// Release the alias we reserved above so it can be reused.
				self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
				return Err(e);
			},
		}
	};
	let logger = WithChannelContext::from(&self.logger, &channel.context, None);
	let res = channel.get_open_channel(self.chain_hash, &&logger);
	let temporary_channel_id = channel.context.channel_id();
	match peer_state.channel_by_id.entry(temporary_channel_id) {
		hash_map::Entry::Occupied(_) => {
			// A randomly-generated id colliding implies a broken RNG; tolerate it only
			// under fuzzing, where the "RNG" is deterministic.
			if cfg!(fuzzing) {
				return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
			} else {
				panic!("RNG is bad???");
			}
		},
		hash_map::Entry::Vacant(entry) => { entry.insert(ChannelPhase::UnfundedOutboundV1(channel)); }
	}
	// Queue the open_channel message for the peer (None if the channel generated no message).
	if let Some(msg) = res {
		peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
			node_id: their_network_key,
			msg,
		});
	}
	Ok(temporary_channel_id)
}
/// Returns `ChannelDetails` for every funded channel (across all peers) that
/// passes the caller-supplied filter `f`.
///
/// `f` must be `Copy` because it is re-used once per peer's channel map.
/// Unfunded channels (any non-`Funded` `ChannelPhase`) are skipped entirely.
fn list_funded_channels_with_filter<Fn: FnMut(&(&ChannelId, &Channel<SP>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
// Capacity hint: the SCID map roughly tracks the number of live channels.
let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
{
let best_block_height = self.best_block.read().unwrap().height;
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
res.extend(peer_state.channel_by_id.iter()
// Only fully-funded channels are considered.
.filter_map(|(chan_id, phase)| match phase {
ChannelPhase::Funded(chan) => Some((chan_id, chan)),
_ => None,
})
.filter(f)
.map(|(_channel_id, channel)| {
ChannelDetails::from_channel_context(&channel.context, best_block_height,
peer_state.latest_features.clone(), &self.fee_estimator)
})
);
}
}
res
}
/// Returns `ChannelDetails` for every channel we have with every peer, in any
/// phase (funded or not).
pub fn list_channels(&self) -> Vec<ChannelDetails> {
	// Capacity hint: the SCID map roughly tracks the number of live channels.
	let mut details = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
	let best_block_height = self.best_block.read().unwrap().height;
	let per_peer_state = self.per_peer_state.read().unwrap();
	for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		details.extend(peer_state.channel_by_id.iter().map(|(_, phase)| {
			ChannelDetails::from_channel_context(phase.context(), best_block_height,
				peer_state.latest_features.clone(), &self.fee_estimator)
		}));
	}
	details
}
/// Returns `ChannelDetails` for only those funded channels that are currently
/// live (per `ChannelContext::is_live`), i.e. usable for sending payments.
pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
self.list_funded_channels_with_filter(|&(_, ref channel)| channel.context.is_live())
}
/// Returns `ChannelDetails` for every channel (in any phase) we have with the
/// given counterparty, or an empty `Vec` if we have no state for that peer.
pub fn list_channels_with_counterparty(&self, counterparty_node_id: &PublicKey) -> Vec<ChannelDetails> {
	let best_block_height = self.best_block.read().unwrap().height;
	let per_peer_state = self.per_peer_state.read().unwrap();
	match per_peer_state.get(counterparty_node_id) {
		None => Vec::new(),
		Some(peer_state_mutex) => {
			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			let peer_state = &mut *peer_state_lock;
			let features = &peer_state.latest_features;
			peer_state.channel_by_id
				.iter()
				.map(|(_, phase)| phase.context())
				.map(|context| {
					ChannelDetails::from_channel_context(
						context, best_block_height, features.clone(), &self.fee_estimator)
				})
				.collect()
		},
	}
}
/// Summarizes the current pending outbound payments as `RecentPaymentDetails`.
///
/// Payments still awaiting an invoice/offer (including static-invoice-based
/// async payments) report as `AwaitingInvoice`; in-flight retryable payments
/// as `Pending`; and abandoned/fulfilled payments as such. `Legacy` entries
/// carry too little state to summarize and are omitted.
pub fn list_recent_payments(&self) -> Vec<RecentPaymentDetails> {
	let outbounds = self.pending_outbound_payments.pending_outbound_payments.lock().unwrap();
	outbounds.iter()
		.filter_map(|(payment_id, payment)| {
			let payment_id = *payment_id;
			match payment {
				// All pre-send states surface identically to the caller.
				PendingOutboundPayment::AwaitingInvoice { .. }
					| PendingOutboundPayment::AwaitingOffer { .. }
					| PendingOutboundPayment::InvoiceReceived { .. }
					| PendingOutboundPayment::StaticInvoiceReceived { .. } =>
					Some(RecentPaymentDetails::AwaitingInvoice { payment_id }),
				PendingOutboundPayment::Retryable { payment_hash, total_msat, .. } =>
					Some(RecentPaymentDetails::Pending {
						payment_id,
						payment_hash: *payment_hash,
						total_msat: *total_msat,
					}),
				PendingOutboundPayment::Abandoned { payment_hash, .. } =>
					Some(RecentPaymentDetails::Abandoned { payment_id, payment_hash: *payment_hash }),
				PendingOutboundPayment::Fulfilled { payment_hash, .. } =>
					Some(RecentPaymentDetails::Fulfilled { payment_id, payment_hash: *payment_hash }),
				PendingOutboundPayment::Legacy { .. } => None,
			}
		})
		.collect()
}
/// Shared implementation of the cooperative-close entry points.
///
/// For a funded channel this begins the shutdown handshake: a `shutdown`
/// message is queued for the peer, and any HTLCs `get_shutdown` dropped are
/// failed backwards below. For an unfunded channel the close completes
/// immediately via `force_shutdown` (nothing was ever broadcast).
///
/// `target_feerate_sats_per_1000_weight` / `override_shutdown_script` are
/// forwarded to `Channel::get_shutdown` to control the closing transaction.
fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new();
let mut shutdown_result = None;
// Scope the per-peer locks: HTLC failure and finish_close_channel below must
// run with no peer-state locks held.
{
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let funding_txo_opt = chan.context.get_funding_txo();
let their_features = &peer_state.latest_features;
let (shutdown_msg, mut monitor_update_opt, htlcs) =
chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
failed_htlcs = htlcs;
peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
node_id: *counterparty_node_id,
msg: shutdown_msg,
});
debug_assert!(monitor_update_opt.is_none() || !chan.is_shutdown(),
"We can't both complete shutdown and generate a monitor update");
// Persist the shutdown state to the channel monitor before we
// consider the close in progress.
if let Some(monitor_update) = monitor_update_opt.take() {
handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
peer_state_lock, peer_state, per_peer_state, chan);
}
} else {
// Unfunded channel: no on-chain state exists, so we can drop it
// immediately without broadcasting anything.
let mut shutdown_res = chan_phase_entry.get_mut().context_mut()
.force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) });
remove_channel_phase!(self, peer_state, chan_phase_entry, shutdown_res);
shutdown_result = Some(shutdown_res);
}
},
hash_map::Entry::Vacant(_) => {
return Err(APIError::ChannelUnavailable {
err: format!(
"Channel with id {} not found for the passed counterparty node_id {}",
channel_id, counterparty_node_id,
)
});
},
}
}
// Fail back (0x4000|8: temporary_channel_failure-class error) any HTLCs that
// were dropped when shutdown began. Locks are released at this point.
for htlc_source in failed_htlcs.drain(..) {
let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id };
self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
}
if let Some(shutdown_result) = shutdown_result {
self.finish_close_channel(shutdown_result);
}
Ok(())
}
/// Begins a cooperative close of the given channel using our default feerate
/// and shutdown script. See [`Self::close_channel_internal`].
pub fn close_channel(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey) -> Result<(), APIError> {
self.close_channel_internal(channel_id, counterparty_node_id, None, None)
}
/// Begins a cooperative close with an optional explicit closing-transaction
/// feerate and/or shutdown script. See [`Self::close_channel_internal`].
pub fn close_channel_with_feerate_and_script(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
}
/// Applies a `ChannelMonitorUpdate` for a channel that is (or is about to be)
/// closed.
///
/// If the channel still exists in our per-peer map and is funded, the update
/// is routed through the normal in-channel path; otherwise it is applied via
/// the POST_CHANNEL_CLOSE path, which handles updates for channels we no
/// longer track.
fn apply_post_close_monitor_update(
&self, counterparty_node_id: PublicKey, channel_id: ChannelId, funding_txo: OutPoint,
monitor_update: ChannelMonitorUpdate,
) {
let per_peer_state = self.per_peer_state.read().unwrap();
let mut peer_state_lock = per_peer_state.get(&counterparty_node_id)
.expect("We must always have a peer entry for a peer with which we have channels that have ChannelMonitors")
.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(channel_id) {
hash_map::Entry::Occupied(mut chan_phase) => {
if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
handle_new_monitor_update!(self, funding_txo,
monitor_update, peer_state_lock, peer_state, per_peer_state, chan);
return;
} else {
// Monitor updates only ever exist for channels that reached the
// funded state; anything else indicates a logic error upstream.
debug_assert!(false, "We shouldn't have an update for a non-funded channel");
}
},
hash_map::Entry::Vacant(_) => {},
}
// Channel no longer (or never) in the map: apply via the post-close path.
handle_new_monitor_update!(
self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
counterparty_node_id, channel_id, POST_CHANNEL_CLOSE
);
}
/// Completes the closure of a channel given its `ShutdownResult`: fails back
/// dropped HTLCs, flushes any blocked monitor-update completion actions,
/// force-closes sibling channels in the same unbroadcasted funding batch, and
/// emits `ChannelClosed` (and, if the funding tx was never broadcast,
/// `DiscardFunding`) events.
///
/// Must be called with no peer-state locks held (asserted below); may recurse
/// once per batch sibling closed.
fn finish_close_channel(&self, mut shutdown_res: ShutdownResult) {
// Callers must not hold per_peer_state (or any individual peer lock), as we
// take them below and would deadlock / invert lock order otherwise.
debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
#[cfg(debug_assertions)]
for (_, peer) in self.per_peer_state.read().unwrap().iter() {
debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
}
let logger = WithContext::from(
&self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id), None
);
log_debug!(logger, "Finishing closure of channel due to {} with {} HTLCs to fail",
shutdown_res.closure_reason, shutdown_res.dropped_outbound_htlcs.len());
// Fail back every HTLC that was in flight over the now-closed channel
// (0x4000|8: temporary/permanent channel failure class).
for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
}
if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
// In the expected flow this update is consumed earlier; this is a
// defensive fallback (hence the debug_assert).
debug_assert!(false, "This should have been handled in `locked_close_channel`");
self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
}
if self.background_events_processed_since_startup.load(Ordering::Acquire) {
// If there are no in-flight monitor updates for this channel, any
// actions blocked on monitor completion can run now.
if let Some(funding_txo) = shutdown_res.channel_funding_txo {
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mtx) = per_peer_state.get(&shutdown_res.counterparty_node_id) {
let mut peer_state = peer_state_mtx.lock().unwrap();
if peer_state.in_flight_monitor_updates.get(&funding_txo).map(|l| l.is_empty()).unwrap_or(true) {
let update_actions = peer_state.monitor_update_blocked_actions
.remove(&shutdown_res.channel_id).unwrap_or(Vec::new());
// Drop the locks before running the actions, which may re-lock.
mem::drop(peer_state);
mem::drop(per_peer_state);
self.handle_monitor_update_completion_actions(update_actions);
}
}
}
}
let mut shutdown_results = Vec::new();
if let Some(txid) = shutdown_res.unbroadcasted_batch_funding_txid {
// This channel was part of a batch funding whose transaction was never
// broadcast: every sibling channel in the batch must be closed too.
let mut funding_batch_states = self.funding_batch_states.lock().unwrap();
let affected_channels = funding_batch_states.remove(&txid).into_iter().flatten();
let per_peer_state = self.per_peer_state.read().unwrap();
let mut has_uncompleted_channel = None;
for (channel_id, counterparty_node_id, state) in affected_channels {
if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state = peer_state_mutex.lock().unwrap();
if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) {
let mut close_res = chan.context_mut().force_shutdown(false, ClosureReason::FundingBatchClosure);
locked_close_channel!(self, &mut *peer_state, chan.context(), close_res);
shutdown_results.push(close_res);
}
}
// Track whether any batch member had not completed its initial
// monitor update (batches should only close before that point).
has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state));
}
debug_assert!(
has_uncompleted_channel.unwrap_or(true),
"Closing a batch where all channels have completed initial monitor update",
);
}
{
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push_back((events::Event::ChannelClosed {
channel_id: shutdown_res.channel_id,
user_channel_id: shutdown_res.user_channel_id,
reason: shutdown_res.closure_reason,
counterparty_node_id: Some(shutdown_res.counterparty_node_id),
channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis),
channel_funding_txo: shutdown_res.channel_funding_txo,
last_local_balance_msat: Some(shutdown_res.last_local_balance_msat),
}, None));
// If we held a funding transaction that never hit the chain, tell the
// user to discard it (by outpoint when they handle broadcast manually).
if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx {
let funding_info = if shutdown_res.is_manual_broadcast {
FundingInfo::OutPoint {
outpoint: shutdown_res.channel_funding_txo
.expect("We had an unbroadcasted funding tx, so should also have had a funding outpoint"),
}
} else {
FundingInfo::Tx{ transaction }
};
pending_events.push_back((events::Event::DiscardFunding {
channel_id: shutdown_res.channel_id, funding_info
}, None));
}
}
// Recursively finish closure of any batch siblings we force-closed above.
for shutdown_result in shutdown_results.drain(..) {
self.finish_close_channel(shutdown_result);
}
}
/// Force-closes the given channel with the given peer.
///
/// `peer_msg`, when set, indicates the counterparty initiated the close (via
/// an error message), which is reflected in the `ClosureReason`; otherwise we
/// record a holder-initiated close with `broadcast` indicating whether the
/// latest local commitment transaction should be broadcast.
///
/// Also handles channels that only exist as inbound open requests (not yet in
/// `channel_by_id`). Returns the counterparty node id so callers can queue
/// follow-up messages.
fn force_close_channel_with_peer(&self, channel_id: &ChannelId, peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool)
-> Result<PublicKey, APIError> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(peer_node_id)
.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?;
let (update_opt, counterparty_node_id) = {
let mut peer_state = peer_state_mutex.lock().unwrap();
let closure_reason = if let Some(peer_msg) = peer_msg {
ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) }
} else {
ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(broadcast) }
};
let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id), None);
if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
log_error!(logger, "Force-closing channel {}", channel_id);
let (mut shutdown_res, update_opt) = match chan_phase_entry.get_mut() {
ChannelPhase::Funded(ref mut chan) => {
(
chan.context.force_shutdown(broadcast, closure_reason),
// Funded channels may have been announced: prepare a
// disabled channel_update for the network if possible.
self.get_channel_update_for_broadcast(&chan).ok(),
)
},
ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) |
ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => {
// Unfunded channels have nothing on-chain, so never broadcast.
(chan_phase_entry.get_mut().context_mut().force_shutdown(false, closure_reason), None)
},
};
let chan_phase = remove_channel_phase!(self, peer_state, chan_phase_entry, shutdown_res);
// finish_close_channel requires no peer locks be held.
mem::drop(peer_state);
mem::drop(per_peer_state);
self.finish_close_channel(shutdown_res);
(update_opt, chan_phase.context().get_counterparty_node_id())
} else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() {
// The channel was only an unaccepted inbound request; dropping the
// request is all that is needed.
log_error!(logger, "Force-closing channel {}", &channel_id);
(None, *peer_node_id)
} else {
return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, peer_node_id) });
}
};
if let Some(update) = update_opt {
// Queue the (now-disabled) channel_update for broadcast to the network.
let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
Ok(counterparty_node_id)
}
/// Force-closes the given channel and, on success, queues an error message
/// containing `error_message` for delivery to the peer.
///
/// `broadcast` controls whether the latest local commitment transaction is
/// broadcast on-chain.
fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, broadcast: bool, error_message: String)
-> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
log_debug!(self.logger,
"Force-closing channel, The error message sent to the peer : {}", error_message);
match self.force_close_channel_with_peer(channel_id, &counterparty_node_id, None, broadcast) {
Ok(counterparty_node_id) => {
// The peer may have disconnected during the close; only queue the
// error message if we still have state for them.
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state = peer_state_mutex.lock().unwrap();
peer_state.pending_msg_events.push(
events::MessageSendEvent::HandleError {
node_id: counterparty_node_id,
action: msgs::ErrorAction::SendErrorMessage {
msg: msgs::ErrorMessage { channel_id: *channel_id, data: error_message }
},
}
);
}
Ok(())
},
Err(e) => Err(e)
}
}
/// Force-closes the channel, broadcasting our latest local commitment
/// transaction, and sends `error_message` to the peer.
pub fn force_close_broadcasting_latest_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String)
-> Result<(), APIError> {
self.force_close_sending_error(channel_id, counterparty_node_id, true, error_message)
}
/// Force-closes the channel WITHOUT broadcasting our latest local commitment
/// transaction, and sends `error_message` to the peer.
pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String)
-> Result<(), APIError> {
self.force_close_sending_error(channel_id, counterparty_node_id, false, error_message)
}
/// Force-closes every channel we have, broadcasting the latest local
/// commitment transaction for each; per-channel errors are ignored.
pub fn force_close_all_channels_broadcasting_latest_txn(&self, error_message: String) {
for chan in self.list_channels() {
let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id, error_message.clone());
}
}
/// Force-closes every channel we have WITHOUT broadcasting commitment
/// transactions; per-channel errors are ignored.
pub fn force_close_all_channels_without_broadcasting_txn(&self, error_message: String) {
for chan in self.list_channels() {
let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id, error_message.clone());
}
}
/// Checks whether the HTLC described by `msg`/`next_packet` may be forwarded
/// out over the given channel, returning `(error string, BOLT 4 failure
/// code)` on refusal.
fn can_forward_htlc_to_outgoing_channel(
	&self, chan: &mut Channel<SP>, msg: &msgs::UpdateAddHTLC, next_packet: &NextPacketDetails
) -> Result<(), (&'static str, u16)> {
	// Our config may forbid forwarding over unannounced (private) channels.
	if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
		return Err(("Refusing to forward to a private channel based on our config.", 0x4000 | 10));
	}
	// With scid_privacy the counterparty asked that only the alias, never the
	// real SCID, be used to route to them.
	if chan.context.get_channel_type().supports_scid_privacy() && next_packet.outgoing_scid != chan.context.outbound_scid_alias() {
		return Err(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10));
	}
	if !chan.context.is_live() {
		let failure = if !chan.context.is_enabled() {
			("Forwarding channel has been disconnected for some time.", 0x1000 | 20)
		} else {
			("Forwarding channel is not in a ready state.", 0x1000 | 7)
		};
		return Err(failure);
	}
	if next_packet.outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() {
		return Err(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11));
	}
	// Finally, defer to the channel's own config checks (fees, CLTV delta, ...).
	chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value)
}
/// Looks up the funded channel with the given SCID (real or alias) and runs
/// `callback` on it, returning the callback's result. Returns `None` if no
/// such funded channel (or its peer) exists.
fn do_funded_channel_callback<X, C: Fn(&mut Channel<SP>) -> X>(
	&self, scid: u64, callback: C,
) -> Option<X> {
	// Resolve the SCID to a (peer, channel) pair first, dropping that lock
	// before taking the per-peer locks.
	let (counterparty_node_id, channel_id) =
		self.short_to_chan_info.read().unwrap().get(&scid).cloned()?;
	let per_peer_state = self.per_peer_state.read().unwrap();
	let mut peer_state_lock = per_peer_state.get(&counterparty_node_id)?.lock().unwrap();
	let peer_state = &mut *peer_state_lock;
	// Only fully-funded channels qualify.
	let chan = match peer_state.channel_by_id.get_mut(&channel_id) {
		Some(ChannelPhase::Funded(chan)) => chan,
		_ => return None,
	};
	Some(callback(chan))
}
/// Checks whether an incoming HTLC may be forwarded to its requested next
/// hop, returning `(error string, BOLT 4 failure code)` on refusal.
///
/// If no live channel matches the outgoing SCID, the forward is still allowed
/// when the SCID is a valid intercept SCID (and interception is enabled) or a
/// valid phantom-node SCID. The incoming CLTV is validated in all cases.
fn can_forward_htlc(
	&self, msg: &msgs::UpdateAddHTLC, next_packet_details: &NextPacketDetails
) -> Result<(), (&'static str, u16)> {
	let outgoing_scid = next_packet_details.outgoing_scid;
	let chan_check = self.do_funded_channel_callback(outgoing_scid, |chan: &mut Channel<SP>| {
		self.can_forward_htlc_to_outgoing_channel(chan, msg, next_packet_details)
	});
	match chan_check {
		Some(result) => result?,
		None => {
			// No funded channel with this SCID: only intercept/phantom
			// fake SCIDs are acceptable destinations.
			let intercept_ok = self.default_configuration.accept_intercept_htlcs
				&& fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash);
			let phantom_ok =
				fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash);
			if !intercept_ok && !phantom_ok {
				return Err(("Don't have available channel for forwarding as requested.", 0x4000 | 10));
			}
		},
	}
	// Validate the incoming HTLC's CLTV against the next block height.
	let cur_height = self.best_block.read().unwrap().height + 1;
	check_incoming_htlc_cltv(cur_height, next_packet_details.outgoing_cltv_value, msg.cltv_expiry)
}
/// Builds the failure message to send back for an `update_add_htlc` we are
/// rejecting with the given error string and BOLT 4 failure code.
///
/// For the 0x1000 (NODE|UPDATE-class) codes, the appropriate extra failure
/// data (amount, CLTV, or flags) is serialized per BOLT 4. Blinded-path HTLCs
/// (`blinding_point` set) always fail with `invalid_onion_blinding` as a
/// malformed-HTLC message, and intro-node blinded forwards substitute that
/// code to avoid leaking the real failure to the sender.
fn htlc_failure_from_update_add_err(
&self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, err_msg: &'static str,
err_code: u16, is_intro_node_blinded_forward: bool,
shared_secret: &[u8; 32]
) -> HTLCFailureMsg {
let mut res = VecWriter(Vec::with_capacity(8 + 2));
if err_code & 0x1000 == 0x1000 {
if err_code == 0x1000 | 11 || err_code == 0x1000 | 12 {
// amount_below_minimum / fee_insufficient carry the HTLC amount.
msg.amount_msat.write(&mut res).expect("Writes cannot fail");
}
else if err_code == 0x1000 | 13 {
// incorrect_cltv_expiry carries the CLTV value.
msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
}
else if err_code == 0x1000 | 20 {
// channel_disabled carries a (zero) flags field.
0u16.write(&mut res).expect("Writes cannot fail");
}
// All 0x1000-class codes end with a zero-length channel_update.
(0u16).write(&mut res).expect("Writes cannot fail");
}
log_info!(
WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash)),
"Failed to accept/forward incoming HTLC: {}", err_msg
);
// Blinded HTLCs must fail as malformed with invalid_onion_blinding.
if msg.blinding_point.is_some() {
return HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
channel_id: msg.channel_id,
htlc_id: msg.htlc_id,
sha256_of_onion: [0; 32],
failure_code: INVALID_ONION_BLINDING,
});
}
let (err_code, err_data) = if is_intro_node_blinded_forward {
(INVALID_ONION_BLINDING, &[0; 32][..])
} else {
(err_code, &res.0[..])
};
HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
channel_id: msg.channel_id,
htlc_id: msg.htlc_id,
reason: HTLCFailReason::reason(err_code, err_data.to_vec())
.get_encrypted_failure_packet(shared_secret, &None),
})
}
/// Decodes the onion of an incoming `update_add_htlc` and, when it is a
/// forward, verifies we can actually forward it.
///
/// Returns the decoded hop, the onion shared secret, and (for forwards) the
/// next-hop packet pubkey; on failure, a ready-to-send `HTLCFailureMsg`.
fn decode_update_add_htlc_onion(
	&self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey,
) -> Result<
	(onion_utils::Hop, [u8; 32], Option<Result<PublicKey, secp256k1::Error>>), HTLCFailureMsg
> {
	let (next_hop, shared_secret, next_packet_details_opt) = decode_incoming_update_add_htlc_onion(
		msg, &*self.node_signer, &*self.logger, &self.secp_ctx
	)?;
	if let Some(next_packet_details) = next_packet_details_opt {
		// This is a forward: make sure the requested next hop is viable
		// before accepting, converting any refusal into a failure message.
		if let Err((err_msg, err_code)) = self.can_forward_htlc(&msg, &next_packet_details) {
			return Err(self.htlc_failure_from_update_add_err(
				msg, counterparty_node_id, err_msg, err_code,
				next_hop.is_intro_node_blinded_forward(), &shared_secret
			));
		}
		Ok((next_hop, shared_secret, Some(next_packet_details.next_packet_pubkey)))
	} else {
		// No next-packet details: nothing further to validate here.
		Ok((next_hop, shared_secret, None))
	}
}
/// Builds the `PendingHTLCStatus` for a decoded incoming HTLC: either the
/// info needed to receive it (final hop) or to forward it (intermediate hop),
/// or a failure message if the hop data is unacceptable.
///
/// Blinded-path HTLCs that fail are reported as malformed with
/// `invalid_onion_blinding`, per the blinded-path error rules.
fn construct_pending_htlc_status<'a>(
&self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, shared_secret: [u8; 32],
decoded_hop: onion_utils::Hop, allow_underpay: bool,
next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>,
) -> PendingHTLCStatus {
// Local early-return helper: log and convert an error code + data into the
// appropriate failure variant (malformed for blinded HTLCs, relay otherwise).
macro_rules! return_err {
($msg: expr, $err_code: expr, $data: expr) => {
{
let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash));
log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
if msg.blinding_point.is_some() {
return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
msgs::UpdateFailMalformedHTLC {
channel_id: msg.channel_id,
htlc_id: msg.htlc_id,
sha256_of_onion: [0; 32],
failure_code: INVALID_ONION_BLINDING,
}
))
}
return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
channel_id: msg.channel_id,
htlc_id: msg.htlc_id,
reason: HTLCFailReason::reason($err_code, $data.to_vec())
.get_encrypted_failure_packet(&shared_secret, &None),
}));
}
}
}
match decoded_hop {
onion_utils::Hop::Receive(next_hop_data) => {
// We are the final recipient of this HTLC.
let current_height: u32 = self.best_block.read().unwrap().height;
match create_recv_pending_htlc_info(next_hop_data, shared_secret, msg.payment_hash,
msg.amount_msat, msg.cltv_expiry, None, allow_underpay, msg.skimmed_fee_msat,
current_height)
{
Ok(info) => {
PendingHTLCStatus::Forward(info)
},
Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
}
},
onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
// We are an intermediate hop: prepare the forward.
match create_fwd_pending_htlc_info(msg, next_hop_data, next_hop_hmac,
new_packet_bytes, shared_secret, next_packet_pubkey_opt) {
Ok(info) => PendingHTLCStatus::Forward(info),
Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
}
}
}
}
/// Generates a signed `channel_update` suitable for broadcast to the whole
/// network.
///
/// Fails with an `IgnoreError` if the channel is private (we must not reveal
/// it) or has no real short channel ID yet; otherwise delegates to
/// [`Self::get_channel_update_for_unicast`].
fn get_channel_update_for_broadcast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
	if !chan.context.should_announce() {
		let err = "Cannot broadcast a channel_update for a private channel".to_owned();
		return Err(LightningError { err, action: msgs::ErrorAction::IgnoreError });
	}
	if chan.context.get_short_channel_id().is_none() {
		let err = "Channel not yet established".to_owned();
		return Err(LightningError { err, action: msgs::ErrorAction::IgnoreError });
	}
	let logger = WithChannelContext::from(&self.logger, &chan.context, None);
	log_trace!(logger, "Attempting to generate broadcast channel update for channel {}", &chan.context.channel_id());
	self.get_channel_update_for_unicast(chan)
}
/// Generates a signed `channel_update` for the given channel, suitable for
/// sending directly to a single peer (and used by the broadcast path too).
///
/// Fails with an `IgnoreError` if the channel has neither a real short
/// channel ID nor a counterparty-provided inbound SCID alias yet.
fn get_channel_update_for_unicast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
	let logger = WithChannelContext::from(&self.logger, &chan.context, None);
	log_trace!(logger, "Attempting to generate channel update for channel {}", chan.context.channel_id());
	// Prefer the real SCID; fall back to the counterparty's alias for
	// channels that are not (yet) announced.
	let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
		None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
		Some(id) => id,
	};
	// Note: the original built a second, identical logger here; reuse the one
	// constructed above instead.
	log_trace!(logger, "Generating channel update for channel {}", chan.context.channel_id());
	// Direction bit: we are "node one" iff our pubkey sorts lexicographically
	// before the counterparty's.
	let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
	let enabled = chan.context.is_enabled();
	let unsigned = msgs::UnsignedChannelUpdate {
		chain_hash: self.chain_hash,
		short_channel_id,
		timestamp: chan.context.get_update_time_counter(),
		// channel_flags: bit 0 = direction, bit 1 = disabled (per BOLT 7).
		message_flags: 1, channel_flags: (!were_node_one) as u8 | ((!enabled as u8) << 1),
		cltv_expiry_delta: chan.context.get_cltv_expiry_delta(),
		htlc_minimum_msat: chan.context.get_counterparty_htlc_minimum_msat(),
		htlc_maximum_msat: chan.context.get_announced_htlc_max_msat(),
		fee_base_msat: chan.context.get_outbound_forwarding_fee_base_msat(),
		fee_proportional_millionths: chan.context.get_fee_proportional_millionths(),
		excess_data: Vec::new(),
	};
	let sig = self.node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelUpdate(&unsigned)).unwrap();
	Ok(msgs::ChannelUpdate {
		signature: sig,
		contents: unsigned
	})
}
#[cfg(test)]
/// Test-only wrapper around [`Self::send_payment_along_path`] that takes the
/// consistency lock (for reads) and builds the argument struct from loose
/// parameters.
pub(crate) fn test_send_payment_along_path(&self, path: &Path, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
let _lck = self.total_consistency_lock.read().unwrap();
self.send_payment_along_path(SendAlongPathArgs {
path, payment_hash, recipient_onion: &recipient_onion, total_value,
cur_height, payment_id, keysend_preimage, invoice_request: None, session_priv_bytes
})
}
/// Sends one HTLC along a single path of a (possibly multi-part) payment:
/// builds the onion, locates the first-hop channel, and commits the HTLC,
/// persisting the resulting monitor update.
///
/// Callers must already hold the `total_consistency_lock` (asserted below).
fn send_payment_along_path(&self, args: SendAlongPathArgs) -> Result<(), APIError> {
let SendAlongPathArgs {
path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage,
invoice_request, session_priv_bytes
} = args;
debug_assert!(self.total_consistency_lock.try_write().is_err());
let prng_seed = self.entropy_source.get_secure_random_bytes();
let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");
let (onion_packet, htlc_msat, htlc_cltv) = onion_utils::create_payment_onion(
&self.secp_ctx, &path, &session_priv, total_value, recipient_onion, cur_height,
payment_hash, keysend_preimage, invoice_request, prng_seed
).map_err(|e| {
let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None, Some(*payment_hash));
log_error!(logger, "Failed to build an onion for path for payment hash {}", payment_hash);
e
})?;
// NOTE(review): this `loop` never iterates — it exists so error paths can
// `break` a MsgHandleErrInternal out to the handle_error! call below, while
// the success path returns directly from inside.
let err: Result<(), _> = loop {
let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.hops.first().unwrap().short_channel_id) {
None => {
let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None, Some(*payment_hash));
log_error!(logger, "Failed to find first-hop for payment hash {}", payment_hash);
return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()})
},
Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
};
let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(id), Some(*payment_hash));
log_trace!(logger,
"Attempting to send payment with payment hash {} along path with next hop {}",
payment_hash, path.hops.first().unwrap().short_channel_id);
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
.ok_or_else(|| APIError::ChannelUnavailable{err: "No peer matching the path's first hop found!".to_owned() })?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(id) {
match chan_phase_entry.get_mut() {
ChannelPhase::Funded(chan) => {
if !chan.context.is_live() {
return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
}
let funding_txo = chan.context.get_funding_txo().unwrap();
let logger = WithChannelContext::from(&self.logger, &chan.context, Some(*payment_hash));
let send_res = chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(),
htlc_cltv, HTLCSource::OutboundRoute {
path: path.clone(),
session_priv: session_priv.clone(),
first_hop_htlc_msat: htlc_msat,
payment_id,
}, onion_packet, None, &self.fee_estimator, &&logger);
match break_chan_phase_entry!(self, peer_state, send_res, chan_phase_entry) {
Some(monitor_update) => {
match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
false => {
// The monitor update is still in flight; the
// HTLC will go out once it completes.
return Err(APIError::MonitorUpdateInProgress);
},
true => {},
}
},
None => {},
}
},
_ => return Err(APIError::ChannelUnavailable{err: "Channel to first hop is unfunded".to_owned()}),
};
} else {
return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()});
}
return Ok(());
};
// Only reached via `break` above, i.e. when the channel errored while
// sending: process the internal error and surface it to the caller.
match handle_error!(self, err, path.hops.first().unwrap().pubkey) {
Ok(_) => unreachable!(),
Err(e) => {
Err(APIError::ChannelUnavailable { err: e.err })
},
}
}
#[cfg(test)]
/// Test-only: sends a payment over an explicit, pre-computed `Route`,
/// bypassing the router and retry logic.
pub(crate) fn send_payment_with_route(
&self, route: Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields,
payment_id: PaymentId
) -> Result<(), PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments
.send_payment_with_route(&route, payment_hash, recipient_onion, payment_id,
&self.entropy_source, &self.node_signer, best_block_height,
|args| self.send_payment_along_path(args))
}
/// Sends a payment to the route parameters' payee, finding routes via our
/// router and retrying failed paths per `retry_strategy`. Delegates all
/// payment state tracking to `pending_outbound_payments`.
pub fn send_payment(
&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId,
route_params: RouteParameters, retry_strategy: Retry
) -> Result<(), RetryableSendFailure> {
let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments
.send_payment(payment_hash, recipient_onion, payment_id, retry_strategy, route_params,
&self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
&self.entropy_source, &self.node_signer, best_block_height, &self.logger,
&self.pending_events, |args| self.send_payment_along_path(args))
}
#[cfg(test)]
/// Test-only: low-level send over an explicit route with pre-chosen
/// per-path session privkeys and optional keysend preimage.
pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion,
keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer,
best_block_height, |args| self.send_payment_along_path(args))
}
/// Test-only: registers a pending outbound payment without sending any HTLCs, returning
/// the generated per-path onion session private keys.
#[cfg(test)]
pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route: &Route) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
	let height = self.best_block.read().unwrap().height;
	self.pending_outbound_payments.test_add_new_pending_payment(
		payment_hash, recipient_onion, payment_id, route, None, &self.entropy_source, height,
	)
}
/// Test-only: overwrites the payment metadata stored for a pending outbound payment.
#[cfg(test)]
pub(crate) fn test_set_payment_metadata(&self, payment_id: PaymentId, new_payment_metadata: Option<Vec<u8>>) {
	self.pending_outbound_payments
		.test_set_payment_metadata(payment_id, new_payment_metadata);
}
/// Verifies a [`Bolt12Invoice`] against the given [`OffersContext`] (if any) and, on
/// success, pays it under the verified [`PaymentId`].
pub fn send_payment_for_bolt12_invoice(
	&self, invoice: &Bolt12Invoice, context: Option<&OffersContext>,
) -> Result<(), Bolt12PaymentError> {
	// An invoice that fails verification is one we never requested.
	self.verify_bolt12_invoice(invoice, context)
		.map_err(|()| Bolt12PaymentError::UnexpectedInvoice)
		.and_then(|payment_id| self.send_payment_for_verified_bolt12_invoice(invoice, payment_id))
}
/// Verifies an invoice originated from one of our own requests, returning the
/// [`PaymentId`] it pays. With an [`OffersContext::OutboundPayment`] we check the payer
/// data; with no context at all, only a refund invoice without paths is verifiable via
/// its metadata. Everything else is rejected.
fn verify_bolt12_invoice(
	&self, invoice: &Bolt12Invoice, context: Option<&OffersContext>,
) -> Result<PaymentId, ()> {
	let secp_ctx = &self.secp_ctx;
	let expanded_key = &self.inbound_payment_key;
	if let Some(&OffersContext::OutboundPayment { payment_id, nonce, .. }) = context {
		invoice.verify_using_payer_data(payment_id, nonce, expanded_key, secp_ctx)
	} else if context.is_none() && invoice.is_for_refund_without_paths() {
		invoice.verify_using_metadata(expanded_key, secp_ctx)
	} else {
		Err(())
	}
}
/// Pays a BOLT 12 invoice whose origin was already verified against `payment_id`.
fn send_payment_for_verified_bolt12_invoice(&self, invoice: &Bolt12Invoice, payment_id: PaymentId) -> Result<(), Bolt12PaymentError> {
	let height = self.best_block.read().unwrap().height;
	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
	let features = self.bolt12_invoice_features();
	let usable_channels = self.list_usable_channels();
	self.pending_outbound_payments.send_payment_for_bolt12_invoice(
		invoice, payment_id, &self.router, usable_channels, features,
		|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, &self,
		&self.secp_ctx, height, &self.logger, &self.pending_events,
		|args| self.send_payment_along_path(args),
	)
}
/// Begins paying a [`StaticInvoice`]: records the invoice against the pending outbound
/// payment, then queues `held_htlc_available` onion messages over the invoice's message
/// paths so the recipient can later release the HTLCs.
#[cfg(async_payments)]
fn initiate_async_payment(
	&self, invoice: &StaticInvoice, payment_id: PaymentId
) -> Result<(), Bolt12PaymentError> {
	// Result is smuggled out of the persistence closure via this captured binding.
	let mut res = Ok(());
	PersistenceNotifierGuard::optionally_notify(self, || {
		let best_block_height = self.best_block.read().unwrap().height;
		let features = self.bolt12_invoice_features();
		let outbound_pmts_res = self.pending_outbound_payments.static_invoice_received(
			invoice, payment_id, features, best_block_height, &*self.entropy_source,
			&self.pending_events
		);
		match outbound_pmts_res {
			Ok(()) => {},
			Err(Bolt12PaymentError::UnexpectedInvoice) | Err(Bolt12PaymentError::DuplicateInvoice) => {
				// These rejections leave our state untouched, so skip persistence.
				res = outbound_pmts_res.map(|_| ());
				return NotifyOption::SkipPersistNoEvents
			},
			Err(e) => {
				res = Err(e);
				return NotifyOption::DoPersist
			}
		};
		// Authenticate the context carried in our reply path with a nonce'd HMAC so a
		// later release message can be tied back to this payment_id.
		let nonce = Nonce::from_entropy_source(&*self.entropy_source);
		let hmac = payment_id.hmac_for_async_payment(nonce, &self.inbound_payment_key);
		let reply_paths = match self.create_blinded_paths(
			MessageContext::AsyncPayments(
				AsyncPaymentsContext::OutboundPayment { payment_id, nonce, hmac }
			)
		) {
			Ok(paths) => paths,
			Err(()) => {
				// Without a reply path the recipient could never answer; give up on
				// the payment entirely.
				self.abandon_payment_with_reason(payment_id, PaymentFailureReason::BlindedPathCreationFailed);
				res = Err(Bolt12PaymentError::BlindedPathCreationFailed);
				return NotifyOption::DoPersist
			}
		};
		let mut pending_async_payments_messages = self.pending_async_payments_messages.lock().unwrap();
		// Cap the (reply_path x invoice_path) cross product so we don't queue an
		// unbounded number of onion messages.
		const HTLC_AVAILABLE_LIMIT: usize = 10;
		reply_paths
			.iter()
			.flat_map(|reply_path| invoice.message_paths().iter().map(move |invoice_path| (invoice_path, reply_path)))
			.take(HTLC_AVAILABLE_LIMIT)
			.for_each(|(invoice_path, reply_path)| {
				let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
					destination: Destination::BlindedPath(invoice_path.clone()),
					reply_path: reply_path.clone(),
				};
				let message = AsyncPaymentsMessage::HeldHtlcAvailable(HeldHtlcAvailable {});
				pending_async_payments_messages.push((message, instructions));
			});
		NotifyOption::DoPersist
	});
	res
}
/// Actually sends the HTLCs for an async payment whose static invoice was previously
/// accepted via [`Self::initiate_async_payment`]. Presumably triggered once the recipient
/// signals readiness — confirm with the caller of this method.
#[cfg(async_payments)]
fn send_payment_for_static_invoice(
	&self, payment_id: PaymentId
) -> Result<(), Bolt12PaymentError> {
	let best_block_height = self.best_block.read().unwrap().height;
	// Result is carried out of the persistence closure via this captured binding.
	let mut res = Ok(());
	PersistenceNotifierGuard::optionally_notify(self, || {
		let outbound_pmts_res = self.pending_outbound_payments.send_payment_for_static_invoice(
			payment_id, &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
			&self.entropy_source, &self.node_signer, &self, &self.secp_ctx, best_block_height,
			&self.logger, &self.pending_events, |args| self.send_payment_along_path(args)
		);
		match outbound_pmts_res {
			Err(Bolt12PaymentError::UnexpectedInvoice) | Err(Bolt12PaymentError::DuplicateInvoice) => {
				// These rejections leave our state untouched, so skip persistence.
				res = outbound_pmts_res.map(|_| ());
				NotifyOption::SkipPersistNoEvents
			},
			other_res => {
				res = other_res;
				NotifyOption::DoPersist
			}
		}
	});
	res
}
/// Signals that no further retries should be attempted for the given payment, marking it
/// as abandoned by the user.
pub fn abandon_payment(&self, payment_id: PaymentId) {
	let reason = PaymentFailureReason::UserAbandoned;
	self.abandon_payment_with_reason(payment_id, reason)
}
/// Abandons a pending outbound payment, recording `reason` for the eventual failure event.
fn abandon_payment_with_reason(&self, payment_id: PaymentId, reason: PaymentFailureReason) {
	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
	self.pending_outbound_payments
		.abandon_payment(payment_id, reason, &self.pending_events);
}
/// Sends a spontaneous (keysend) payment, returning the hash of the preimage used (a
/// random preimage is drawn when `payment_preimage` is `None`).
pub fn send_spontaneous_payment(
	&self, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields,
	payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry
) -> Result<PaymentHash, RetryableSendFailure> {
	let height = self.best_block.read().unwrap().height;
	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
	let usable_channels = self.list_usable_channels();
	self.pending_outbound_payments.send_spontaneous_payment(
		payment_preimage, recipient_onion, payment_id, retry_strategy, route_params,
		&self.router, usable_channels, || self.compute_inflight_htlcs(),
		&self.entropy_source, &self.node_signer, height, &self.logger,
		&self.pending_events, |args| self.send_payment_along_path(args),
	)
}
/// Sends a single payment probe along `path`, returning the probe's hash and id.
pub fn send_probe(&self, path: Path) -> Result<(PaymentHash, PaymentId), ProbeSendFailure> {
	let height = self.best_block.read().unwrap().height;
	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
	let send_along_path = |args| self.send_payment_along_path(args);
	self.pending_outbound_payments.send_probe(
		path, self.probing_cookie_secret, &self.entropy_source, &self.node_signer,
		height, send_along_path,
	)
}
/// Test-only: checks whether the given hash/id pair belongs to one of our probes,
/// derived from our probing cookie secret.
#[cfg(test)]
pub(crate) fn payment_is_probe(&self, payment_hash: &PaymentHash, payment_id: &PaymentId) -> bool {
	let cookie_secret = self.probing_cookie_secret;
	outbound_payment::payment_is_probe(payment_hash, payment_id, cookie_secret)
}
/// Convenience wrapper around [`Self::send_preflight_probes`] that builds the route
/// parameters from a destination node id, amount, and final CLTV delta.
pub fn send_spontaneous_preflight_probes(
	&self, node_id: PublicKey, amount_msat: u64, final_cltv_expiry_delta: u32,
	liquidity_limit_multiplier: Option<u64>,
) -> Result<Vec<(PaymentHash, PaymentId)>, ProbeSendFailure> {
	let route_params = RouteParameters::from_payment_params_and_value(
		PaymentParameters::from_node_id(node_id, final_cltv_expiry_delta),
		amount_msat,
	);
	self.send_preflight_probes(route_params, liquidity_limit_multiplier)
}
/// Sends pre-flight probes over the route(s) found for `route_params`, returning one
/// `(PaymentHash, PaymentId)` per probe actually sent.
pub fn send_preflight_probes(
	&self, route_params: RouteParameters, liquidity_limit_multiplier: Option<u64>,
) -> Result<Vec<(PaymentHash, PaymentId)>, ProbeSendFailure> {
	// By default require the first hop's outbound limit to be at least 3x the probed
	// value so the probe leaves liquidity headroom on the channel.
	let liquidity_limit_multiplier = liquidity_limit_multiplier.unwrap_or(3);
	let payer = self.get_our_node_id();
	let usable_channels = self.list_usable_channels();
	let first_hops = usable_channels.iter().collect::<Vec<_>>();
	let inflight_htlcs = self.compute_inflight_htlcs();
	let route = self
		.router
		.find_route(&payer, &route_params, Some(&first_hops), inflight_htlcs)
		.map_err(|e| {
			log_error!(self.logger, "Failed to find path for payment probe: {:?}", e);
			ProbeSendFailure::RouteNotFound
		})?;
	// Liquidity already committed to earlier probes in this batch, keyed by first-hop
	// SCID, so several probes through one channel share a single budget.
	let mut used_liquidity_map = hash_map_with_capacity(first_hops.len());
	let mut res = Vec::new();
	for mut path in route.paths {
		// Trim trailing hops whose channels are likely unannounced; fold each dropped
		// hop's value into the fee of the new last hop so amounts stay consistent.
		while let Some(last_path_hop) = path.hops.last() {
			if last_path_hop.maybe_announced_channel {
				break;
			} else {
				log_debug!(
					self.logger,
					"Avoided sending payment probe all the way to last hop {} as it is likely unannounced.",
					last_path_hop.short_channel_id
				);
				let final_value_msat = path.final_value_msat();
				path.hops.pop();
				if let Some(new_last) = path.hops.last_mut() {
					new_last.fee_msat += final_value_msat;
				}
			}
		}
		// A probe over fewer than two hops yields no useful information.
		if path.hops.len() < 2 {
			log_debug!(
				self.logger,
				"Skipped sending payment probe over path with less than two hops."
			);
			continue;
		}
		if let Some(first_path_hop) = path.hops.first() {
			if let Some(first_hop) = first_hops.iter().find(|h| {
				h.get_outbound_payment_scid() == Some(first_path_hop.short_channel_id)
			}) {
				let path_value = path.final_value_msat() + path.fee_msat();
				let used_liquidity =
					used_liquidity_map.entry(first_path_hop.short_channel_id).or_insert(0);
				// Skip the probe if it (plus earlier probes in this batch) would push
				// the channel past the configured headroom multiplier.
				if first_hop.next_outbound_htlc_limit_msat
					< (*used_liquidity + path_value) * liquidity_limit_multiplier
				{
					log_debug!(self.logger, "Skipped sending payment probe to avoid putting channel {} under the liquidity limit.", first_path_hop.short_channel_id);
					continue;
				} else {
					*used_liquidity += path_value;
				}
			}
		}
		// `?` here aborts the whole batch on the first send failure; probes already
		// sent are not returned in that case.
		res.push(self.send_probe(path).map_err(|e| {
			log_error!(self.logger, "Failed to send pre-flight probe: {:?}", e);
			e
		})?);
	}
	Ok(res)
}
/// Shared funding implementation: takes the unfunded outbound V1 channel identified by
/// `temporary_channel_id`, resolves its funding output via `find_funding_output`,
/// generates `funding_created`, and re-inserts the channel (under its — possibly
/// updated — channel id) along with an `outpoint_to_peer` entry.
///
/// Any failure force-closes the channel. `is_batch_funding` marks the transaction as
/// shared with other channels; `is_manual_broadcast` defers broadcast to the user.
fn funding_transaction_generated_intern<FundingOutput: FnMut(&OutboundV1Channel<SP>) -> Result<OutPoint, &'static str>>(
	&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding_transaction: Transaction, is_batch_funding: bool,
	mut find_funding_output: FundingOutput, is_manual_broadcast: bool,
) -> Result<(), APIError> {
	let per_peer_state = self.per_peer_state.read().unwrap();
	let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
		.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
	let mut peer_state_lock = peer_state_mutex.lock().unwrap();
	let peer_state = &mut *peer_state_lock;
	let funding_txo;
	// Remove the channel while we operate on it; it is re-inserted on success and
	// dropped (after a force-shutdown) on failure.
	let (mut chan, msg_opt) = match peer_state.channel_by_id.remove(&temporary_channel_id) {
		Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
			// Force-closes `$chan`, dropping the peer locks first (handle_error! takes
			// them itself), then returns `$api_err` to the caller.
			// NOTE(review): the counterparty id is read from `chan` rather than `$chan`;
			// both invocations pass a binding named `chan`, so this appears deliberate —
			// confirm before refactoring.
			macro_rules! close_chan { ($err: expr, $api_err: expr, $chan: expr) => { {
				let counterparty;
				let err = if let ChannelError::Close((msg, reason)) = $err {
					let channel_id = $chan.context.channel_id();
					counterparty = chan.context.get_counterparty_node_id();
					let shutdown_res = $chan.context.force_shutdown(false, reason);
					MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None)
				} else { unreachable!(); };
				mem::drop(peer_state_lock);
				mem::drop(per_peer_state);
				let _: Result<(), _> = handle_error!(self, Err(err), counterparty);
				Err($api_err)
			} } }
			match find_funding_output(&chan) {
				Ok(found_funding_txo) => funding_txo = found_funding_txo,
				Err(err) => {
					let chan_err = ChannelError::close(err.to_owned());
					let api_err = APIError::APIMisuseError { err: err.to_owned() };
					return close_chan!(chan_err, api_err, chan);
				},
			}
			let logger = WithChannelContext::from(&self.logger, &chan.context, None);
			let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger);
			match funding_res {
				Ok(funding_msg) => (chan, funding_msg),
				Err((mut chan, chan_err)) => {
					// The signer refused to sign; close the still-unfunded channel.
					let api_err = APIError::ChannelUnavailable { err: "Signer refused to sign the initial commitment transaction".to_owned() };
					return close_chan!(chan_err, api_err, chan);
				}
			}
		},
		Some(phase) => {
			// Wrong phase: restore the channel untouched before reporting misuse.
			peer_state.channel_by_id.insert(temporary_channel_id, phase);
			return Err(APIError::APIMisuseError {
				err: format!(
					"Channel with id {} for the passed counterparty node_id {} is not an unfunded, outbound V1 channel",
					temporary_channel_id, counterparty_node_id),
			})
		},
		None => return Err(APIError::ChannelUnavailable {err: format!(
			"Channel with id {} not found for the passed counterparty node_id {}",
			temporary_channel_id, counterparty_node_id),
		}),
	};
	// `msg_opt` may be `None` (no funding_created to send yet) — presumably when
	// signing completes asynchronously; confirm against `get_funding_created`.
	if let Some(msg) = msg_opt {
		peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
			node_id: chan.context.get_counterparty_node_id(),
			msg,
		});
	}
	if is_manual_broadcast {
		chan.context.set_manual_broadcast();
	}
	match peer_state.channel_by_id.entry(chan.context.channel_id()) {
		hash_map::Entry::Occupied(_) => {
			panic!("Generated duplicate funding txid?");
		},
		hash_map::Entry::Vacant(e) => {
			let mut outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
			match outpoint_to_peer.entry(funding_txo) {
				hash_map::Entry::Vacant(e) => { e.insert(chan.context.get_counterparty_node_id()); },
				hash_map::Entry::Occupied(o) => {
					// Another channel already claims this funding outpoint: close ours.
					let err = format!(
						"An existing channel using outpoint {} is open with peer {}",
						funding_txo, o.get()
					);
					// Drop every lock before finish_close_channel, which re-acquires.
					mem::drop(outpoint_to_peer);
					mem::drop(peer_state_lock);
					mem::drop(per_peer_state);
					let reason = ClosureReason::ProcessingError { err: err.clone() };
					self.finish_close_channel(chan.context.force_shutdown(true, reason));
					return Err(APIError::ChannelUnavailable { err });
				}
			}
			e.insert(ChannelPhase::UnfundedOutboundV1(chan));
		}
	}
	Ok(())
}
/// Test-only: funds a channel using a fixed `output_index` without matching the funding
/// output's script or value.
#[cfg(test)]
pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> {
	let txid = funding_transaction.compute_txid();
	let find_funding_output = |_| Ok(OutPoint { txid, index: output_index });
	self.funding_transaction_generated_intern(
		temporary_channel_id, counterparty_node_id, funding_transaction, false,
		find_funding_output, false,
	)
}
/// Funds a single channel from `funding_transaction`, as a batch of one.
pub fn funding_transaction_generated(&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding_transaction: Transaction) -> Result<(), APIError> {
	let channels = [(&temporary_channel_id, &counterparty_node_id)];
	self.batch_funding_transaction_generated(&channels, funding_transaction)
}
/// Funds a channel from a caller-provided outpoint without validating any transaction
/// contents ("unchecked" funding); broadcast is left to the caller.
pub fn unsafe_manual_funding_transaction_generated(&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding: OutPoint) -> Result<(), APIError> {
	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
	let temporary_channels = [(&temporary_channel_id, &counterparty_node_id)];
	self.batch_funding_transaction_generated_intern(&temporary_channels, FundingType::Unchecked(funding))
}
/// Funds all of `temporary_channels` from a single, fully-signed `funding_transaction`,
/// validating the transaction before use.
pub fn batch_funding_transaction_generated(&self, temporary_channels: &[(&ChannelId, &PublicKey)], funding_transaction: Transaction) -> Result<(), APIError> {
	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
	let funding = FundingType::Checked(funding_transaction);
	self.batch_funding_transaction_generated_intern(temporary_channels, funding)
}
/// Validates `funding` and funds each channel in `temporary_channels` from it, tracking
/// per-txid batch state for multi-channel fundings. On any error, every channel tied to
/// this transaction (already-recorded batch members and all requested temporary
/// channels) is force-closed and the batch state removed.
fn batch_funding_transaction_generated_intern(&self, temporary_channels: &[(&ChannelId, &PublicKey)], funding: FundingType) -> Result<(), APIError> {
	// Errors accumulate into `result` rather than returning early so the cleanup block
	// at the bottom always runs.
	let mut result = Ok(());
	if let FundingType::Checked(funding_transaction) = &funding {
		// Every input must carry a witness (i.e. be signed segwit); coinbase
		// transactions are exempted from this check.
		if !funding_transaction.is_coinbase() {
			for inp in funding_transaction.input.iter() {
				if inp.witness.is_empty() {
					result = result.and(Err(APIError::APIMisuseError {
						err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned()
					}));
				}
			}
		}
		// Output indexes are carried as u16 elsewhere in the funding flow.
		if funding_transaction.output.len() > u16::max_value() as usize {
			result = result.and(Err(APIError::APIMisuseError {
				err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
			}));
		}
		let height = self.best_block.read().unwrap().height;
		// Reject a height-based locktime that isn't final by the next block, unless
		// every input disables locktime enforcement via Sequence::MAX.
		if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) &&
			funding_transaction.lock_time.is_block_height() &&
			funding_transaction.lock_time.to_consensus_u32() > height + 1
		{
			result = result.and(Err(APIError::APIMisuseError {
				err: "Funding transaction absolute timelock is non-final".to_owned()
			}));
		}
	}
	let txid = funding.txid();
	let is_batch_funding = temporary_channels.len() > 1;
	let mut funding_batch_states = if is_batch_funding {
		Some(self.funding_batch_states.lock().unwrap())
	} else {
		None
	};
	// For batches, claim a fresh per-txid state slot; a duplicate txid is API misuse.
	let mut funding_batch_state = funding_batch_states.as_mut().and_then(|states| {
		match states.entry(txid) {
			btree_map::Entry::Occupied(_) => {
				result = result.clone().and(Err(APIError::APIMisuseError {
					err: "Batch funding transaction with the same txid already exists".to_owned()
				}));
				None
			},
			btree_map::Entry::Vacant(vacant) => Some(vacant.insert(Vec::new())),
		}
	});
	let is_manual_broadcast = funding.is_manual_broadcast();
	for &(temporary_channel_id, counterparty_node_id) in temporary_channels {
		// `and_then` short-circuits: after the first failure no further channel is funded.
		result = result.and_then(|_| self.funding_transaction_generated_intern(
			*temporary_channel_id,
			*counterparty_node_id,
			funding.transaction_or_dummy(),
			is_batch_funding,
			|chan| {
				// Locate this channel's funding output by matching both the expected
				// P2WSH script and the exact channel value; ambiguity is an error.
				let mut output_index = None;
				let expected_spk = chan.context.get_funding_redeemscript().to_p2wsh();
				let outpoint = match &funding {
					FundingType::Checked(tx) => {
						for (idx, outp) in tx.output.iter().enumerate() {
							if outp.script_pubkey == expected_spk && outp.value.to_sat() == chan.context.get_value_satoshis() {
								if output_index.is_some() {
									return Err("Multiple outputs matched the expected script and value");
								}
								output_index = Some(idx as u16);
							}
						}
						if output_index.is_none() {
							return Err("No output matched the script_pubkey and value in the FundingGenerationReady event");
						}
						OutPoint { txid, index: output_index.unwrap() }
					},
					FundingType::Unchecked(outpoint) => outpoint.clone(),
				};
				// Record this channel in the batch state as not-yet-ready (`false`).
				if let Some(funding_batch_state) = funding_batch_state.as_mut() {
					funding_batch_state.push((ChannelId::v1_from_funding_outpoint(outpoint), *counterparty_node_id, false));
				}
				Ok(outpoint)
			},
			is_manual_broadcast)
		);
	}
	if let Err(ref e) = result {
		// Something failed: close every channel associated with this transaction.
		let e = format!("Error in transaction funding: {:?}", e);
		let mut channels_to_remove = Vec::new();
		channels_to_remove.extend(funding_batch_states.as_mut()
			.and_then(|states| states.remove(&txid))
			.into_iter().flatten()
			.map(|(chan_id, node_id, _state)| (chan_id, node_id))
		);
		channels_to_remove.extend(temporary_channels.iter()
			.map(|(&chan_id, &node_id)| (chan_id, node_id))
		);
		let mut shutdown_results = Vec::new();
		{
			let per_peer_state = self.per_peer_state.read().unwrap();
			for (channel_id, counterparty_node_id) in channels_to_remove {
				// Channels may legitimately be missing (e.g. never inserted); the
				// Option chain silently skips those.
				per_peer_state.get(&counterparty_node_id)
					.map(|peer_state_mutex| peer_state_mutex.lock().unwrap())
					.and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id).map(|chan| (chan, peer_state)))
					.map(|(mut chan, mut peer_state)| {
						let closure_reason = ClosureReason::ProcessingError { err: e.clone() };
						let mut close_res = chan.context_mut().force_shutdown(false, closure_reason);
						locked_close_channel!(self, peer_state, chan.context(), close_res);
						shutdown_results.push(close_res);
						peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
							node_id: counterparty_node_id,
							action: msgs::ErrorAction::SendErrorMessage {
								msg: msgs::ErrorMessage {
									channel_id,
									data: "Failed to fund channel".to_owned(),
								}
							},
						});
					});
			}
		}
		// Release the batch-states lock before finishing closes, which take other locks.
		mem::drop(funding_batch_states);
		for shutdown_result in shutdown_results.drain(..) {
			self.finish_close_channel(shutdown_result);
		}
	}
	result
}
/// Applies a partial [`ChannelConfigUpdate`] to the given channels with
/// `counterparty_node_id` all-or-nothing: the ids are validated up front under the peer
/// lock, so either every channel is updated or none is.
pub fn update_partial_channel_config(
	&self, counterparty_node_id: &PublicKey, channel_ids: &[ChannelId], config_update: &ChannelConfigUpdate,
) -> Result<(), APIError> {
	// Enforce the minimum CLTV expiry delta before touching any state.
	if config_update.cltv_expiry_delta.map(|delta| delta < MIN_CLTV_EXPIRY_DELTA).unwrap_or(false) {
		return Err(APIError::APIMisuseError {
			err: format!("The chosen CLTV expiry delta is below the minimum of {}", MIN_CLTV_EXPIRY_DELTA),
		});
	}
	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
	let per_peer_state = self.per_peer_state.read().unwrap();
	let peer_state_mutex = per_peer_state.get(counterparty_node_id)
		.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
	let mut peer_state_lock = peer_state_mutex.lock().unwrap();
	let peer_state = &mut *peer_state_lock;
	// First pass: confirm every id exists so we never partially apply the update.
	for channel_id in channel_ids {
		if !peer_state.has_channel(channel_id) {
			return Err(APIError::ChannelUnavailable {
				err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, counterparty_node_id),
			});
		};
	}
	// Second pass: apply the update and announce changed parameters where needed.
	for channel_id in channel_ids {
		if let Some(channel_phase) = peer_state.channel_by_id.get_mut(channel_id) {
			let mut config = channel_phase.context().config();
			config.apply(config_update);
			// `update_config` returns false when nothing effectively changed; skip
			// generating channel_update messages in that case.
			if !channel_phase.context_mut().update_config(&config) {
				continue;
			}
			if let ChannelPhase::Funded(channel) = channel_phase {
				// Prefer a public broadcast update; fall back to a direct (unicast)
				// update when a broadcastable one can't be built.
				if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
					let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
					pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
				} else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
					peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
						node_id: channel.context.get_counterparty_node_id(),
						msg,
					});
				}
			}
			continue;
		} else {
			// Unreachable: the peer lock was held continuously since the first pass.
			debug_assert!(false);
			return Err(APIError::ChannelUnavailable {
				err: format!(
					"Channel with ID {} for passed counterparty_node_id {} disappeared after we confirmed its existence - this should not be reachable!",
					channel_id, counterparty_node_id),
			});
		};
	}
	Ok(())
}
pub fn update_channel_config(
&self, counterparty_node_id: &PublicKey, channel_ids: &[ChannelId], config: &ChannelConfig,
) -> Result<(), APIError> {
return self.update_partial_channel_config(counterparty_node_id, channel_ids, &(*config).into());
}
/// Forwards a previously-intercepted HTLC out over channel `next_hop_channel_id` with
/// peer `next_node_id`, forwarding `amt_to_forward_msat` and treating anything withheld
/// from the original outgoing amount as a skimmed fee.
pub fn forward_intercepted_htlc(&self, intercept_id: InterceptId, next_hop_channel_id: &ChannelId, next_node_id: PublicKey, amt_to_forward_msat: u64) -> Result<(), APIError> {
	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
	// Resolve the outgoing channel to an SCID (real if announced, otherwise our
	// outbound alias), validating the channel is funded and usable.
	let next_hop_scid = {
		let peer_state_lock = self.per_peer_state.read().unwrap();
		let peer_state_mutex = peer_state_lock.get(&next_node_id)
			.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", next_node_id) })?;
		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		match peer_state.channel_by_id.get(next_hop_channel_id) {
			Some(ChannelPhase::Funded(chan)) => {
				if !chan.context.is_usable() {
					return Err(APIError::ChannelUnavailable {
						err: format!("Channel with id {} not fully established", next_hop_channel_id)
					})
				}
				chan.context.get_short_channel_id().unwrap_or(chan.context.outbound_scid_alias())
			},
			Some(_) => return Err(APIError::ChannelUnavailable {
				err: format!("Channel with id {} for the passed counterparty node_id {} is still opening.",
					next_hop_channel_id, next_node_id)
			}),
			None => {
				let error = format!("Channel with id {} not found for the passed counterparty node_id {}",
					next_hop_channel_id, next_node_id);
				let logger = WithContext::from(&self.logger, Some(next_node_id), Some(*next_hop_channel_id), None);
				log_error!(logger, "{} when attempting to forward intercepted HTLC", error);
				return Err(APIError::ChannelUnavailable {
					err: error
				})
			}
		}
	};
	// Claim the intercepted HTLC; an unknown id means it was already resolved.
	let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id)
		.ok_or_else(|| APIError::APIMisuseError {
			err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0))
		})?;
	// Intercepted HTLCs are always forwards; retarget the routing at the chosen SCID.
	let routing = match payment.forward_info.routing {
		PendingHTLCRouting::Forward { onion_packet, blinded, .. } => {
			PendingHTLCRouting::Forward {
				onion_packet, blinded, short_channel_id: next_hop_scid
			}
		},
		_ => unreachable!() };
	// saturating_sub: forwarding more than the original amount yields a zero fee
	// rather than an underflow.
	let skimmed_fee_msat =
		payment.forward_info.outgoing_amt_msat.saturating_sub(amt_to_forward_msat);
	let pending_htlc_info = PendingHTLCInfo {
		skimmed_fee_msat: if skimmed_fee_msat == 0 { None } else { Some(skimmed_fee_msat) },
		outgoing_amt_msat: amt_to_forward_msat, routing, ..payment.forward_info
	};
	// Hand the rewritten HTLC to the normal forwarding pipeline, keyed by its
	// previous-hop data.
	let mut per_source_pending_forward = [(
		payment.prev_short_channel_id,
		payment.prev_counterparty_node_id,
		payment.prev_funding_outpoint,
		payment.prev_channel_id,
		payment.prev_user_channel_id,
		vec![(pending_htlc_info, payment.prev_htlc_id)]
	)];
	self.forward_htlcs(&mut per_source_pending_forward);
	Ok(())
}
/// Fails a previously-intercepted HTLC backwards on its incoming channel with failure
/// code 0x4000 | 10 (PERM|10, `unknown_next_peer` per BOLT 4).
pub fn fail_intercepted_htlc(&self, intercept_id: InterceptId) -> Result<(), APIError> {
	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
	// Claim the intercepted HTLC; an unknown id means it was already resolved.
	let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id)
		.ok_or_else(|| APIError::APIMisuseError {
			err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0))
		})?;
	if let PendingHTLCRouting::Forward { short_channel_id, .. } = payment.forward_info.routing {
		// Reconstruct the previous-hop data so the failure propagates back along the
		// incoming channel.
		let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
			short_channel_id: payment.prev_short_channel_id,
			user_channel_id: Some(payment.prev_user_channel_id),
			outpoint: payment.prev_funding_outpoint,
			channel_id: payment.prev_channel_id,
			counterparty_node_id: payment.prev_counterparty_node_id,
			htlc_id: payment.prev_htlc_id,
			incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret,
			phantom_shared_secret: None,
			blinded_failure: payment.forward_info.routing.blinded_failure(),
		});
		let failure_reason = HTLCFailReason::from_failure_code(0x4000 | 10);
		let destination = HTLCDestination::UnknownNextHop { requested_forward_scid: short_channel_id };
		self.fail_htlc_backwards_internal(&htlc_source, &payment.forward_info.payment_hash, &failure_reason, destination);
	// Intercepted HTLCs are always forwards, so any other routing is impossible.
	} else { unreachable!() }
	Ok(())
}
/// Decodes and triages all queued `update_add_htlc`s: each becomes either a pending
/// forward/receive or a queued backwards failure, batched per incoming channel SCID.
fn process_pending_update_add_htlcs(&self) {
	// Take the entire queue so we don't hold its lock while decoding onions.
	let mut decode_update_add_htlcs = new_hash_map();
	mem::swap(&mut decode_update_add_htlcs, &mut self.decode_update_add_htlcs.lock().unwrap());
	// Maps a failed HTLC to the destination reported in the HTLCHandlingFailed event.
	let get_failed_htlc_destination = |outgoing_scid_opt: Option<u64>, payment_hash: PaymentHash| {
		if let Some(outgoing_scid) = outgoing_scid_opt {
			match self.short_to_chan_info.read().unwrap().get(&outgoing_scid) {
				Some((outgoing_counterparty_node_id, outgoing_channel_id)) =>
					HTLCDestination::NextHopChannel {
						node_id: Some(*outgoing_counterparty_node_id),
						channel_id: *outgoing_channel_id,
					},
				None => HTLCDestination::UnknownNextHop {
					requested_forward_scid: outgoing_scid,
				},
			}
		} else {
			// No outgoing SCID means the HTLC was destined for us.
			HTLCDestination::FailedPayment { payment_hash }
		}
	};
	'outer_loop: for (incoming_scid, update_add_htlcs) in decode_update_add_htlcs {
		// Snapshot the incoming channel's identifiers and config; if the channel no
		// longer exists, its queued HTLCs are simply dropped.
		let incoming_channel_details_opt = self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
			let counterparty_node_id = chan.context.get_counterparty_node_id();
			let channel_id = chan.context.channel_id();
			let funding_txo = chan.context.get_funding_txo().unwrap();
			let user_channel_id = chan.context.get_user_id();
			let accept_underpaying_htlcs = chan.context.config().accept_underpaying_htlcs;
			(counterparty_node_id, channel_id, funding_txo, user_channel_id, accept_underpaying_htlcs)
		});
		let (
			incoming_counterparty_node_id, incoming_channel_id, incoming_funding_txo,
			incoming_user_channel_id, incoming_accept_underpaying_htlcs
		) = if let Some(incoming_channel_details) = incoming_channel_details_opt {
			incoming_channel_details
		} else {
			continue;
		};
		let mut htlc_forwards = Vec::new();
		let mut htlc_fails = Vec::new();
		for update_add_htlc in &update_add_htlcs {
			// Peel our layer of the onion; a decode failure is an invalid-onion fail.
			let (next_hop, shared_secret, next_packet_details_opt) = match decode_incoming_update_add_htlc_onion(
				&update_add_htlc, &*self.node_signer, &*self.logger, &self.secp_ctx
			) {
				Ok(decoded_onion) => decoded_onion,
				Err(htlc_fail) => {
					htlc_fails.push((htlc_fail, HTLCDestination::InvalidOnion));
					continue;
				},
			};
			let is_intro_node_blinded_forward = next_hop.is_intro_node_blinded_forward();
			let outgoing_scid_opt = next_packet_details_opt.as_ref().map(|d| d.outgoing_scid);
			// Check the incoming channel can currently accept this HTLC (policy is
			// decided inside `can_accept_incoming_htlc`).
			match self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
				let logger = WithChannelContext::from(&self.logger, &chan.context, Some(update_add_htlc.payment_hash));
				chan.can_accept_incoming_htlc(
					update_add_htlc, &self.fee_estimator, &logger,
				)
			}) {
				Some(Ok(_)) => {},
				Some(Err((err, code))) => {
					let htlc_fail = self.htlc_failure_from_update_add_err(
						&update_add_htlc, &incoming_counterparty_node_id, err, code,
						is_intro_node_blinded_forward, &shared_secret,
					);
					let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
					htlc_fails.push((htlc_fail, htlc_destination));
					continue;
				},
				// Incoming channel disappeared mid-processing: abandon this SCID's batch.
				None => continue 'outer_loop,
			}
			// For forwards, also validate the outgoing leg.
			if let Some(next_packet_details) = next_packet_details_opt.as_ref() {
				if let Err((err, code)) = self.can_forward_htlc(
					&update_add_htlc, next_packet_details
				) {
					let htlc_fail = self.htlc_failure_from_update_add_err(
						&update_add_htlc, &incoming_counterparty_node_id, err, code,
						is_intro_node_blinded_forward, &shared_secret,
					);
					let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
					htlc_fails.push((htlc_fail, htlc_destination));
					continue;
				}
			}
			// Build the final pending-HTLC disposition for this HTLC.
			match self.construct_pending_htlc_status(
				&update_add_htlc, &incoming_counterparty_node_id, shared_secret, next_hop,
				incoming_accept_underpaying_htlcs, next_packet_details_opt.map(|d| d.next_packet_pubkey),
			) {
				PendingHTLCStatus::Forward(htlc_forward) => {
					htlc_forwards.push((htlc_forward, update_add_htlc.htlc_id));
				},
				PendingHTLCStatus::Fail(htlc_fail) => {
					let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
					htlc_fails.push((htlc_fail, htlc_destination));
				},
			}
		}
		// Queue the successes as pending forwards (without generating a forward event)...
		let pending_forwards = (
			incoming_scid, Some(incoming_counterparty_node_id), incoming_funding_txo,
			incoming_channel_id, incoming_user_channel_id, htlc_forwards.drain(..).collect()
		);
		self.forward_htlcs_without_forward_event(&mut [pending_forwards]);
		// ...and queue the failures back on the incoming channel, surfacing an
		// HTLCHandlingFailed event for each.
		for (htlc_fail, htlc_destination) in htlc_fails.drain(..) {
			let failure = match htlc_fail {
				HTLCFailureMsg::Relay(fail_htlc) => HTLCForwardInfo::FailHTLC {
					htlc_id: fail_htlc.htlc_id,
					err_packet: fail_htlc.reason,
				},
				HTLCFailureMsg::Malformed(fail_malformed_htlc) => HTLCForwardInfo::FailMalformedHTLC {
					htlc_id: fail_malformed_htlc.htlc_id,
					sha256_of_onion: fail_malformed_htlc.sha256_of_onion,
					failure_code: fail_malformed_htlc.failure_code,
				},
			};
			self.forward_htlcs.lock().unwrap().entry(incoming_scid).or_default().push(failure);
			self.pending_events.lock().unwrap().push_back((events::Event::HTLCHandlingFailed {
				prev_channel_id: incoming_channel_id,
				failed_next_destination: htlc_destination,
			}, None));
		}
	}
}
/// Processes all HTLCs which were queued for forwarding or receipt in `forward_htlcs`,
/// either relaying them over an outbound channel, failing them back to their origin, or
/// (for `short_chan_id == 0` entries) committing them as claimable inbound payments.
/// Likely generates further events (`PaymentClaimable`, forwarded HTLC messages, etc).
pub fn process_pending_htlc_forwards(&self) {
	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

	self.process_pending_update_add_htlcs();

	let mut new_events = VecDeque::new();
	let mut failed_forwards = Vec::new();
	// (prev_scid, prev_counterparty, prev_funding_outpoint, prev_channel_id,
	//  prev_user_channel_id, [(htlc info, prev_htlc_id)]) tuples for payments we are
	// receiving on behalf of a phantom node, re-injected via forward_htlcs() below.
	let mut phantom_receives: Vec<(u64, Option<PublicKey>, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
	{
		// Swap the entire pending-forwards map out so we don't hold its lock while
		// processing (processing takes per-peer and channel-state locks).
		let mut forward_htlcs = new_hash_map();
		mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());

		for (short_chan_id, mut pending_forwards) in forward_htlcs {
			if short_chan_id != 0 {
				// Non-zero SCID: these HTLCs should be forwarded onward over the channel
				// with the given outbound SCID (or a suitable alternate with the same peer).
				let mut forwarding_counterparty = None;

				// Fails every remaining HTLC in $forward_infos back to its origin because
				// no usable outbound channel was found — unless the SCID is a valid phantom
				// SCID, in which case we try to receive the payment on the phantom node's
				// behalf instead.
				macro_rules! forwarding_channel_not_found {
					($forward_infos: expr) => {
						for forward_info in $forward_infos {
							match forward_info {
								HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
									prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
									prev_user_channel_id, prev_counterparty_node_id, forward_info: PendingHTLCInfo {
										routing, incoming_shared_secret, payment_hash, outgoing_amt_msat,
										outgoing_cltv_value, ..
									}
								}) => {
									// Queues a backwards-failure for this HTLC and `continue`s to
									// the next one; $next_hop_unknown selects between an
									// UnknownNextHop and FailedPayment destination.
									macro_rules! failure_handler {
										($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
											let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id), Some(payment_hash));
											log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);

											let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
												short_channel_id: prev_short_channel_id,
												user_channel_id: Some(prev_user_channel_id),
												channel_id: prev_channel_id,
												outpoint: prev_funding_outpoint,
												counterparty_node_id: prev_counterparty_node_id,
												htlc_id: prev_htlc_id,
												incoming_packet_shared_secret: incoming_shared_secret,
												phantom_shared_secret: $phantom_ss,
												blinded_failure: routing.blinded_failure(),
											});

											let reason = if $next_hop_unknown {
												HTLCDestination::UnknownNextHop { requested_forward_scid: short_chan_id }
											} else {
												HTLCDestination::FailedPayment{ payment_hash }
											};

											failed_forwards.push((htlc_source, payment_hash,
												HTLCFailReason::reason($err_code, $err_data),
												reason
											));
											continue;
										}
									}
									macro_rules! fail_forward {
										($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
											{
												failure_handler!($msg, $err_code, $err_data, $phantom_ss, true);
											}
										}
									}
									macro_rules! failed_payment {
										($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
											{
												failure_handler!($msg, $err_code, $err_data, $phantom_ss, false);
											}
										}
									}
									if let PendingHTLCRouting::Forward { ref onion_packet, .. } = routing {
										// Check whether the "missing" channel is actually a phantom
										// SCID we generated, i.e. a payment we should receive on a
										// phantom node's behalf.
										let phantom_pubkey_res = self.node_signer.get_node_id(Recipient::PhantomNode);
										if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.chain_hash) {
											let phantom_shared_secret = self.node_signer.ecdh(Recipient::PhantomNode, &onion_packet.public_key.unwrap(), None).unwrap().secret_bytes();
											let next_hop = match onion_utils::decode_next_payment_hop(
												phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac,
												payment_hash, None, &*self.node_signer
											) {
												Ok(res) => res,
												Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
													// Malformed-onion failures carry the SHA256 of the
													// onion as their failure data.
													let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).to_byte_array();
													failed_payment!(err_msg, err_code, sha256_of_onion.to_vec(), None);
												},
												Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
													failed_payment!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
												},
											};
											match next_hop {
												onion_utils::Hop::Receive(hop_data) => {
													let current_height: u32 = self.best_block.read().unwrap().height;
													match create_recv_pending_htlc_info(hop_data,
														incoming_shared_secret, payment_hash, outgoing_amt_msat,
														outgoing_cltv_value, Some(phantom_shared_secret), false, None,
														current_height)
													{
														Ok(info) => phantom_receives.push((
															prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint,
															prev_channel_id, prev_user_channel_id, vec![(info, prev_htlc_id)]
														)),
														Err(InboundHTLCErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
													}
												},
												// A phantom payment terminates at the phantom node; any
												// other hop type here indicates a bug upstream.
												_ => panic!(),
											}
										} else {
											fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None);
										}
									} else {
										fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None);
									}
								},
								HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. } => {
									// The channel we'd relay these failures over is gone; there is
									// nothing further to do with them.
								}
							}
						}
					}
				}

				let chan_info_opt = self.short_to_chan_info.read().unwrap().get(&short_chan_id).cloned();
				let (counterparty_node_id, forward_chan_id) = match chan_info_opt {
					Some((cp_id, chan_id)) => (cp_id, chan_id),
					None => {
						forwarding_channel_not_found!(pending_forwards.drain(..));
						continue;
					}
				};
				forwarding_counterparty = Some(counterparty_node_id);
				let per_peer_state = self.per_peer_state.read().unwrap();
				let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
				if peer_state_mutex_opt.is_none() {
					forwarding_channel_not_found!(pending_forwards.drain(..));
					continue;
				}
				let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
				let peer_state = &mut *peer_state_lock;
				// Drain via an explicit iterator so that, on failure, the not-yet-processed
				// remainder can be chained behind the current entry and failed as a batch.
				let mut draining_pending_forwards = pending_forwards.drain(..);
				while let Some(forward_info) = draining_pending_forwards.next() {
					let queue_fail_htlc_res = match forward_info {
						HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
							prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
							prev_user_channel_id, prev_counterparty_node_id, forward_info: PendingHTLCInfo {
								incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
								routing: PendingHTLCRouting::Forward {
									ref onion_packet, blinded, ..
								}, skimmed_fee_msat, ..
							},
						}) => {
							let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
								short_channel_id: prev_short_channel_id,
								user_channel_id: Some(prev_user_channel_id),
								counterparty_node_id: prev_counterparty_node_id,
								channel_id: prev_channel_id,
								outpoint: prev_funding_outpoint,
								htlc_id: prev_htlc_id,
								incoming_packet_shared_secret: incoming_shared_secret,
								phantom_shared_secret: None,
								blinded_failure: blinded.map(|b| b.failure),
							});
							// For blinded-path forwards, the blinding point handed to the next
							// hop is either an explicit override or the next point derived via
							// ECDH with the inbound blinding point.
							let next_blinding_point = blinded.and_then(|b| {
								b.next_blinding_override.or_else(|| {
									let encrypted_tlvs_ss = self.node_signer.ecdh(
										Recipient::Node, &b.inbound_blinding_point, None
									).unwrap().secret_bytes();
									onion_utils::next_hop_pubkey(
										&self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss
									).ok()
								})
							});

							// Choose a usable channel with this peer which can carry the HTLC,
							// preferring the one with the smallest outbound limit that still
							// fits, leaving higher-capacity channels free for larger HTLCs.
							let maybe_optimal_channel = peer_state.channel_by_id.values_mut().filter_map(|phase| match phase {
								ChannelPhase::Funded(chan) => {
									let balances = chan.context.get_available_balances(&self.fee_estimator);
									if outgoing_amt_msat <= balances.next_outbound_htlc_limit_msat &&
										outgoing_amt_msat >= balances.next_outbound_htlc_minimum_msat &&
										chan.context.is_usable() {
										Some((chan, balances))
									} else {
										None
									}
								},
								_ => None,
							}).min_by_key(|(_, balances)| balances.next_outbound_htlc_limit_msat).map(|(c, _)| c);
							let optimal_channel = match maybe_optimal_channel {
								Some(chan) => chan,
								None => {
									// No alternate fit; fall back to the exact channel requested.
									if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
										chan
									} else {
										forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
										break;
									}
								}
							};
							let logger = WithChannelContext::from(&self.logger, &optimal_channel.context, Some(payment_hash));
							let channel_description = if optimal_channel.context.get_short_channel_id() == Some(short_chan_id) {
								"specified"
							} else {
								"alternate"
							};
							log_trace!(logger, "Forwarding HTLC from SCID {} with payment_hash {} and next hop SCID {} over {} channel {} with corresponding peer {}",
								prev_short_channel_id, &payment_hash, short_chan_id, channel_description, optimal_channel.context.channel_id(), &counterparty_node_id);
							if let Err(e) = optimal_channel.queue_add_htlc(outgoing_amt_msat,
								payment_hash, outgoing_cltv_value, htlc_source.clone(),
								onion_packet.clone(), skimmed_fee_msat, next_blinding_point, &self.fee_estimator,
								&&logger)
							{
								if let ChannelError::Ignore(msg) = e {
									log_trace!(logger, "Failed to forward HTLC with payment_hash {} to peer {}: {}", &payment_hash, &counterparty_node_id, msg);
								} else {
									panic!("Stated return value requirements in send_htlc() were not met");
								}
								// Fail the HTLC back with temporary_channel_failure (0x1000|7),
								// attributing the failure to the originally-requested channel.
								if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
									let failure_code = 0x1000|7;
									let data = self.get_htlc_inbound_temp_fail_data(failure_code);
									failed_forwards.push((htlc_source, payment_hash,
										HTLCFailReason::reason(failure_code, data),
										HTLCDestination::NextHopChannel { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id }
									));
								} else {
									forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
									break;
								}
							}
							None
						},
						HTLCForwardInfo::AddHTLC { .. } => {
							panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
						},
						HTLCForwardInfo::FailHTLC { htlc_id, ref err_packet } => {
							if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
								let logger = WithChannelContext::from(&self.logger, &chan.context, None);
								log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
								Some((chan.queue_fail_htlc(htlc_id, err_packet.clone(), &&logger), htlc_id))
							} else {
								forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
								break;
							}
						},
						HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
							if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
								let logger = WithChannelContext::from(&self.logger, &chan.context, None);
								log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
								let res = chan.queue_fail_malformed_htlc(
									htlc_id, failure_code, sha256_of_onion, &&logger
								);
								Some((res, htlc_id))
							} else {
								forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
								break;
							}
						},
					};
					if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
						if let Err(e) = queue_fail_htlc_res {
							if let ChannelError::Ignore(msg) = e {
								if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
									let logger = WithChannelContext::from(&self.logger, &chan.context, None);
									log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
								}
							} else {
								panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
							}
							continue;
						}
					}
				}
			} else {
				// short_chan_id == 0: these HTLCs terminate with us as the (possibly
				// phantom-node) recipient.
				'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
					match forward_info {
						HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
							prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
							prev_user_channel_id, prev_counterparty_node_id, forward_info: PendingHTLCInfo {
								routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat,
								skimmed_fee_msat, ..
							}
						}) => {
							let blinded_failure = routing.blinded_failure();
							let (
								cltv_expiry, onion_payload, payment_data, payment_context, phantom_shared_secret,
								mut onion_fields, has_recipient_created_payment_secret
							) = match routing {
								PendingHTLCRouting::Receive {
									payment_data, payment_metadata, payment_context,
									incoming_cltv_expiry, phantom_shared_secret, custom_tlvs,
									requires_blinded_error: _
								} => {
									let _legacy_hop_data = Some(payment_data.clone());
									let onion_fields = RecipientOnionFields { payment_secret: Some(payment_data.payment_secret),
										payment_metadata, custom_tlvs };
									(incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data },
										Some(payment_data), payment_context, phantom_shared_secret, onion_fields,
										true)
								},
								PendingHTLCRouting::ReceiveKeysend {
									payment_data, payment_preimage, payment_metadata,
									incoming_cltv_expiry, custom_tlvs, requires_blinded_error: _,
									has_recipient_created_payment_secret,
								} => {
									let onion_fields = RecipientOnionFields {
										payment_secret: payment_data.as_ref().map(|data| data.payment_secret),
										payment_metadata,
										custom_tlvs,
									};
									(incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage),
										payment_data, None, None, onion_fields, has_recipient_created_payment_secret)
								},
								_ => {
									panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
								}
							};
							let claimable_htlc = ClaimableHTLC {
								prev_hop: HTLCPreviousHopData {
									short_channel_id: prev_short_channel_id,
									user_channel_id: Some(prev_user_channel_id),
									counterparty_node_id: prev_counterparty_node_id,
									channel_id: prev_channel_id,
									outpoint: prev_funding_outpoint,
									htlc_id: prev_htlc_id,
									incoming_packet_shared_secret: incoming_shared_secret,
									phantom_shared_secret,
									blinded_failure,
								},
								// The amount we actually received (may differ from the onion's
								// amount if the counterparty skimmed a fee); fall back to the
								// onion amount if the received amount is unknown.
								value: incoming_amt_msat.unwrap_or(outgoing_amt_msat),
								sender_intended_value: outgoing_amt_msat,
								timer_ticks: 0,
								total_value_received: None,
								total_msat: if let Some(data) = &payment_data { data.total_msat } else { outgoing_amt_msat },
								cltv_expiry,
								onion_payload,
								counterparty_skimmed_fee_msat: skimmed_fee_msat,
							};

							let mut committed_to_claimable = false;

							// Fails this HTLC back with 0x4000|15 (incorrect_or_unknown_payment_
							// details), attaching the HTLC amount and current height as failure
							// data, and `continue`s to the next forwardable HTLC.
							macro_rules! fail_htlc {
								($htlc: expr, $payment_hash: expr) => {
									debug_assert!(!committed_to_claimable);
									let mut htlc_msat_height_data = $htlc.value.to_be_bytes().to_vec();
									htlc_msat_height_data.extend_from_slice(
										&self.best_block.read().unwrap().height.to_be_bytes(),
									);
									failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
											short_channel_id: $htlc.prev_hop.short_channel_id,
											user_channel_id: $htlc.prev_hop.user_channel_id,
											counterparty_node_id: $htlc.prev_hop.counterparty_node_id,
											channel_id: prev_channel_id,
											outpoint: prev_funding_outpoint,
											htlc_id: $htlc.prev_hop.htlc_id,
											incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
											phantom_shared_secret,
											blinded_failure,
										}), payment_hash,
										HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
										HTLCDestination::FailedPayment { payment_hash: $payment_hash },
									));
									continue 'next_forwardable_htlc;
								}
							}
							let phantom_shared_secret = claimable_htlc.prev_hop.phantom_shared_secret;
							let mut receiver_node_id = self.our_network_pubkey;
							if phantom_shared_secret.is_some() {
								// Payment was received on behalf of a phantom node; report the
								// phantom node's id as the receiver.
								receiver_node_id = self.node_signer.get_node_id(Recipient::PhantomNode)
									.expect("Failed to get node_id for phantom node recipient");
							}

							// Adds claimable_htlc to the claimable-payments map under
							// payment_hash, enforcing MPP consistency rules, and pushes a
							// PaymentClaimable event once the sender-intended total has arrived.
							// Evaluates to true iff the event was generated.
							macro_rules! check_total_value {
								($purpose: expr) => {{
									let mut payment_claimable_generated = false;
									let is_keysend = $purpose.is_keysend();
									let mut claimable_payments = self.claimable_payments.lock().unwrap();
									if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
										fail_htlc!(claimable_htlc, payment_hash);
									}
									let ref mut claimable_payment = claimable_payments.claimable_payments
										.entry(payment_hash)
										.or_insert_with(|| {
											committed_to_claimable = true;
											ClaimablePayment {
												purpose: $purpose.clone(), htlcs: Vec::new(), onion_fields: None,
											}
										});
									if $purpose != claimable_payment.purpose {
										// Keysend and non-keysend HTLCs must not be mixed under one hash.
										let log_keysend = |keysend| if keysend { "keysend" } else { "non-keysend" };
										log_trace!(self.logger, "Failing new {} HTLC with payment_hash {} as we already had an existing {} HTLC with the same payment hash", log_keysend(is_keysend), &payment_hash, log_keysend(!is_keysend));
										fail_htlc!(claimable_htlc, payment_hash);
									}
									if let Some(earlier_fields) = &mut claimable_payment.onion_fields {
										if earlier_fields.check_merge(&mut onion_fields).is_err() {
											fail_htlc!(claimable_htlc, payment_hash);
										}
									} else {
										claimable_payment.onion_fields = Some(onion_fields);
									}
									// Sum the sender-intended values of all parts and track the
									// earliest CLTV expiry; inconsistent total_msat across parts
									// poisons the total so the payment fails below.
									let mut total_value = claimable_htlc.sender_intended_value;
									let mut earliest_expiry = claimable_htlc.cltv_expiry;
									for htlc in claimable_payment.htlcs.iter() {
										total_value += htlc.sender_intended_value;
										earliest_expiry = cmp::min(earliest_expiry, htlc.cltv_expiry);
										if htlc.total_msat != claimable_htlc.total_msat {
											log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
												&payment_hash, claimable_htlc.total_msat, htlc.total_msat);
											total_value = msgs::MAX_VALUE_MSAT;
										}
										if total_value >= msgs::MAX_VALUE_MSAT { break; }
									}
									if total_value >= msgs::MAX_VALUE_MSAT {
										fail_htlc!(claimable_htlc, payment_hash);
									} else if total_value - claimable_htlc.sender_intended_value >= claimable_htlc.total_msat {
										log_trace!(self.logger, "Failing HTLC with payment_hash {} as payment is already claimable",
											&payment_hash);
										fail_htlc!(claimable_htlc, payment_hash);
									} else if total_value >= claimable_htlc.total_msat {
										#[allow(unused_assignments)] {
											committed_to_claimable = true;
										}
										claimable_payment.htlcs.push(claimable_htlc);
										let amount_msat =
											claimable_payment.htlcs.iter().map(|htlc| htlc.value).sum();
										claimable_payment.htlcs.iter_mut()
											.for_each(|htlc| htlc.total_value_received = Some(amount_msat));
										let counterparty_skimmed_fee_msat = claimable_payment.htlcs.iter()
											.map(|htlc| htlc.counterparty_skimmed_fee_msat.unwrap_or(0)).sum();
										debug_assert!(total_value.saturating_sub(amount_msat) <=
											counterparty_skimmed_fee_msat);
										claimable_payment.htlcs.sort();
										let payment_id =
											claimable_payment.inbound_payment_id(&self.inbound_payment_id_secret);
										new_events.push_back((events::Event::PaymentClaimable {
											receiver_node_id: Some(receiver_node_id),
											payment_hash,
											purpose: $purpose,
											amount_msat,
											counterparty_skimmed_fee_msat,
											via_channel_id: Some(prev_channel_id),
											via_user_channel_id: Some(prev_user_channel_id),
											claim_deadline: Some(earliest_expiry - HTLC_FAIL_BACK_BUFFER),
											onion_fields: claimable_payment.onion_fields.clone(),
											payment_id: Some(payment_id),
										}, None));
										payment_claimable_generated = true;
									} else {
										// Still waiting on further MPP parts; hold this HTLC.
										claimable_payment.htlcs.push(claimable_htlc);
										#[allow(unused_assignments)] {
											committed_to_claimable = true;
										}
									}
									payment_claimable_generated
								}}
							}

							// For payment secrets we (as the recipient) generated, verify the
							// secret and minimum CLTV delta before accepting the HTLC.
							let payment_preimage = if has_recipient_created_payment_secret {
								if let Some(ref payment_data) = payment_data {
									let (payment_preimage, min_final_cltv_expiry_delta) = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
										Ok(result) => result,
										Err(()) => {
											log_trace!(self.logger, "Failing new HTLC with payment_hash {} as payment verification failed", &payment_hash);
											fail_htlc!(claimable_htlc, payment_hash);
										}
									};
									if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta {
										let expected_min_expiry_height = (self.current_best_block().height + min_final_cltv_expiry_delta as u32) as u64;
										if (cltv_expiry as u64) < expected_min_expiry_height {
											log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})",
												&payment_hash, cltv_expiry, expected_min_expiry_height);
											fail_htlc!(claimable_htlc, payment_hash);
										}
									}
									payment_preimage
								} else { fail_htlc!(claimable_htlc, payment_hash); }
							} else { None };

							match claimable_htlc.onion_payload {
								OnionPayload::Invoice { .. } => {
									let payment_data = payment_data.unwrap();
									let purpose = events::PaymentPurpose::from_parts(
										payment_preimage,
										payment_data.payment_secret,
										payment_context,
									);
									check_total_value!(purpose);
								},
								OnionPayload::Spontaneous(preimage) => {
									let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
									check_total_value!(purpose);
								}
							}
						},
						HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. } => {
							panic!("Got pending fail of our own HTLC");
						}
					}
				}
			}
		}
	}

	let best_block_height = self.best_block.read().unwrap().height;
	self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(),
		|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
		&self.pending_events, &self.logger, |args| self.send_payment_along_path(args));

	for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
		self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
	}
	// Re-inject phantom receives so they go through the normal receive path above.
	self.forward_htlcs(&mut phantom_receives);

	self.check_free_holding_cells();

	if new_events.is_empty() { return }
	let mut events = self.pending_events.lock().unwrap();
	events.append(&mut new_events);
}
/// Replays monitor updates and completion notifications which were queued as background
/// events (typically regenerated on startup) rather than handled inline. Returns whether
/// a persist is warranted.
fn process_background_events(&self) -> NotifyOption {
	// Callers must already hold the total_consistency_lock (read or write).
	debug_assert_ne!(self.total_consistency_lock.held_by_thread(), LockHeldState::NotHeldByThread);

	self.background_events_processed_since_startup.store(true, Ordering::Release);

	// Swap the queue out so its lock is not held while applying updates.
	let mut background_events = Vec::new();
	mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
	if background_events.is_empty() {
		return NotifyOption::SkipPersistNoEvents;
	}

	for event in background_events.drain(..) {
		match event {
			BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, _channel_id, update)) => {
				// The channel is already closed; the update result is deliberately ignored.
				let _ = self.chain_monitor.update_channel(funding_txo, &update);
			},
			BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
				self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
			},
			BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
				let per_peer_state = self.per_peer_state.read().unwrap();
				if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
					let mut peer_state_lock = peer_state_mutex.lock().unwrap();
					let peer_state = &mut *peer_state_lock;
					if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
						handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
					} else {
						// The channel is gone; run any blocked completion actions directly.
						// Both locks are dropped first as the actions may re-take them.
						let update_actions = peer_state.monitor_update_blocked_actions
							.remove(&channel_id).unwrap_or(Vec::new());
						mem::drop(peer_state_lock);
						mem::drop(per_peer_state);
						self.handle_monitor_update_completion_actions(update_actions);
					}
				}
			},
		}
	}
	NotifyOption::DoPersist
}
#[cfg(any(test, feature = "_test_utils"))]
pub fn test_process_background_events(&self) {
	// Test-only wrapper: take the consistency lock (mirroring production call
	// sites, which assert it is held) and run the background-event queue,
	// discarding the returned persistence hint.
	let _read_guard = self.total_consistency_lock.read().unwrap();
	let _ = self.process_background_events();
}
/// Queues an `update_fee` on `chan` if `new_feerate` differs enough from the channel's
/// current feerate to be worth updating. Returns whether a persist is warranted.
fn update_channel_fee(&self, chan_id: &ChannelId, chan: &mut Channel<SP>, new_feerate: u32) -> NotifyOption {
	// Only the channel funder (the outbound side) may send update_fee.
	if !chan.context.is_outbound() {
		return NotifyOption::SkipPersistNoEvents;
	}

	let logger = WithChannelContext::from(&self.logger, &chan.context, None);
	let current_feerate = chan.context.get_feerate_sat_per_1000_weight();

	// Skip updates where the new estimate is lower but still within a factor of two
	// of the current feerate — the existing feerate remains acceptable.
	let close_enough = new_feerate <= current_feerate && new_feerate * 2 > current_feerate;
	if close_enough {
		return NotifyOption::SkipPersistNoEvents;
	}

	if !chan.context.is_live() {
		log_trace!(logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
			chan_id, current_feerate, new_feerate);
		return NotifyOption::SkipPersistNoEvents;
	}

	log_trace!(logger, "Channel {} qualifies for a feerate change from {} to {}.",
		&chan_id, current_feerate, new_feerate);
	chan.queue_update_fee(new_feerate, &self.fee_estimator, &&logger);
	NotifyOption::DoPersist
}
#[cfg(fuzzing)]
pub fn maybe_update_chan_fees(&self) {
	// Fuzzing-only hook: run the feerate-update pass over every funded channel,
	// queuing update_fee messages where warranted (the same pass that
	// timer_tick_occurred performs).
	PersistenceNotifierGuard::optionally_notify(self, || {
		let mut should_persist = NotifyOption::SkipPersistNoEvents;

		// Fetch both feerate targets once up front; which one applies depends on
		// whether the channel uses anchor outputs.
		let non_anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
		let anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee);

		let per_peer_state = self.per_peer_state.read().unwrap();
		for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			let peer_state = &mut *peer_state_lock;
			// Only funded channels can have their feerate updated.
			let funded_channels = peer_state.channel_by_id.iter_mut().filter_map(|(chan_id, phase)| {
				match phase {
					ChannelPhase::Funded(chan) => Some((chan_id, chan)),
					_ => None,
				}
			});
			for (chan_id, chan) in funded_channels {
				let new_feerate = match chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
					true => anchor_feerate,
					false => non_anchor_feerate,
				};
				if self.update_channel_fee(chan_id, chan, new_feerate) == NotifyOption::DoPersist {
					should_persist = NotifyOption::DoPersist;
				}
			}
		}

		should_persist
	});
}
/// Performs periodic (roughly once-a-minute) maintenance: feerate updates, staged
/// enable/disable of channel-update gossip, force-closing of stale unfunded channels and
/// unaccepted inbound requests, MPP receive timeouts, stale outbound-payment removal, and
/// removal of disconnected peers with no remaining state.
pub fn timer_tick_occurred(&self) {
	PersistenceNotifierGuard::optionally_notify(self, || {
		let mut should_persist = NotifyOption::SkipPersistNoEvents;

		let non_anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
		let anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee);

		let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
		let mut timed_out_mpp_htlcs = Vec::new();
		let mut pending_peers_awaiting_removal = Vec::new();
		let mut shutdown_channels = Vec::new();

		// Force-closes an unfunded channel whose establishment handshake has expired,
		// sending the peer an error message. Evaluates to false (i.e. remove from
		// channel_by_id) when the channel was closed, true otherwise.
		macro_rules! process_unfunded_channel_tick {
			($peer_state: expr, $chan: expr, $pending_msg_events: expr) => { {
				let context = &mut $chan.context;
				context.maybe_expire_prev_config();
				if $chan.unfunded_context.should_expire_unfunded_channel() {
					let logger = WithChannelContext::from(&self.logger, context, None);
					log_error!(logger,
						"Force-closing pending channel with ID {} for not establishing in a timely manner",
						context.channel_id());
					let mut close_res = context.force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) });
					locked_close_channel!(self, $peer_state, context, close_res);
					shutdown_channels.push(close_res);
					$pending_msg_events.push(MessageSendEvent::HandleError {
						node_id: context.get_counterparty_node_id(),
						action: msgs::ErrorAction::SendErrorMessage {
							msg: msgs::ErrorMessage {
								channel_id: context.channel_id(),
								data: "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(),
							},
						},
					});
					false
				} else {
					true
				}
			} }
		}

		{
			let per_peer_state = self.per_peer_state.read().unwrap();
			for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
				let peer_state = &mut *peer_state_lock;
				let pending_msg_events = &mut peer_state.pending_msg_events;
				let counterparty_node_id = *counterparty_node_id;
				peer_state.channel_by_id.retain(|chan_id, phase| {
					match phase {
						ChannelPhase::Funded(chan) => {
							// Feerate maintenance: pick the target matching the channel type.
							let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
								anchor_feerate
							} else {
								non_anchor_feerate
							};
							let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
							if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }

							if let Err(e) = chan.timer_check_closing_negotiation_progress() {
								let (needs_close, err) = convert_chan_phase_err!(self, peer_state, e, chan, chan_id, FUNDED_CHANNEL);
								handle_errors.push((Err(err), counterparty_node_id));
								if needs_close { return false; }
							}

							// Stage channel enabled/disabled gossip transitions over several
							// ticks (DISABLE_GOSSIP_TICKS / ENABLE_GOSSIP_TICKS) so short
							// disconnections don't cause gossip flapping.
							match chan.channel_update_status() {
								ChannelUpdateStatus::Enabled if !chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)),
								ChannelUpdateStatus::Disabled if chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)),
								ChannelUpdateStatus::DisabledStaged(_) if chan.context.is_live()
									=> chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
								ChannelUpdateStatus::EnabledStaged(_) if !chan.context.is_live()
									=> chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
								ChannelUpdateStatus::DisabledStaged(mut n) if !chan.context.is_live() => {
									n += 1;
									if n >= DISABLE_GOSSIP_TICKS {
										chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
										if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
											let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
											pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
												msg: update
											});
										}
										should_persist = NotifyOption::DoPersist;
									} else {
										chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(n));
									}
								},
								ChannelUpdateStatus::EnabledStaged(mut n) if chan.context.is_live() => {
									n += 1;
									if n >= ENABLE_GOSSIP_TICKS {
										chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
										if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
											let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
											pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
												msg: update
											});
										}
										should_persist = NotifyOption::DoPersist;
									} else {
										chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(n));
									}
								},
								_ => {},
							}

							chan.context.maybe_expire_prev_config();

							if chan.should_disconnect_peer_awaiting_response() {
								let logger = WithChannelContext::from(&self.logger, &chan.context, None);
								log_debug!(logger, "Disconnecting peer {} due to not making any progress on channel {}",
									counterparty_node_id, chan_id);
								pending_msg_events.push(MessageSendEvent::HandleError {
									node_id: counterparty_node_id,
									action: msgs::ErrorAction::DisconnectPeerWithWarning {
										msg: msgs::WarningMessage {
											channel_id: *chan_id,
											data: "Disconnecting due to timeout awaiting response".to_owned(),
										},
									},
								});
							}

							true
						},
						ChannelPhase::UnfundedInboundV1(chan) => {
							process_unfunded_channel_tick!(peer_state, chan, pending_msg_events)
						},
						ChannelPhase::UnfundedOutboundV1(chan) => {
							process_unfunded_channel_tick!(peer_state, chan, pending_msg_events)
						},
						ChannelPhase::UnfundedInboundV2(chan) => {
							process_unfunded_channel_tick!(peer_state, chan, pending_msg_events)
						},
						ChannelPhase::UnfundedOutboundV2(chan) => {
							process_unfunded_channel_tick!(peer_state, chan, pending_msg_events)
						},
					}
				});

				// Tick down, and error out, inbound channel requests the user has not yet
				// accepted; the actual removal happens in the retain() just below.
				for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() {
					if { req.ticks_remaining -= 1 ; req.ticks_remaining } <= 0 {
						let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*chan_id), None);
						log_error!(logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id);
						peer_state.pending_msg_events.push(
							events::MessageSendEvent::HandleError {
								node_id: counterparty_node_id,
								action: msgs::ErrorAction::SendErrorMessage {
									msg: msgs::ErrorMessage { channel_id: chan_id.clone(), data: "Channel force-closed".to_owned() }
								},
							}
						);
					}
				}
				peer_state.inbound_channel_request_by_id.retain(|_, req| req.ticks_remaining > 0);

				if peer_state.ok_to_remove(true) {
					pending_peers_awaiting_removal.push(counterparty_node_id);
				}
			}
		}

		// Remove peers with no remaining state. ok_to_remove is re-checked under the
		// write lock since the peer's state may have changed since the read-locked
		// pass above.
		if pending_peers_awaiting_removal.len() > 0 {
			let mut per_peer_state = self.per_peer_state.write().unwrap();
			for counterparty_node_id in pending_peers_awaiting_removal {
				match per_peer_state.entry(counterparty_node_id) {
					hash_map::Entry::Occupied(entry) => {
						let remove_entry = {
							let peer_state = entry.get().lock().unwrap();
							peer_state.ok_to_remove(true)
						};
						if remove_entry {
							entry.remove_entry();
						}
					},
					hash_map::Entry::Vacant(_) => {
						// Already removed between the two locked passes.
					}
				}
			}
		}

		// Time out incomplete MPP payments whose parts have been pending too long.
		self.claimable_payments.lock().unwrap().claimable_payments.retain(|payment_hash, payment| {
			if payment.htlcs.is_empty() {
				// An empty HTLC list here is unexpected; drop the entry.
				debug_assert!(false);
				return false;
			}
			if let OnionPayload::Invoice { .. } = payment.htlcs[0].onion_payload {
				// If the parts already sum to the sender-intended total, the payment is
				// complete and not subject to the MPP timeout.
				if payment.htlcs[0].total_msat <= payment.htlcs.iter()
					.fold(0, |total, htlc| total + htlc.sender_intended_value)
				{
					return true;
				} else if payment.htlcs.iter_mut().any(|htlc| {
					htlc.timer_ticks += 1;
					return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
				}) {
					timed_out_mpp_htlcs.extend(payment.htlcs.drain(..)
						.map(|htlc: ClaimableHTLC| (htlc.prev_hop, *payment_hash)));
					return false;
				}
			}
			true
		});

		for htlc_source in timed_out_mpp_htlcs.drain(..) {
			let source = HTLCSource::PreviousHopData(htlc_source.0.clone());
			// Failure code 23 — presumably BOLT 4's mpp_timeout; confirm against spec.
			let reason = HTLCFailReason::from_failure_code(23);
			let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
			self.fail_htlc_backwards_internal(&source, &htlc_source.1, &reason, receiver);
		}

		for (err, counterparty_node_id) in handle_errors.drain(..) {
			let _ = handle_error!(self, err, counterparty_node_id);
		}

		for shutdown_res in shutdown_channels {
			self.finish_close_channel(shutdown_res);
		}

		#[cfg(feature = "std")]
		let duration_since_epoch = std::time::SystemTime::now()
			.duration_since(std::time::SystemTime::UNIX_EPOCH)
			.expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
		#[cfg(not(feature = "std"))]
		let duration_since_epoch = Duration::from_secs(
			// In no-std we only have peer-reported timestamps; back off two hours to
			// be conservative about what counts as stale.
			self.highest_seen_timestamp.load(Ordering::Acquire).saturating_sub(7200) as u64
		);

		self.pending_outbound_payments.remove_stale_payments(
			duration_since_epoch, &self.pending_events
		);

		// Flushing holding cells may produce messages/events, requiring a persist.
		if self.check_free_holding_cells() {
			should_persist = NotifyOption::DoPersist;
		}

		should_persist
	});
}
/// Fails all claimable HTLCs pending for `payment_hash` back to their origin, using the
/// incorrect-or-unknown-payment-details failure code.
pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
	self.fail_htlc_backwards_with_reason(payment_hash, FailureCode::IncorrectOrUnknownPaymentDetails);
}
/// Fails all claimable HTLCs pending for `payment_hash` back to their origin with the
/// given `failure_code`.
pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: FailureCode) {
	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
	// Remove the payment under the claimable_payments lock, then release the lock
	// (the guard is a temporary in this statement) before failing HTLCs backwards,
	// as fail_htlc_backwards_internal takes other locks of its own.
	let removed_payment = self.claimable_payments.lock().unwrap().claimable_payments.remove(payment_hash);
	if let Some(payment) = removed_payment {
		for htlc in payment.htlcs {
			// Build the failure reason while we still hold the whole HTLC, then move
			// its prev-hop data into the backwards-failure source.
			let failure_reason = self.get_htlc_fail_reason_from_failure_code(failure_code, &htlc);
			let destination = HTLCDestination::FailedPayment { payment_hash: *payment_hash };
			let htlc_source = HTLCSource::PreviousHopData(htlc.prev_hop);
			self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
		}
	}
}
/// Builds the backwards-failure reason (failure code plus code-specific data) for the
/// given `FailureCode` and HTLC.
fn get_htlc_fail_reason_from_failure_code(&self, failure_code: FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason {
	match failure_code {
		// These codes carry no additional failure data.
		FailureCode::TemporaryNodeFailure | FailureCode::RequiredNodeFeatureMissing =>
			HTLCFailReason::from_failure_code(failure_code.into()),
		// Per BOLT 4, incorrect_or_unknown_payment_details carries the HTLC amount
		// followed by the current block height.
		FailureCode::IncorrectOrUnknownPaymentDetails => {
			let mut amount_and_height = htlc.value.to_be_bytes().to_vec();
			amount_and_height.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes());
			HTLCFailReason::reason(failure_code.into(), amount_and_height)
		},
		// When known, the offending TLV type and its offset in the payload are encoded
		// as the failure data.
		FailureCode::InvalidOnionPayload(type_and_offset) => {
			let fail_data = match type_and_offset {
				Some((typ, offset)) => [BigSize(typ).encode(), offset.encode()].concat(),
				None => Vec::new(),
			};
			HTLCFailReason::reason(failure_code.into(), fail_data)
		}
	}
}
/// Constructs the failure-data bytes for a temporary (0x1000-class) failure code applied
/// to an inbound HTLC. Only codes whose data is a (here, empty) `channel_update` are
/// supported.
fn get_htlc_inbound_temp_fail_data(&self, err_code: u16) -> Vec<u8> {
    // Callers must pass a 0x1000-class code...
    debug_assert_eq!(err_code & 0x1000, 0x1000);
    // ...other than codes 11, 12 and 13, which carry amount/expiry-specific data this
    // helper does not produce.
    for disallowed in [0x1000|11, 0x1000|12, 0x1000|13] {
        debug_assert_ne!(err_code, disallowed);
    }
    let mut writer = VecWriter(Vec::with_capacity(4));
    if err_code == 0x1000 | 20 {
        // channel_disabled (code 20) carries a two-byte flags field before the update.
        0u16.write(&mut writer).expect("Writes cannot fail");
    }
    // All supported codes end with a zero-length channel_update.
    0u16.write(&mut writer).expect("Writes cannot fail");
    writer.0
}
/// Fails HTLCs dropped from a channel's holding cell back towards their sources.
///
/// If the channel is still `Funded` we fail with `temporary_channel_failure` (0x1000|7)
/// plus an (empty) channel_update; otherwise we use `unknown_next_peer` (0x4000|10).
fn fail_holding_cell_htlcs(
    &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: ChannelId,
    counterparty_node_id: &PublicKey
) {
    // Determine the failure code/data once up front, with the per-peer lock held only
    // within this block.
    let (failure_code, onion_failure_data) = {
        let per_peer_state = self.per_peer_state.read().unwrap();
        if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
            let mut peer_state_lock = peer_state_mutex.lock().unwrap();
            let peer_state = &mut *peer_state_lock;
            match peer_state.channel_by_id.entry(channel_id) {
                hash_map::Entry::Occupied(chan_phase_entry) => {
                    if let ChannelPhase::Funded(_chan) = chan_phase_entry.get() {
                        let failure_code = 0x1000|7;
                        let data = self.get_htlc_inbound_temp_fail_data(failure_code);
                        (failure_code, data)
                    } else {
                        // Holding cells should only exist on funded channels.
                        debug_assert!(false);
                        (0x4000|10, Vec::new())
                    }
                },
                hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
            }
        } else { (0x4000|10, Vec::new()) }
    };
    // Now that the per-peer lock above has been released, fail each HTLC backwards
    // (failing backwards takes other ChannelManager locks internally).
    for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
        let reason = HTLCFailReason::reason(failure_code, onion_failure_data.clone());
        let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id };
        self.fail_htlc_backwards_internal(&htlc_src, &payment_hash, &reason, receiver);
    }
}
/// Fails an HTLC backwards to its source, additionally pushing a forwards-processing
/// event if one is required for the queued failure to get handled.
fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
    let need_forward_event = self.fail_htlc_backwards_internal_without_forward_event(
        source, payment_hash, onion_error, destination,
    );
    if need_forward_event {
        self.push_pending_forwards_ev();
    }
}
/// Fails an HTLC backwards to its source without pushing a forwards-processing event,
/// returning `true` if the caller needs to push one itself (see
/// `fail_htlc_backwards_internal`).
fn fail_htlc_backwards_internal_without_forward_event(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) -> bool {
    // In debug builds, assert the calling thread holds no per-peer channel lock — we take
    // other `ChannelManager` locks (`forward_htlcs`, `pending_events`) below, presumably
    // to keep lock ordering consistent.
    #[cfg(debug_assertions)]
    for (_, peer) in self.per_peer_state.read().unwrap().iter() {
        debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
    }
    let mut push_forward_event;
    match source {
        HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => {
            // We originated this payment; hand the failure to the outbound-payments
            // tracker which generates the user-facing events.
            push_forward_event = self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path,
                session_priv, payment_id, self.probing_cookie_secret, &self.secp_ctx,
                &self.pending_events, &self.logger);
        },
        HTLCSource::PreviousHopData(HTLCPreviousHopData {
            ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret,
            ref phantom_shared_secret, outpoint: _, ref blinded_failure, ref channel_id, ..
        }) => {
            // We forwarded this HTLC; queue a failure back towards the previous hop.
            log_trace!(
                WithContext::from(&self.logger, None, Some(*channel_id), Some(*payment_hash)),
                "Failing {}HTLC with payment_hash {} backwards from us: {:?}",
                if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error
            );
            let failure = match blinded_failure {
                Some(BlindedFailure::FromIntroductionNode) => {
                    // As the blinded path's introduction node, replace the real error
                    // with `invalid_onion_blinding` and an all-zero 32-byte payload.
                    let blinded_onion_error = HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32]);
                    let err_packet = blinded_onion_error.get_encrypted_failure_packet(
                        incoming_packet_shared_secret, phantom_shared_secret
                    );
                    HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
                },
                Some(BlindedFailure::FromBlindedNode) => {
                    // Inside the blinded path, fail with update_fail_malformed_htlc
                    // carrying `invalid_onion_blinding` and a zeroed onion hash.
                    HTLCForwardInfo::FailMalformedHTLC {
                        htlc_id: *htlc_id,
                        failure_code: INVALID_ONION_BLINDING,
                        sha256_of_onion: [0; 32]
                    }
                },
                None => {
                    // Normal (unblinded) forward: re-encrypt the onion error for the
                    // previous hop.
                    let err_packet = onion_error.get_encrypted_failure_packet(
                        incoming_packet_shared_secret, phantom_shared_secret
                    );
                    HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
                }
            };
            // A new forwards-processing event is only needed if both queues were empty —
            // otherwise one is already pending from earlier queued work.
            push_forward_event = self.decode_update_add_htlcs.lock().unwrap().is_empty();
            let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
            push_forward_event &= forward_htlcs.is_empty();
            match forward_htlcs.entry(*short_channel_id) {
                hash_map::Entry::Occupied(mut entry) => {
                    entry.get_mut().push(failure);
                },
                hash_map::Entry::Vacant(entry) => {
                    entry.insert(vec!(failure));
                }
            }
            // Drop the forward_htlcs lock before taking pending_events below.
            mem::drop(forward_htlcs);
            let mut pending_events = self.pending_events.lock().unwrap();
            pending_events.push_back((events::Event::HTLCHandlingFailed {
                prev_channel_id: *channel_id,
                failed_next_destination: destination,
            }, None));
        },
    }
    push_forward_event
}
/// Claims the payment corresponding to the given preimage, rejecting it instead if it
/// came with even-typed custom TLVs we do not recognize (use
/// `claim_funds_with_known_custom_tlvs` to accept those).
pub fn claim_funds(&self, payment_preimage: PaymentPreimage) {
    let custom_tlvs_known = false;
    self.claim_payment_internal(payment_preimage, custom_tlvs_known);
}
/// Claims the payment corresponding to the given preimage, accepting it even if it came
/// with custom TLVs the caller has inspected and deemed acceptable.
pub fn claim_funds_with_known_custom_tlvs(&self, payment_preimage: PaymentPreimage) {
    let custom_tlvs_known = true;
    self.claim_payment_internal(payment_preimage, custom_tlvs_known);
}
/// Claims a previously-surfaced claimable payment identified by its preimage, failing all
/// of its HTLCs backwards instead if the claim cannot proceed (e.g. unknown custom TLVs
/// when `custom_tlvs_known` is unset, or an inconsistent/incomplete MPP set).
fn claim_payment_internal(&self, payment_preimage: PaymentPreimage, custom_tlvs_known: bool) {
    // The payment hash is, by definition, the SHA-256 of the preimage.
    let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).to_byte_array());
    let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
    let (sources, claiming_payment) = {
        // Move the payment into the pending-claim state; on rejection we get the HTLCs
        // back so we can fail them.
        let res = self.claimable_payments.lock().unwrap().begin_claiming_payment(
            payment_hash, &self.node_signer, &self.logger, &self.inbound_payment_id_secret,
            custom_tlvs_known,
        );
        match res {
            Ok((htlcs, payment_info)) => (htlcs, payment_info),
            Err(htlcs) => {
                // Claim rejected (per begin_claiming_payment); fail every HTLC back.
                for htlc in htlcs {
                    let reason = self.get_htlc_fail_reason_from_failure_code(FailureCode::InvalidOnionPayload(None), &htlc);
                    let source = HTLCSource::PreviousHopData(htlc.prev_hop);
                    let receiver = HTLCDestination::FailedPayment { payment_hash };
                    self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
                }
                return;
            }
        }
    };
    debug_assert!(!sources.is_empty());
    // Cross-check the MPP parts: all HTLCs must agree on the expected total and the
    // received total, and together they must sum to the full expected amount.
    let mut claimable_amt_msat = 0;
    let mut prev_total_msat = None;
    let mut expected_amt_msat = None;
    let mut valid_mpp = true;
    let mut errs = Vec::new();
    let per_peer_state = self.per_peer_state.read().unwrap();
    for htlc in sources.iter() {
        if prev_total_msat.is_some() && prev_total_msat != Some(htlc.total_msat) {
            log_error!(self.logger, "Somehow ended up with an MPP payment with different expected total amounts - this should not be reachable!");
            debug_assert!(false);
            valid_mpp = false;
            break;
        }
        prev_total_msat = Some(htlc.total_msat);
        if expected_amt_msat.is_some() && expected_amt_msat != htlc.total_value_received {
            log_error!(self.logger, "Somehow ended up with an MPP payment with different received total amounts - this should not be reachable!");
            debug_assert!(false);
            valid_mpp = false;
            break;
        }
        expected_amt_msat = htlc.total_value_received;
        claimable_amt_msat += htlc.value;
    }
    mem::drop(per_peer_state);
    if sources.is_empty() || expected_amt_msat.is_none() {
        // Nothing left to claim; undo the pending-claim bookkeeping.
        self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
        log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!");
        return;
    }
    if claimable_amt_msat != expected_amt_msat.unwrap() {
        self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
        log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.",
            expected_amt_msat.unwrap(), claimable_amt_msat);
        return;
    }
    if valid_mpp {
        // Record each MPP part (by its previous-hop channel) so per-channel claim
        // completion can be tracked below.
        let mpp_parts: Vec<_> = sources.iter().filter_map(|htlc| {
            if let Some(cp_id) = htlc.prev_hop.counterparty_node_id {
                Some(MPPClaimHTLCSource {
                    counterparty_node_id: cp_id,
                    funding_txo: htlc.prev_hop.outpoint,
                    channel_id: htlc.prev_hop.channel_id,
                    htlc_id: htlc.prev_hop.htlc_id,
                })
            } else {
                // NOTE(review): hops without a counterparty_node_id are skipped here —
                // presumably only HTLCs from old serializations; confirm against
                // claim_mpp_part, which panics on such hops.
                None
            }
        }).collect();
        // Shared MPP claim state is only needed when there is more than one part.
        let pending_mpp_claim_ptr_opt = if sources.len() > 1 {
            Some(Arc::new(Mutex::new(PendingMPPClaim {
                channels_without_preimage: mpp_parts.clone(),
                channels_with_preimage: Vec::new(),
            })))
        } else {
            None
        };
        let payment_info = Some(PaymentClaimDetails { mpp_parts, claiming_payment });
        for htlc in sources {
            // Per-HTLC pointer into the shared MPP claim state (if any).
            let this_mpp_claim = pending_mpp_claim_ptr_opt.as_ref().and_then(|pending_mpp_claim|
                if let Some(cp_id) = htlc.prev_hop.counterparty_node_id {
                    let claim_ptr = PendingMPPClaimPointer(Arc::clone(pending_mpp_claim));
                    Some((cp_id, htlc.prev_hop.channel_id, htlc.prev_hop.htlc_id, claim_ptr))
                } else {
                    None
                }
            );
            // Block RAA monitor updates on each claimed-from channel until the whole MPP
            // claim completes.
            let raa_blocker = pending_mpp_claim_ptr_opt.as_ref().map(|pending_claim| {
                RAAMonitorUpdateBlockingAction::ClaimedMPPPayment {
                    pending_claim: PendingMPPClaimPointer(Arc::clone(pending_claim)),
                }
            });
            self.claim_funds_from_hop(
                htlc.prev_hop, payment_preimage, payment_info.clone(),
                |_, definitely_duplicate| {
                    debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment");
                    (Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim: this_mpp_claim }), raa_blocker)
                }
            );
        }
    } else {
        // Inconsistent MPP set: fail everything back with `incorrect_or_unknown_payment_
        // details`-style data (amount + height) and drop the pending claim.
        for htlc in sources {
            let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
            htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes());
            let source = HTLCSource::PreviousHopData(htlc.prev_hop);
            let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data);
            let receiver = HTLCDestination::FailedPayment { payment_hash };
            self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
        }
        self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
    }
    // Surface any per-peer errors accumulated above (currently none are generated here).
    for (counterparty_node_id, err) in errs.drain(..) {
        let res: Result<(), _> = Err(err);
        let _ = handle_error!(self, res, counterparty_node_id);
    }
}
/// Claims an HTLC from the channel identified by `prev_hop`, delegating to
/// `claim_mpp_part` after resolving the counterparty node id for the hop.
fn claim_funds_from_hop<
    ComplFunc: FnOnce(Option<u64>, bool) -> (Option<MonitorUpdateCompletionAction>, Option<RAAMonitorUpdateBlockingAction>)
>(
    &self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage,
    payment_info: Option<PaymentClaimDetails>, completion_action: ComplFunc,
) {
    // Prefer the counterparty recorded on the hop itself; failing that, try to resolve
    // the short channel id via our live channel map.
    let counterparty_node_id = match prev_hop.counterparty_node_id {
        Some(cp_id) => Some(cp_id),
        None => {
            let short_to_chan_info = self.short_to_chan_info.read().unwrap();
            short_to_chan_info.get(&prev_hop.short_channel_id).map(|(cp_id, _)| *cp_id)
        },
    };
    let htlc_source = HTLCClaimSource {
        counterparty_node_id,
        funding_txo: prev_hop.outpoint,
        channel_id: prev_hop.channel_id,
        htlc_id: prev_hop.htlc_id,
    };
    self.claim_mpp_part(htlc_source, payment_preimage, payment_info, completion_action)
}
/// Claims a single HTLC (possibly one part of an MPP payment) from the channel identified
/// by `prev_hop`, invoking `completion_action` with the claimed value (when newly
/// claimed) and whether the claim was definitely a duplicate.
///
/// If the channel is no longer in our live channel set, the preimage is instead pushed
/// directly into the (closed) channel's `ChannelMonitor` via a fresh
/// `ChannelMonitorUpdate`.
fn claim_mpp_part<
    ComplFunc: FnOnce(Option<u64>, bool) -> (Option<MonitorUpdateCompletionAction>, Option<RAAMonitorUpdateBlockingAction>)
>(
    &self, prev_hop: HTLCClaimSource, payment_preimage: PaymentPreimage,
    payment_info: Option<PaymentClaimDetails>, completion_action: ComplFunc,
) {
    // True while replaying claims before startup background events have been processed.
    let during_init = !self.background_events_processed_since_startup.load(Ordering::Acquire);
    // The caller must not hold these locks — we take them (directly or indirectly) below.
    debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
    debug_assert_ne!(self.claimable_payments.held_by_thread(), LockHeldState::HeldByThread);
    let per_peer_state = self.per_peer_state.read().unwrap();
    let chan_id = prev_hop.channel_id;
    const MISSING_MON_ERROR: &'static str =
        "If we're going to claim an HTLC against a channel, we should always have *some* state for the channel, even if just the latest ChannelMonitor update_id. This failure indicates we need to claim an HTLC from a channel for which we did not have a ChannelMonitor at startup and didn't create one while running.";
    // Lock the relevant peer's state, if we know the counterparty at all.
    let mut peer_state_opt = prev_hop.counterparty_node_id.as_ref().map(
        |counterparty_node_id| per_peer_state.get(counterparty_node_id)
            .map(|peer_mutex| peer_mutex.lock().unwrap())
            .expect(MISSING_MON_ERROR)
    );
    if let Some(peer_state_lock) = peer_state_opt.as_mut() {
        let peer_state = &mut **peer_state_lock;
        if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(chan_id) {
            if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                // Live-channel path: fulfill the HTLC on the channel itself.
                let logger = WithChannelContext::from(&self.logger, &chan.context, None);
                let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, payment_info, &&logger);
                match fulfill_res {
                    UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } => {
                        // A fresh claim: record the completion action and RAA blocker,
                        // then apply the monitor update.
                        let (action_opt, raa_blocker_opt) = completion_action(Some(htlc_value_msat), false);
                        if let Some(action) = action_opt {
                            log_trace!(logger, "Tracking monitor update completion action for channel {}: {:?}",
                                chan_id, action);
                            peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
                        }
                        if let Some(raa_blocker) = raa_blocker_opt {
                            peer_state.actions_blocking_raa_monitor_updates.entry(chan_id).or_insert_with(Vec::new).push(raa_blocker);
                        }
                        handle_new_monitor_update!(self, prev_hop.funding_txo, monitor_update, peer_state_opt,
                            peer_state, per_peer_state, chan);
                    }
                    UpdateFulfillCommitFetch::DuplicateClaim {} => {
                        // The channel already saw this preimage. Any RAA blocker should
                        // already be present (outside init-time replay).
                        let (action_opt, raa_blocker_opt) = completion_action(None, true);
                        if let Some(raa_blocker) = raa_blocker_opt {
                            debug_assert!(during_init ||
                                peer_state.actions_blocking_raa_monitor_updates.get(&chan_id).unwrap().contains(&raa_blocker));
                        }
                        let action = if let Some(action) = action_opt {
                            action
                        } else {
                            return;
                        };
                        // Release the peer lock before completing the action, which may
                        // need to lock a different peer's state.
                        mem::drop(peer_state_opt);
                        log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}",
                            chan_id, action);
                        if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
                            downstream_counterparty_node_id: node_id,
                            downstream_funding_outpoint: _,
                            blocking_action: blocker, downstream_channel_id: channel_id,
                        } = action {
                            // Remove exactly one matching blocker from the downstream
                            // channel's RAA-blocking list.
                            if let Some(peer_state_mtx) = per_peer_state.get(&node_id) {
                                let mut peer_state = peer_state_mtx.lock().unwrap();
                                if let Some(blockers) = peer_state
                                    .actions_blocking_raa_monitor_updates
                                    .get_mut(&channel_id)
                                {
                                    let mut found_blocker = false;
                                    blockers.retain(|iter| {
                                        // Drop only the first matching blocker, keeping
                                        // any later duplicates.
                                        let first_blocker = !found_blocker;
                                        if *iter == blocker { found_blocker = true; }
                                        *iter != blocker || !first_blocker
                                    });
                                    debug_assert!(found_blocker);
                                }
                            } else {
                                debug_assert!(false);
                            }
                        } else if matches!(action, MonitorUpdateCompletionAction::PaymentClaimed { .. }) {
                            debug_assert!(during_init,
                                "Duplicate claims should always either be for forwarded payments(freeing another channel immediately) or during init (for claim replay)");
                            mem::drop(per_peer_state);
                            self.handle_monitor_update_completion_actions([action]);
                        } else {
                            debug_assert!(false,
                                "Duplicate claims should always either be for forwarded payments(freeing another channel immediately) or during init (for claim replay)");
                            return;
                        };
                    }
                }
            }
        }
        // Live-channel path fully handled above.
        return;
    }
    // Closed-channel path. Hops with no counterparty id cannot be claimed post-close;
    // this is only possible for HTLCs serialized by pre-0.0.124-era code.
    if prev_hop.counterparty_node_id.is_none() {
        let payment_hash: PaymentHash = payment_preimage.into();
        panic!(
            "Prior to upgrading to LDK 0.1, all pending HTLCs forwarded by LDK 0.0.123 or before must be resolved. It appears at least the HTLC with payment_hash {} (preimage {}) was not resolved. Please downgrade to LDK 0.0.125 and resolve the HTLC prior to upgrading.",
            payment_hash,
            payment_preimage,
        );
    }
    let counterparty_node_id = prev_hop.counterparty_node_id.expect("Checked immediately above");
    let mut peer_state = peer_state_opt.expect("peer_state_opt is always Some when the counterparty_node_id is Some");
    // Bump the closed channel's monitor update id to get a fresh, in-order update id.
    let update_id = if let Some(latest_update_id) = peer_state.closed_channel_monitor_update_ids.get_mut(&chan_id) {
        *latest_update_id = latest_update_id.saturating_add(1);
        *latest_update_id
    } else {
        let err = "We need the latest ChannelMonitorUpdate ID to build a new update.
This should have been checked for availability on startup but somehow it is no longer available.
This indicates a bug inside LDK. Please report this error at https://github.com/lightningdevkit/rust-lightning/issues/new";
        log_error!(self.logger, "{}", err);
        panic!("{}", err);
    };
    // Push the preimage straight into the closed channel's monitor.
    let preimage_update = ChannelMonitorUpdate {
        update_id,
        counterparty_node_id: Some(counterparty_node_id),
        updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
            payment_preimage,
            payment_info,
        }],
        channel_id: Some(prev_hop.channel_id),
    };
    // No HTLC value is known on this path; the claim is not a definite duplicate.
    let (action_opt, raa_blocker_opt) = completion_action(None, false);
    if let Some(raa_blocker) = raa_blocker_opt {
        peer_state.actions_blocking_raa_monitor_updates
            .entry(prev_hop.channel_id)
            .or_default()
            .push(raa_blocker);
    }
    let payment_hash = payment_preimage.into();
    let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(chan_id), Some(payment_hash));
    if let Some(action) = action_opt {
        log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
            chan_id, action);
        peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
    }
    handle_new_monitor_update!(
        self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state,
        counterparty_node_id, chan_id, POST_CHANNEL_CLOSE
    );
}
/// Marks the given outbound HTLC sources as fully claimed in the outbound-payments
/// tracker, which may push resulting events onto `pending_events`.
fn finalize_claims(&self, sources: Vec<HTLCSource>) {
    let pending_events = &self.pending_events;
    self.pending_outbound_payments.finalize_claims(sources, pending_events);
}
/// Handles an HTLC which was resolved with a preimage downstream: for payments we
/// originated, settles the outbound payment; for forwarded HTLCs, claims them on the
/// inbound (previous-hop) channel and queues a `PaymentForwarded` event on completion.
fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
    forwarded_htlc_value_msat: Option<u64>, skimmed_fee_msat: Option<u64>, from_onchain: bool,
    startup_replay: bool, next_channel_counterparty_node_id: Option<PublicKey>,
    next_channel_outpoint: OutPoint, next_channel_id: ChannelId, next_user_channel_id: Option<u128>,
) {
    match source {
        HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
            debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire),
                "We don't support claim_htlc claims during startup - monitors may not be available yet");
            if let Some(pubkey) = next_channel_counterparty_node_id {
                debug_assert_eq!(pubkey, path.hops[0].pubkey);
            }
            // Hold the downstream channel's RAA monitor update until the user has seen
            // the resulting payment event.
            let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
                channel_funding_outpoint: next_channel_outpoint, channel_id: next_channel_id,
                counterparty_node_id: path.hops[0].pubkey,
            };
            self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage,
                session_priv, path, from_onchain, ev_completion_action, &self.pending_events,
                &self.logger);
        },
        HTLCSource::PreviousHopData(hop_data) => {
            let prev_channel_id = hop_data.channel_id;
            let prev_user_channel_id = hop_data.user_channel_id;
            let prev_node_id = hop_data.counterparty_node_id;
            let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
            self.claim_funds_from_hop(hop_data, payment_preimage, None,
                |htlc_claim_value_msat, definitely_duplicate| {
                    // The downstream channel to unblock once the claim's event has been
                    // handled (only known when we know the downstream counterparty).
                    let chan_to_release =
                        if let Some(node_id) = next_channel_counterparty_node_id {
                            Some(EventUnblockedChannel {
                                counterparty_node_id: node_id,
                                funding_txo: next_channel_outpoint,
                                channel_id: next_channel_id,
                                blocking_action: completed_blocker
                            })
                        } else {
                            // assumes the downstream counterparty is only unknown for
                            // channels closed before upgrade — TODO confirm
                            None
                        };
                    if definitely_duplicate && startup_replay {
                        // Replayed duplicate during startup: nothing to do.
                        (None, None)
                    } else if definitely_duplicate {
                        // Runtime duplicate: just free the downstream channel, if known;
                        // no event is emitted.
                        if let Some(other_chan) = chan_to_release {
                            (Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
                                downstream_counterparty_node_id: other_chan.counterparty_node_id,
                                downstream_funding_outpoint: other_chan.funding_txo,
                                downstream_channel_id: other_chan.channel_id,
                                blocking_action: other_chan.blocking_action,
                            }), None)
                        } else { (None, None) }
                    } else {
                        // Fresh claim: compute the fee we earned (claimed inbound value
                        // minus the value we forwarded) and emit PaymentForwarded.
                        let total_fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
                            if let Some(claimed_htlc_value) = htlc_claim_value_msat {
                                Some(claimed_htlc_value - forwarded_htlc_value)
                            } else { None }
                        } else { None };
                        debug_assert!(skimmed_fee_msat <= total_fee_earned_msat,
                            "skimmed_fee_msat must always be included in total_fee_earned_msat");
                        (Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
                            event: events::Event::PaymentForwarded {
                                prev_channel_id: Some(prev_channel_id),
                                next_channel_id: Some(next_channel_id),
                                prev_user_channel_id,
                                next_user_channel_id,
                                prev_node_id,
                                next_node_id: next_channel_counterparty_node_id,
                                total_fee_earned_msat,
                                skimmed_fee_msat,
                                claim_from_onchain_tx: from_onchain,
                                outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
                            },
                            downstream_counterparty_and_funding_outpoint: chan_to_release,
                        }), None)
                    }
                });
        },
    }
}
/// Returns this node's public key, as cached at construction.
pub fn get_our_node_id(&self) -> PublicKey {
    self.our_network_pubkey
}
/// Processes actions which were queued to run once a `ChannelMonitorUpdate` completed:
/// emitting `PaymentClaimed`/forwarding events and releasing RAA monitor-update blockers
/// on other channels.
fn handle_monitor_update_completion_actions<I: IntoIterator<Item=MonitorUpdateCompletionAction>>(&self, actions: I) {
    // The caller must not hold these locks — we take them below.
    debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
    debug_assert_ne!(self.claimable_payments.held_by_thread(), LockHeldState::HeldByThread);
    debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
    // Channels whose RAA blockers should be released once all locks are dropped.
    let mut freed_channels = Vec::new();
    for action in actions.into_iter() {
        match action {
            MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim } => {
                if let Some((counterparty_node_id, chan_id, htlc_id, claim_ptr)) = pending_mpp_claim {
                    // This part of an MPP claim completed: update the shared claim state
                    // and, once no parts remain, collect all blockers for release.
                    let per_peer_state = self.per_peer_state.read().unwrap();
                    per_peer_state.get(&counterparty_node_id).map(|peer_state_mutex| {
                        let mut peer_state = peer_state_mutex.lock().unwrap();
                        let blockers_entry = peer_state.actions_blocking_raa_monitor_updates.entry(chan_id);
                        if let btree_map::Entry::Occupied(mut blockers) = blockers_entry {
                            blockers.get_mut().retain(|blocker|
                                if let &RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { pending_claim } = &blocker {
                                    if *pending_claim == claim_ptr {
                                        let mut pending_claim_state_lock = pending_claim.0.lock().unwrap();
                                        let pending_claim_state = &mut *pending_claim_state_lock;
                                        // Move this HTLC from "without preimage" to
                                        // "with preimage".
                                        pending_claim_state.channels_without_preimage.retain(|htlc_info| {
                                            let this_claim =
                                                htlc_info.counterparty_node_id == counterparty_node_id
                                                && htlc_info.channel_id == chan_id
                                                && htlc_info.htlc_id == htlc_id;
                                            if this_claim {
                                                pending_claim_state.channels_with_preimage.push(htlc_info.clone());
                                                false
                                            } else { true }
                                        });
                                        if pending_claim_state.channels_without_preimage.is_empty() {
                                            // Every part now has its preimage; free all
                                            // channels which participated.
                                            for htlc_info in pending_claim_state.channels_with_preimage.iter() {
                                                let freed_chan = (
                                                    htlc_info.counterparty_node_id,
                                                    htlc_info.funding_txo,
                                                    htlc_info.channel_id,
                                                    blocker.clone()
                                                );
                                                freed_channels.push(freed_chan);
                                            }
                                        }
                                        // Keep the blocker only while parts remain.
                                        !pending_claim_state.channels_without_preimage.is_empty()
                                    } else { true }
                                } else { true }
                            );
                            if blockers.get().is_empty() {
                                blockers.remove();
                            }
                        }
                    });
                }
                // Emit PaymentClaimed for the (now fully claimed) payment, deduplicating
                // against an identical already-queued event.
                let payment = self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
                if let Some(ClaimingPayment {
                    amount_msat,
                    payment_purpose: purpose,
                    receiver_node_id,
                    htlcs,
                    sender_intended_value: sender_intended_total_msat,
                    onion_fields,
                    payment_id,
                }) = payment {
                    let event = events::Event::PaymentClaimed {
                        payment_hash,
                        purpose,
                        amount_msat,
                        receiver_node_id: Some(receiver_node_id),
                        htlcs,
                        sender_intended_total_msat,
                        onion_fields,
                        payment_id,
                    };
                    let event_action = (event, None);
                    let mut pending_events = self.pending_events.lock().unwrap();
                    if !pending_events.contains(&event_action) {
                        pending_events.push_back(event_action);
                    }
                }
            },
            MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
                event, downstream_counterparty_and_funding_outpoint
            } => {
                // Queue the event, then release the downstream channel's blocker if any.
                self.pending_events.lock().unwrap().push_back((event, None));
                if let Some(unblocked) = downstream_counterparty_and_funding_outpoint {
                    self.handle_monitor_update_release(
                        unblocked.counterparty_node_id, unblocked.funding_txo,
                        unblocked.channel_id, Some(unblocked.blocking_action),
                    );
                }
            },
            MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
                downstream_counterparty_node_id, downstream_funding_outpoint, downstream_channel_id, blocking_action,
            } => {
                // Release the downstream channel's blocker without emitting any event.
                self.handle_monitor_update_release(
                    downstream_counterparty_node_id,
                    downstream_funding_outpoint,
                    downstream_channel_id,
                    Some(blocking_action),
                );
            },
        }
    }
    // Release blockers collected from completed MPP claims, with all locks dropped.
    for (node_id, funding_outpoint, channel_id, blocker) in freed_channels {
        self.handle_monitor_update_release(node_id, funding_outpoint, channel_id, Some(blocker));
    }
}
/// Resumes a channel after a monitor-update completion or reconnection: queues the given
/// messages (in the required RAA/commitment order), broadcasts the funding transaction if
/// provided, and returns the pending forwards/update_adds for the caller to process.
fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
    channel: &mut Channel<SP>, raa: Option<msgs::RevokeAndACK>,
    commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
    pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_update_adds: Vec<msgs::UpdateAddHTLC>,
    funding_broadcastable: Option<Transaction>,
    channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>,
    tx_signatures: Option<msgs::TxSignatures>
) -> (Option<(u64, Option<PublicKey>, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec<msgs::UpdateAddHTLC>)>) {
    let logger = WithChannelContext::from(&self.logger, &channel.context, None);
    log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures",
        &channel.context.channel_id(),
        if raa.is_some() { "an" } else { "no" },
        if commitment_update.is_some() { "a" } else { "no" },
        pending_forwards.len(), pending_update_adds.len(),
        if funding_broadcastable.is_some() { "" } else { "not " },
        if channel_ready.is_some() { "sending" } else { "without" },
        if announcement_sigs.is_some() { "sending" } else { "without" },
        if tx_signatures.is_some() { "sending" } else { "without" },
    );
    let counterparty_node_id = channel.context.get_counterparty_node_id();
    // Use the real SCID if we have one, otherwise our outbound alias.
    let short_channel_id = channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias());
    let mut htlc_forwards = None;
    if !pending_forwards.is_empty() {
        htlc_forwards = Some((
            short_channel_id, Some(channel.context.get_counterparty_node_id()),
            channel.context.get_funding_txo().unwrap(), channel.context.channel_id(),
            channel.context.get_user_id(), pending_forwards
        ));
    }
    let mut decode_update_add_htlcs = None;
    if !pending_update_adds.is_empty() {
        decode_update_add_htlcs = Some((short_channel_id, pending_update_adds));
    }
    if let Some(msg) = channel_ready {
        send_channel_ready!(self, pending_msg_events, channel, msg);
    }
    if let Some(msg) = announcement_sigs {
        pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
            node_id: counterparty_node_id,
            msg,
        });
    }
    if let Some(msg) = tx_signatures {
        pending_msg_events.push(events::MessageSendEvent::SendTxSignatures {
            node_id: counterparty_node_id,
            msg,
        });
    }
    macro_rules! handle_cs { () => {
        if let Some(update) = commitment_update {
            pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
                node_id: counterparty_node_id,
                updates: update,
            });
        }
    } }
    macro_rules! handle_raa { () => {
        if let Some(revoke_and_ack) = raa {
            pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
                node_id: counterparty_node_id,
                msg: revoke_and_ack,
            });
        }
    } }
    // The RAA and commitment update must go out in the order the channel state machine
    // dictates.
    match order {
        RAACommitmentOrder::CommitmentFirst => {
            handle_cs!();
            handle_raa!();
        },
        RAACommitmentOrder::RevokeAndACKFirst => {
            handle_raa!();
            handle_cs!();
        },
    }
    if let Some(tx) = funding_broadcastable {
        if channel.context.is_manual_broadcast() {
            // The user opted to broadcast the funding tx themselves; tell them it is now
            // safe to do so rather than broadcasting ourselves.
            log_info!(logger, "Not broadcasting funding transaction with txid {} as it is manually managed", tx.compute_txid());
            let mut pending_events = self.pending_events.lock().unwrap();
            match channel.context.get_funding_txo() {
                Some(funding_txo) => {
                    emit_funding_tx_broadcast_safe_event!(pending_events, channel, funding_txo.into_bitcoin_outpoint())
                },
                None => {
                    debug_assert!(false, "Channel resumed without a funding txo, this should never happen!");
                    return (htlc_forwards, decode_update_add_htlcs);
                }
            };
        } else {
            log_info!(logger, "Broadcasting funding transaction with txid {}", tx.compute_txid());
            self.tx_broadcaster.broadcast_transactions(&[&tx]);
        }
    }
    {
        // These macros only emit their event once per channel (guarded internally).
        let mut pending_events = self.pending_events.lock().unwrap();
        emit_channel_pending_event!(pending_events, channel);
        emit_channel_ready_event!(pending_events, channel);
    }
    (htlc_forwards, decode_update_add_htlcs)
}
/// Handles notification that a `ChannelMonitor` update completed up to
/// `highest_applied_update_id`, resuming the channel (or running queued post-update
/// actions for a closed channel) once no in-flight updates remain.
fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
    // Caller must hold the total consistency lock (in read mode).
    debug_assert!(self.total_consistency_lock.try_write().is_err());
    let counterparty_node_id = match counterparty_node_id {
        Some(cp_id) => cp_id.clone(),
        None => {
            // Fall back to resolving the peer via the funding outpoint.
            let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
            match outpoint_to_peer.get(funding_txo) {
                Some(cp_id) => cp_id.clone(),
                None => return,
            }
        }
    };
    let per_peer_state = self.per_peer_state.read().unwrap();
    let mut peer_state_lock;
    let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
    if peer_state_mutex_opt.is_none() { return }
    peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
    let peer_state = &mut *peer_state_lock;
    // Drop all in-flight updates now covered by the applied id; count what remains.
    let remaining_in_flight =
        if let Some(pending) = peer_state.in_flight_monitor_updates.get_mut(funding_txo) {
            pending.retain(|upd| upd.update_id > highest_applied_update_id);
            pending.len()
        } else { 0 };
    let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*channel_id), None);
    log_trace!(logger, "ChannelMonitor updated to {}. {} pending in-flight updates.",
        highest_applied_update_id, remaining_in_flight);
    if remaining_in_flight != 0 {
        // More updates still in flight; wait for them before resuming the channel.
        return;
    }
    if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) {
        if chan.is_awaiting_monitor_update() {
            log_trace!(logger, "Channel is open and awaiting update, resuming it");
            handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
        } else {
            log_trace!(logger, "Channel is open but not awaiting update");
        }
    } else {
        // Channel is closed: run any queued post-update actions, dropping our locks
        // first since those actions may re-take them.
        let update_actions = peer_state.monitor_update_blocked_actions
            .remove(channel_id).unwrap_or(Vec::new());
        log_trace!(logger, "Channel is closed, applying {} post-update actions", update_actions.len());
        mem::drop(peer_state_lock);
        mem::drop(per_peer_state);
        self.handle_monitor_update_completion_actions(update_actions);
    }
}
/// Accepts a previously-requested inbound channel, requiring the usual confirmation depth
/// (no zero-conf) and contributing no funding inputs of our own.
pub fn accept_inbound_channel(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> {
    let accept_0conf = false;
    self.do_accept_inbound_channel(
        temporary_channel_id, counterparty_node_id, accept_0conf, user_channel_id,
        Vec::new(), Weight::from_wu(0),
    )
}
/// Accepts a previously-requested inbound channel from a trusted peer with zero
/// confirmations, contributing no funding inputs of our own.
pub fn accept_inbound_channel_from_trusted_peer_0conf(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> {
    let accept_0conf = true;
    self.do_accept_inbound_channel(
        temporary_channel_id, counterparty_node_id, accept_0conf, user_channel_id,
        Vec::new(), Weight::from_wu(0),
    )
}
fn do_accept_inbound_channel(
&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, accept_0conf: bool,
user_channel_id: u128, _funding_inputs: Vec<(TxIn, TransactionU16LenLimited)>,
_total_witness_weight: Weight,
) -> Result<(), APIError> {
let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id), None);
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let peers_without_funded_channels =
self.peers_without_funded_channels(|peer| { peer.total_channel_count() > 0 });
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
let err_str = format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id);
log_error!(logger, "{}", err_str);
APIError::ChannelUnavailable { err: err_str }
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let is_only_peer_channel = peer_state.total_channel_count() == 1;
let res = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) {
Some(unaccepted_channel) => {
let best_block_height = self.best_block.read().unwrap().height;
match unaccepted_channel.open_channel_msg {
OpenChannelMessage::V1(open_channel_msg) => {
InboundV1Channel::new(
&self.fee_estimator, &self.entropy_source, &self.signer_provider, *counterparty_node_id,
&self.channel_type_features(), &peer_state.latest_features, &open_channel_msg,
user_channel_id, &self.default_configuration, best_block_height, &self.logger, accept_0conf
).map_err(|err| MsgHandleErrInternal::from_chan_no_close(err, *temporary_channel_id)
).map(|mut channel| {
let logger = WithChannelContext::from(&self.logger, &channel.context, None);
let message_send_event = channel.accept_inbound_channel(&&logger).map(|msg| {
events::MessageSendEvent::SendAcceptChannel {
node_id: *counterparty_node_id,
msg,
}
});
(*temporary_channel_id, ChannelPhase::UnfundedInboundV1(channel), message_send_event)
})
},
#[cfg(dual_funding)]
OpenChannelMessage::V2(open_channel_msg) => {
InboundV2Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
self.get_our_node_id(), *counterparty_node_id, &self.channel_type_features(), &peer_state.latest_features,
&open_channel_msg, _funding_inputs, _total_witness_weight, user_channel_id,
&self.default_configuration, best_block_height, &self.logger
).map_err(|_| MsgHandleErrInternal::from_chan_no_close(
ChannelError::Close(
(
"V2 channel rejected due to sender error".into(),
ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) },
)
), *temporary_channel_id)
).map(|channel| {
let message_send_event = events::MessageSendEvent::SendAcceptChannelV2 {
node_id: channel.context.get_counterparty_node_id(),
msg: channel.accept_inbound_dual_funded_channel()
};
(channel.context.channel_id(), ChannelPhase::UnfundedInboundV2(channel), Some(message_send_event))
})
},
}
},
None => {
let err_str = "No such channel awaiting to be accepted.".to_owned();
log_error!(logger, "{}", err_str);
return Err(APIError::APIMisuseError { err: err_str });
}
};
let (channel_id, mut channel_phase, message_send_event) = match res {
Ok(res) => res,
Err(err) => {
mem::drop(peer_state_lock);
mem::drop(per_peer_state);
match handle_error!(self, Result::<(), MsgHandleErrInternal>::Err(err), *counterparty_node_id) {
Ok(_) => unreachable!("`handle_error` only returns Err as we've passed in an Err"),
Err(e) => {
return Err(APIError::ChannelUnavailable { err: e.err });
},
}
}
};
if accept_0conf {
debug_assert!(channel_phase.context().minimum_depth().unwrap() == 0);
} else if channel_phase.context().get_channel_type().requires_zero_conf() {
let send_msg_err_event = events::MessageSendEvent::HandleError {
node_id: channel_phase.context().get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage{
msg: msgs::ErrorMessage { channel_id: *temporary_channel_id, data: "No zero confirmation channels accepted".to_owned(), }
}
};
peer_state.pending_msg_events.push(send_msg_err_event);
let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
log_error!(logger, "{}", err_str);
return Err(APIError::APIMisuseError { err: err_str });
} else {
if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
let send_msg_err_event = events::MessageSendEvent::HandleError {
node_id: channel_phase.context().get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage{
msg: msgs::ErrorMessage { channel_id: *temporary_channel_id, data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
}
};
peer_state.pending_msg_events.push(send_msg_err_event);
let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
log_error!(logger, "{}", err_str);
return Err(APIError::APIMisuseError { err: err_str });
}
}
let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
channel_phase.context_mut().set_outbound_scid_alias(outbound_scid_alias);
if let Some(message_send_event) = message_send_event {
peer_state.pending_msg_events.push(message_send_event);
}
peer_state.channel_by_id.insert(channel_id, channel_phase);
Ok(())
}
/// Counts the peers matching `maybe_count_peer` for which *every* channel is still
/// unfunded, i.e. `unfunded_channel_count` equals the peer's total channel count.
///
/// Takes the `best_block` read lock, then the `per_peer_state` read lock, then each
/// peer's mutex in turn while scanning.
fn peers_without_funded_channels<Filter>(&self, maybe_count_peer: Filter) -> usize
where Filter: Fn(&PeerState<SP>) -> bool {
	let best_block_height = self.best_block.read().unwrap().height;
	let per_peer_state = self.per_peer_state.read().unwrap();
	per_peer_state.iter()
		.filter(|(_, peer_mtx)| {
			let peer = peer_mtx.lock().unwrap();
			// A peer counts iff the caller's filter accepts it and none of its
			// channels has completed funding yet.
			maybe_count_peer(&*peer) &&
				Self::unfunded_channel_count(&peer, best_block_height) == peer.total_channel_count()
		})
		.count()
}
/// Returns the number of this peer's channels which do not yet have a confirmed funding
/// transaction (and thus cost us state to track without any commitment from the peer),
/// plus any inbound channel requests still awaiting manual acceptance.
fn unfunded_channel_count(
	peer: &PeerState<SP>, best_block_height: u32
) -> usize {
	let unfunded = peer.channel_by_id.values().filter(|phase| match phase {
		// A funded inbound channel still counts as "unfunded" until its funding tx
		// confirms, unless it was negotiated as zero-conf (minimum_depth == 0).
		ChannelPhase::Funded(chan) =>
			!chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 &&
				chan.context.get_funding_tx_confirmations(best_block_height) == 0,
		ChannelPhase::UnfundedInboundV1(chan) => chan.context.minimum_depth().unwrap_or(1) != 0,
		// For V2 channels only count those where we are contributing no funds ourselves.
		ChannelPhase::UnfundedInboundV2(chan) =>
			chan.context.minimum_depth().unwrap_or(1) != 0 &&
				chan.dual_funding_context.our_funding_satoshis == 0,
		// Outbound unfunded channels were initiated by us and are not counted here.
		ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedOutboundV2(_) => false,
	}).count();
	unfunded + peer.inbound_channel_request_by_id.len()
}
/// Handles an incoming `open_channel` (V1) or `open_channel2` (V2) message. After sanity
/// and DoS-limit checks, either queues an `Event::OpenChannelRequest` for manual
/// acceptance, or auto-accepts the channel and queues the matching
/// `accept_channel`/`accept_channel2` response for the peer.
///
/// Errors are returned as `MsgHandleErrInternal` which result in an error message being
/// sent to the peer without closing any channel.
fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: OpenChannelMessageRef<'_>) -> Result<(), MsgHandleErrInternal> {
// V1 and V2 open-channel messages share their `common_fields`; extract them once up front.
let common_fields = match msg {
OpenChannelMessageRef::V1(msg) => &msg.common_fields,
#[cfg(dual_funding)]
OpenChannelMessageRef::V2(msg) => &msg.common_fields,
};
// Reject channels for a chain other than the one we were configured with.
if common_fields.chain_hash != self.chain_hash {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(),
common_fields.temporary_channel_id));
}
if !self.default_configuration.accept_inbound_channels {
return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(),
common_fields.temporary_channel_id));
}
// Computed before taking `per_peer_state` below: `peers_without_funded_channels`
// itself briefly takes the same read lock internally.
let channeled_peers_without_funding =
self.peers_without_funded_channels(|node| node.total_channel_count() > 0);
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
// Message handlers should only ever be called for connected peers.
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(
format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
common_fields.temporary_channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
// DoS limit: refuse a *new* peer's first channel if too many peers already have only
// unfunded channels (unless the user opted into manual acceptance and can decide).
if peer_state.total_channel_count() == 0 &&
channeled_peers_without_funding >= MAX_UNFUNDED_CHANNEL_PEERS &&
!self.default_configuration.manually_accept_inbound_channels
{
return Err(MsgHandleErrInternal::send_err_msg_no_close(
"Have too many peers with unfunded channels, not accepting new ones".to_owned(),
common_fields.temporary_channel_id));
}
let best_block_height = self.best_block.read().unwrap().height;
// DoS limit: cap the number of unfunded channels any single peer may have open with us.
if Self::unfunded_channel_count(peer_state, best_block_height) >= MAX_UNFUNDED_CHANS_PER_PEER {
return Err(MsgHandleErrInternal::send_err_msg_no_close(
format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER),
common_fields.temporary_channel_id));
}
let channel_id = common_fields.temporary_channel_id;
let channel_exists = peer_state.has_channel(&channel_id);
if channel_exists {
return Err(MsgHandleErrInternal::send_err_msg_no_close(
"temporary_channel_id collision for the same peer!".to_owned(),
common_fields.temporary_channel_id));
}
// Negotiate the channel type from the message and our supported feature set.
let channel_type = channel::channel_type_from_open_channel(
common_fields, &peer_state.latest_features, &self.channel_type_features()
).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, common_fields.temporary_channel_id))?;
if self.default_configuration.manually_accept_inbound_channels {
// Manual acceptance: surface an OpenChannelRequest event and park the raw message
// until the user calls accept_inbound_channel (or the request times out).
let mut pending_events = self.pending_events.lock().unwrap();
// Bit 0 of channel_flags is the `announce_channel` flag.
let is_announced = (common_fields.channel_flags & 1) == 1;
pending_events.push_back((events::Event::OpenChannelRequest {
temporary_channel_id: common_fields.temporary_channel_id,
counterparty_node_id: *counterparty_node_id,
funding_satoshis: common_fields.funding_satoshis,
channel_negotiation_type: match msg {
OpenChannelMessageRef::V1(msg) => InboundChannelFunds::PushMsat(msg.push_msat),
#[cfg(dual_funding)]
OpenChannelMessageRef::V2(_) => InboundChannelFunds::DualFunded,
},
channel_type,
is_announced,
params: common_fields.channel_parameters(),
}, None));
peer_state.inbound_channel_request_by_id.insert(channel_id, InboundChannelRequest {
open_channel_msg: match msg {
OpenChannelMessageRef::V1(msg) => OpenChannelMessage::V1(msg.clone()),
#[cfg(dual_funding)]
OpenChannelMessageRef::V2(msg) => OpenChannelMessage::V2(msg.clone()),
},
ticks_remaining: UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS,
});
return Ok(());
}
// Auto-acceptance path: pick a random user_channel_id since the user never chose one.
let mut random_bytes = [0u8; 16];
random_bytes.copy_from_slice(&self.entropy_source.get_secure_random_bytes()[..16]);
let user_channel_id = u128::from_be_bytes(random_bytes);
// Zero-conf and anchor channels carry extra risk, so they are never auto-accepted.
if channel_type.requires_zero_conf() {
return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), common_fields.temporary_channel_id));
}
if channel_type.requires_anchors_zero_fee_htlc_tx() {
return Err(MsgHandleErrInternal::send_err_msg_no_close("No channels with anchor outputs accepted".to_owned(), common_fields.temporary_channel_id));
}
let (mut channel_phase, message_send_event) = match msg {
OpenChannelMessageRef::V1(msg) => {
let mut channel = InboundV1Channel::new(
&self.fee_estimator, &self.entropy_source, &self.signer_provider, *counterparty_node_id,
&self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id,
&self.default_configuration, best_block_height, &self.logger, false
).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id))?;
let logger = WithChannelContext::from(&self.logger, &channel.context, None);
let message_send_event = channel.accept_inbound_channel(&&logger).map(|msg| {
events::MessageSendEvent::SendAcceptChannel {
node_id: *counterparty_node_id,
msg,
}
});
(ChannelPhase::UnfundedInboundV1(channel), message_send_event)
},
#[cfg(dual_funding)]
OpenChannelMessageRef::V2(msg) => {
// For auto-accepted V2 channels we contribute no inputs of our own.
let channel = InboundV2Channel::new(&self.fee_estimator, &self.entropy_source,
&self.signer_provider, self.get_our_node_id(), *counterparty_node_id,
&self.channel_type_features(), &peer_state.latest_features, msg, vec![], Weight::from_wu(0),
user_channel_id, &self.default_configuration, best_block_height, &self.logger
).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id))?;
let message_send_event = events::MessageSendEvent::SendAcceptChannelV2 {
node_id: *counterparty_node_id,
msg: channel.accept_inbound_dual_funded_channel(),
};
(ChannelPhase::UnfundedInboundV2(channel), Some(message_send_event))
},
};
let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
channel_phase.context_mut().set_outbound_scid_alias(outbound_scid_alias);
if let Some(message_send_event) = message_send_event {
peer_state.pending_msg_events.push(message_send_event);
}
peer_state.channel_by_id.insert(channel_phase.context().channel_id(), channel_phase);
Ok(())
}
/// Handles an `accept_channel` message for one of our pending outbound V1 channels and,
/// on success, queues an `Event::FundingGenerationReady` so the user can construct and
/// hand us the funding transaction.
fn internal_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
// Scope the peer-state locks so they are released before we take the pending-events lock.
let (value, output_script, user_id) = {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
// Message handlers should only ever be called for connected peers.
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.common_fields.temporary_channel_id) {
hash_map::Entry::Occupied(mut phase) => {
match phase.get_mut() {
// Only an outbound V1 channel awaiting the counterparty's response may
// receive accept_channel; anything else is a protocol violation.
ChannelPhase::UnfundedOutboundV1(chan) => {
try_chan_phase_entry!(self, peer_state, chan.accept_channel(msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), phase);
(chan.context.get_value_satoshis(), chan.context.get_funding_redeemscript().to_p2wsh(), chan.context.get_user_id())
},
_ => {
return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id));
}
}
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id))
}
};
// Let the user know they can now generate the funding transaction for this channel.
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push_back((events::Event::FundingGenerationReady {
temporary_channel_id: msg.common_fields.temporary_channel_id,
counterparty_node_id: *counterparty_node_id,
channel_value_satoshis: value,
output_script,
user_channel_id: user_id,
}, None));
Ok(())
}
/// Handles a `funding_created` message: promotes the matching unfunded inbound V1
/// channel to a funded channel, registers its `ChannelMonitor` with the chain monitor,
/// and queues a `funding_signed` response for the peer.
fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
let best_block = *self.best_block.read().unwrap();
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
// Message handlers should only ever be called for connected peers.
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
// Remove the channel under its temporary id; it is re-inserted below under its final,
// funding-outpoint-derived channel id.
let (mut chan, funding_msg_opt, monitor) =
match peer_state.channel_by_id.remove(&msg.temporary_channel_id) {
Some(ChannelPhase::UnfundedInboundV1(inbound_chan)) => {
let logger = WithChannelContext::from(&self.logger, &inbound_chan.context, None);
match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) {
Ok(res) => res,
// On failure the channel is handed back so it can be closed cleanly.
Err((inbound_chan, err)) => {
debug_assert!(matches!(err, ChannelError::Close(_)));
return Err(convert_chan_phase_err!(self, peer_state, err, &mut ChannelPhase::UnfundedInboundV1(inbound_chan), &msg.temporary_channel_id).1);
},
}
},
// funding_created is only valid for an unfunded inbound V1 channel.
Some(mut phase) => {
let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id);
let err = ChannelError::close(err_msg);
return Err(convert_chan_phase_err!(self, peer_state, err, &mut phase, &msg.temporary_channel_id).1);
},
None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
};
let funded_channel_id = chan.context.channel_id();
// Shared failure path: revert the funding info (so the channel is closed under its
// temporary id, which is what the peer still knows it by) and bail out.
macro_rules! fail_chan { ($err: expr) => { {
let err = ChannelError::close($err.to_owned());
chan.unset_funding_info(msg.temporary_channel_id);
return Err(convert_chan_phase_err!(self, peer_state, err, chan, &funded_channel_id, UNFUNDED_CHANNEL).1);
} } }
match peer_state.channel_by_id.entry(funded_channel_id) {
hash_map::Entry::Occupied(_) => {
fail_chan!("Already had channel with the new channel_id");
},
hash_map::Entry::Vacant(e) => {
// Ensure no existing channel already claims this funding outpoint before we
// start watching it.
let mut outpoint_to_peer_lock = self.outpoint_to_peer.lock().unwrap();
match outpoint_to_peer_lock.entry(monitor.get_funding_txo().0) {
hash_map::Entry::Occupied(_) => {
fail_chan!("The funding_created message had the same funding_txid as an existing channel - funding is not possible");
},
hash_map::Entry::Vacant(i_e) => {
let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
if let Ok(persist_state) = monitor_res {
i_e.insert(chan.context.get_counterparty_node_id());
// Release the outpoint map before queueing messages / handling the
// monitor update below.
mem::drop(outpoint_to_peer_lock);
if let Some(msg) = funding_msg_opt {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
node_id: counterparty_node_id.clone(),
msg,
});
}
if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan)) {
handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
per_peer_state, chan, INITIAL_MONITOR);
} else {
unreachable!("This must be a funded channel as we just inserted it.");
}
Ok(())
} else {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_error!(logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
fail_chan!("Duplicate funding outpoint");
}
}
}
}
}
}
/// Handles a `funding_signed` message: completes our outbound V1 channel's funding flow,
/// registering its initial `ChannelMonitor` with the chain monitor and promoting the
/// channel to the funded phase.
fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
let best_block = *self.best_block.read().unwrap();
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
// Message handlers should only ever be called for connected peers.
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(chan_phase_entry) => {
// Only an outbound V1 channel that sent funding_created may receive this.
if matches!(chan_phase_entry.get(), ChannelPhase::UnfundedOutboundV1(_)) {
// Take the channel out of the map; `funding_signed` consumes it and returns
// either a funded channel or the unfunded one back on error.
let chan = if let ChannelPhase::UnfundedOutboundV1(chan) = chan_phase_entry.remove() { chan } else { unreachable!() };
let logger = WithContext::from(
&self.logger,
Some(chan.context.get_counterparty_node_id()),
Some(chan.context.channel_id()),
None
);
let res =
chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger);
match res {
Ok((mut chan, monitor)) => {
if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
// Re-insert the now-funded channel before handling the monitor update.
let mut chan = peer_state.channel_by_id.entry(msg.channel_id).or_insert(ChannelPhase::Funded(chan));
if let ChannelPhase::Funded(ref mut chan) = &mut chan {
handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
} else { unreachable!(); }
Ok(())
} else {
// watch_channel refused the monitor, implying a duplicate funding
// outpoint; unwind the funding info and close.
let e = ChannelError::close("Channel funding outpoint was a duplicate".to_owned());
chan.unset_funding_info(msg.channel_id);
return Err(convert_chan_phase_err!(self, peer_state, e, &mut ChannelPhase::Funded(chan), &msg.channel_id).1);
}
},
Err((chan, e)) => {
debug_assert!(matches!(e, ChannelError::Close(_)),
"We don't have a channel anymore, so the error better have expected close");
return Err(convert_chan_phase_err!(self, peer_state, e, &mut ChannelPhase::UnfundedOutboundV1(chan), &msg.channel_id).1);
}
}
} else {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id));
}
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
}
/// Shared lookup-and-dispatch helper for the interactive-transaction-construction
/// messages (`tx_add_input`, `tx_add_output`, `tx_remove_input`, `tx_remove_output`).
///
/// Looks up the channel for (`counterparty_node_id`, `channel_id`), hands it to
/// `tx_msg_handler`, and queues the resulting message-send event. A handler returning
/// `Err` with the message name produces a non-closing warning back to the peer.
fn internal_tx_msg<HandleTxMsgFn: Fn(&mut ChannelPhase<SP>) -> Result<MessageSendEvent, &'static str>>(
	&self, counterparty_node_id: &PublicKey, channel_id: ChannelId, tx_msg_handler: HandleTxMsgFn
) -> Result<(), MsgHandleErrInternal> {
	let per_peer_state = self.per_peer_state.read().unwrap();
	let peer_state_mutex = per_peer_state.get(counterparty_node_id)
		.ok_or_else(|| {
			// Message handlers should only ever be called for connected peers.
			debug_assert!(false);
			MsgHandleErrInternal::send_err_msg_no_close(
				format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
				channel_id)
		})?;
	let mut peer_state_lock = peer_state_mutex.lock().unwrap();
	let peer_state = &mut *peer_state_lock;
	let channel_phase = peer_state.channel_by_id.get_mut(&channel_id)
		.ok_or_else(|| MsgHandleErrInternal::send_err_msg_no_close(format!(
			"Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
			counterparty_node_id), channel_id)
		)?;
	match tx_msg_handler(channel_phase) {
		Ok(msg_send_event) => {
			peer_state.pending_msg_events.push(msg_send_event);
			Ok(())
		},
		Err(tx_msg_str) => Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(
			format!("Got a {tx_msg_str} message with no interactive transaction construction expected or in-progress")
		), channel_id)),
	}
}
/// Handles a `tx_add_input` message by forwarding it to the V2 channel currently
/// negotiating its interactive funding transaction.
fn internal_tx_add_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddInput) -> Result<(), MsgHandleErrInternal> {
	self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |phase: &mut ChannelPhase<SP>| {
		// Only unfunded V2 channels participate in interactive construction.
		let event = match phase {
			ChannelPhase::UnfundedInboundV2(chan) => chan.tx_add_input(msg).into_msg_send_event(counterparty_node_id),
			ChannelPhase::UnfundedOutboundV2(chan) => chan.tx_add_input(msg).into_msg_send_event(counterparty_node_id),
			_ => return Err("tx_add_input"),
		};
		Ok(event)
	})
}
/// Handles a `tx_add_output` message by forwarding it to the V2 channel currently
/// negotiating its interactive funding transaction.
fn internal_tx_add_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddOutput) -> Result<(), MsgHandleErrInternal> {
	self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |phase: &mut ChannelPhase<SP>| {
		// Only unfunded V2 channels participate in interactive construction.
		let event = match phase {
			ChannelPhase::UnfundedInboundV2(chan) => chan.tx_add_output(msg).into_msg_send_event(counterparty_node_id),
			ChannelPhase::UnfundedOutboundV2(chan) => chan.tx_add_output(msg).into_msg_send_event(counterparty_node_id),
			_ => return Err("tx_add_output"),
		};
		Ok(event)
	})
}
/// Handles a `tx_remove_input` message by forwarding it to the V2 channel currently
/// negotiating its interactive funding transaction.
fn internal_tx_remove_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveInput) -> Result<(), MsgHandleErrInternal> {
	self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |phase: &mut ChannelPhase<SP>| {
		// Only unfunded V2 channels participate in interactive construction.
		let event = match phase {
			ChannelPhase::UnfundedInboundV2(chan) => chan.tx_remove_input(msg).into_msg_send_event(counterparty_node_id),
			ChannelPhase::UnfundedOutboundV2(chan) => chan.tx_remove_input(msg).into_msg_send_event(counterparty_node_id),
			_ => return Err("tx_remove_input"),
		};
		Ok(event)
	})
}
/// Handles a `tx_remove_output` message by forwarding it to the V2 channel currently
/// negotiating its interactive funding transaction.
fn internal_tx_remove_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveOutput) -> Result<(), MsgHandleErrInternal> {
	self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |phase: &mut ChannelPhase<SP>| {
		// Only unfunded V2 channels participate in interactive construction.
		let event = match phase {
			ChannelPhase::UnfundedInboundV2(chan) => chan.tx_remove_output(msg).into_msg_send_event(counterparty_node_id),
			ChannelPhase::UnfundedOutboundV2(chan) => chan.tx_remove_output(msg).into_msg_send_event(counterparty_node_id),
			_ => return Err("tx_remove_output"),
		};
		Ok(event)
	})
}
/// Handles a `tx_complete` message. If interactive construction of the funding
/// transaction finishes as a result, transitions the unfunded V2 channel into a funded
/// channel and queues our initial `commitment_signed` (and, if we contributed inputs,
/// a funding-ready-for-signing event for the user).
fn internal_tx_complete(&self, counterparty_node_id: PublicKey, msg: &msgs::TxComplete) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
.ok_or_else(|| {
// Message handlers should only ever be called for connected peers.
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(
format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
msg.channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
let channel_phase = chan_phase_entry.get_mut();
// Either another negotiation message must be sent, or negotiation completed
// and we now hold a signing session for the constructed transaction.
let (msg_send_event_opt, signing_session_opt) = match channel_phase {
ChannelPhase::UnfundedInboundV2(channel) => channel.tx_complete(msg)
.into_msg_send_event_or_signing_session(counterparty_node_id),
ChannelPhase::UnfundedOutboundV2(channel) => channel.tx_complete(msg)
.into_msg_send_event_or_signing_session(counterparty_node_id),
_ => try_chan_phase_entry!(self, peer_state, Err(ChannelError::Close(
(
"Got a tx_complete message with no interactive transaction construction expected or in-progress".into(),
ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) },
))), chan_phase_entry)
};
if let Some(msg_send_event) = msg_send_event_opt {
peer_state.pending_msg_events.push(msg_send_event);
};
if let Some(mut signing_session) = signing_session_opt {
// Negotiation is done: derive our commitment_signed from the session.
let (commitment_signed, funding_ready_for_sig_event_opt) = match chan_phase_entry.get_mut() {
ChannelPhase::UnfundedOutboundV2(chan) => {
chan.funding_tx_constructed(&mut signing_session, &self.logger)
},
ChannelPhase::UnfundedInboundV2(chan) => {
chan.funding_tx_constructed(&mut signing_session, &self.logger)
},
// Unreachable in practice given the match above, but kept for exhaustiveness.
_ => Err(ChannelError::Warn(
"Got a tx_complete message with no interactive transaction construction expected or in-progress"
.into())),
}.map_err(|err| MsgHandleErrInternal::send_err_msg_no_close(format!("{}", err), msg.channel_id))?;
// Swap the unfunded phase out of the map for the funded channel it becomes.
let (channel_id, channel_phase) = chan_phase_entry.remove_entry();
let channel = match channel_phase {
ChannelPhase::UnfundedOutboundV2(chan) => chan.into_channel(signing_session),
ChannelPhase::UnfundedInboundV2(chan) => chan.into_channel(signing_session),
_ => {
debug_assert!(false); Err(ChannelError::Warn(
"Got a tx_complete message with no interactive transaction construction expected or in-progress"
.into()))
},
}.map_err(|err| MsgHandleErrInternal::send_err_msg_no_close(format!("{}", err), msg.channel_id))?;
peer_state.channel_by_id.insert(channel_id, ChannelPhase::Funded(channel));
if let Some(funding_ready_for_sig_event) = funding_ready_for_sig_event_opt {
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push_back((funding_ready_for_sig_event, None));
}
// Our first commitment_signed for the channel carries no HTLC updates.
peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
node_id: counterparty_node_id,
updates: CommitmentUpdate {
commitment_signed,
update_add_htlcs: vec![],
update_fulfill_htlcs: vec![],
update_fail_htlcs: vec![],
update_fail_malformed_htlcs: vec![],
update_fee: None,
},
});
}
Ok(())
},
hash_map::Entry::Vacant(_) => {
Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
}
}
/// Handles a `tx_signatures` message for an interactively funded channel: queues our own
/// `tx_signatures` if it is now our turn to send them and, once the funding transaction
/// is fully signed, broadcasts it and emits a channel-pending event.
fn internal_tx_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxSignatures)
-> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
// Message handlers should only ever be called for connected peers.
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(
format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
msg.channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
let channel_phase = chan_phase_entry.get_mut();
match channel_phase {
// By the time tx_signatures arrives the channel has already transitioned to
// the funded phase (in internal_tx_complete).
ChannelPhase::Funded(chan) => {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
let (tx_signatures_opt, funding_tx_opt) = try_chan_phase_entry!(self, peer_state, chan.tx_signatures(msg, &&logger), chan_phase_entry);
if let Some(tx_signatures) = tx_signatures_opt {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendTxSignatures {
node_id: *counterparty_node_id,
msg: tx_signatures,
});
}
// A fully signed funding transaction means we can broadcast it and tell
// the user the channel is pending confirmation.
if let Some(ref funding_tx) = funding_tx_opt {
self.tx_broadcaster.broadcast_transactions(&[funding_tx]);
{
let mut pending_events = self.pending_events.lock().unwrap();
emit_channel_pending_event!(pending_events, chan);
}
}
},
_ => try_chan_phase_entry!(self, peer_state, Err(ChannelError::Close(
(
"Got an unexpected tx_signatures message".into(),
ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) },
))), chan_phase_entry)
}
Ok(())
},
hash_map::Entry::Vacant(_) => {
Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
}
}
/// Handles a `tx_abort` message: drops any in-progress interactive transaction
/// construction for the channel and, if there was one, acknowledges with our own
/// `tx_abort` as the protocol requires.
fn internal_tx_abort(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAbort)
-> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
// Message handlers should only ever be called for connected peers.
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(
format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
msg.channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
let channel_phase = chan_phase_entry.get_mut();
// Only unfunded V2 channels can have an interactive construction to abort;
// all other phases get a non-closing warning.
let tx_constructor = match channel_phase {
ChannelPhase::UnfundedInboundV2(chan) => chan.interactive_tx_constructor_mut(),
ChannelPhase::UnfundedOutboundV2(chan) => chan.interactive_tx_constructor_mut(),
ChannelPhase::Funded(_) => {
try_chan_phase_entry!(self, peer_state, Err(ChannelError::Warn(
"Got an unexpected tx_abort message: After initial funding transaction is signed, \
splicing and RBF attempts of interactive funding transactions are not supported yet so \
we don't have any negotiation in progress".into(),
)), chan_phase_entry)
}
ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
try_chan_phase_entry!(self, peer_state, Err(ChannelError::Warn(
"Got an unexpected tx_abort message: This is an unfunded channel created with V1 channel \
establishment".into(),
)), chan_phase_entry)
},
};
// `take()` tears down the constructor; we only need to echo a tx_abort back if
// a negotiation was actually in progress.
if tx_constructor.take().is_some() {
let msg = msgs::TxAbort {
channel_id: msg.channel_id,
data: "Acknowledged tx_abort".to_string().into_bytes(),
};
peer_state.pending_msg_events.push(events::MessageSendEvent::SendTxAbort {
node_id: *counterparty_node_id,
msg,
});
}
Ok(())
},
hash_map::Entry::Vacant(_) => {
Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
}
}
/// Handles a `channel_ready` message for a funded channel, queueing our
/// `announcement_signatures` (for announced channels) or an initial private
/// `channel_update` (for usable unannounced channels), and emitting a channel-ready
/// event to the user.
fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
// Message handlers should only ever be called for connected peers.
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
let announcement_sigs_opt = try_chan_phase_entry!(self, peer_state, chan.channel_ready(&msg, &self.node_signer,
self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &&logger), chan_phase_entry);
if let Some(announcement_sigs) = announcement_sigs_opt {
log_trace!(logger, "Sending announcement_signatures for channel {}", chan.context.channel_id());
peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
node_id: counterparty_node_id.clone(),
msg: announcement_sigs,
});
} else if chan.context.is_usable() {
// No announcement to make, but the channel is usable: send the peer a
// private channel_update so it can route payments to us over it.
log_trace!(logger, "Sending private initial channel_update for our counterparty on channel {}", chan.context.channel_id());
if let Ok(msg) = self.get_channel_update_for_unicast(chan) {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
node_id: counterparty_node_id.clone(),
msg,
});
}
}
{
let mut pending_events = self.pending_events.lock().unwrap();
emit_channel_ready_event!(pending_events, chan);
}
Ok(())
} else {
try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
"Got a channel_ready message for an unfunded channel!".into())), chan_phase_entry)
}
},
hash_map::Entry::Vacant(_) => {
Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
}
}
/// Handles a `shutdown` message. For a funded channel this begins (or continues) the
/// cooperative close flow; for an unfunded channel it simply force-closes it locally, as
/// there is nothing on chain to negotiate over.
///
/// HTLCs dropped by the shutdown and any final channel teardown are processed *after*
/// all channel locks are released, to respect the lock-ordering requirements of
/// `fail_htlc_backwards_internal` and `finish_close_channel`.
fn internal_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new();
let mut finish_shutdown = None;
{
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
// Message handlers should only ever be called for connected peers.
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) {
let phase = chan_phase_entry.get_mut();
match phase {
ChannelPhase::Funded(chan) => {
if !chan.received_shutdown() {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_info!(logger, "Received a shutdown message from our counterparty for channel {}{}.",
msg.channel_id,
if chan.sent_shutdown() { " after we initiated shutdown" } else { "" });
}
let funding_txo_opt = chan.context.get_funding_txo();
// The channel returns our own shutdown message to send (if we hadn't sent
// one yet), an optional monitor update, and any HTLCs that must now fail.
let (shutdown, monitor_update_opt, htlcs) = try_chan_phase_entry!(self, peer_state,
chan.shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_phase_entry);
dropped_htlcs = htlcs;
if let Some(msg) = shutdown {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
node_id: *counterparty_node_id,
msg,
});
}
if let Some(monitor_update) = monitor_update_opt {
handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
peer_state_lock, peer_state, per_peer_state, chan);
}
},
ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) |
ChannelPhase::UnfundedInboundV2(_) | ChannelPhase::UnfundedOutboundV2(_) => {
// Nothing is on chain for an unfunded channel, so a cooperative close is
// pointless - just drop the channel immediately.
let context = phase.context_mut();
let logger = WithChannelContext::from(&self.logger, context, None);
log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
let mut close_res = phase.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
remove_channel_phase!(self, peer_state, chan_phase_entry, close_res);
finish_shutdown = Some(close_res);
},
}
} else {
return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
}
// All locks dropped above; now fail the removed HTLCs back towards their origin.
for htlc_source in dropped_htlcs.drain(..) {
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id };
let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
}
if let Some(shutdown_res) = finish_shutdown {
self.finish_close_channel(shutdown_res);
}
Ok(())
}
/// Handles a BOLT 2 `closing_signed` message, advancing the cooperative-close fee
/// negotiation.
///
/// If negotiation completes, broadcasts the resulting closing transaction, queues a final
/// `channel_update` for gossip, and finishes closing the channel.
fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
// Message handlers should only be invoked for connected peers.
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
})?;
// Compute everything we need under the per-peer lock; broadcasting and close
// finalization happen afterwards.
let (tx, chan_option, shutdown_result) = {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
let (closing_signed, tx, shutdown_result) = try_chan_phase_entry!(self, peer_state, chan.closing_signed(&self.fee_estimator, &msg, &&logger), chan_phase_entry);
debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());
if let Some(msg) = closing_signed {
// Negotiation continues (or completes) with our next closing_signed.
peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
node_id: counterparty_node_id.clone(),
msg,
});
}
if let Some(mut close_res) = shutdown_result {
// Negotiation finished: we must have a tx to broadcast and the channel
// is removed from the per-peer map.
debug_assert!(tx.is_some());
let channel_phase = remove_channel_phase!(self, peer_state, chan_phase_entry, close_res);
(tx, Some(channel_phase), Some(close_res))
} else {
debug_assert!(tx.is_none());
(tx, None, None)
}
} else {
return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
"Got a closing_signed message for an unfunded channel!".into())), chan_phase_entry);
}
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
if let Some(broadcast_tx) = tx {
let channel_id = chan_option.as_ref().map(|channel| channel.context().channel_id());
log_info!(WithContext::from(&self.logger, Some(*counterparty_node_id), channel_id, None), "Broadcasting {}", log_tx!(broadcast_tx));
self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
}
if let Some(ChannelPhase::Funded(chan)) = chan_option {
// Queue one last channel_update marking the channel disabled for gossip.
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
}
// Release the per-peer map read lock before finishing the close.
mem::drop(per_peer_state);
if let Some(shutdown_result) = shutdown_result {
self.finish_close_channel(shutdown_result);
}
Ok(())
}
/// Handles a BOLT 2 `update_add_htlc` message, adding the HTLC to the channel as pending.
///
/// The onion is decoded up front (before any locks are taken). If decoding or the
/// channel-level acceptance checks fail, the HTLC is still added but with a failure
/// status, so that the corresponding fail/malformed response flows through the normal
/// commitment dance rather than being rejected out of band.
fn internal_update_add_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), MsgHandleErrInternal> {
// Decode the onion before taking the per-peer lock.
let decoded_hop_res = self.decode_update_add_htlc_onion(msg, counterparty_node_id);
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
// Message handlers should only be invoked for connected peers.
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let mut pending_forward_info = match decoded_hop_res {
Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
self.construct_pending_htlc_status(
msg, counterparty_node_id, shared_secret, next_hop,
chan.context.config().accept_underpaying_htlcs, next_packet_pk_opt,
),
Err(e) => PendingHTLCStatus::Fail(e)
};
let logger = WithChannelContext::from(&self.logger, &chan.context, Some(msg.payment_hash));
// If the channel itself can't accept this HTLC, downgrade the status to a
// failure before adding it.
if let Err((_, error_code)) = chan.can_accept_incoming_htlc(&msg, &self.fee_estimator, &logger) {
if msg.blinding_point.is_some() {
// For HTLCs inside a blinded path, respond with invalid_onion_blinding
// and an all-zero onion hash.
pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
msgs::UpdateFailMalformedHTLC {
channel_id: msg.channel_id,
htlc_id: msg.htlc_id,
sha256_of_onion: [0; 32],
failure_code: INVALID_ONION_BLINDING,
}
))
} else {
match pending_forward_info {
PendingHTLCStatus::Forward(PendingHTLCInfo {
ref incoming_shared_secret, ref routing, ..
}) => {
let reason = if routing.blinded_failure().is_some() {
HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
} else if (error_code & 0x1000) != 0 {
// 0x1000 is the UPDATE flag (BOLT 4); such failures carry
// additional channel data.
let error_data = self.get_htlc_inbound_temp_fail_data(error_code);
HTLCFailReason::reason(error_code, error_data)
} else {
HTLCFailReason::from_failure_code(error_code)
}.get_encrypted_failure_packet(incoming_shared_secret, &None);
let msg = msgs::UpdateFailHTLC {
channel_id: msg.channel_id,
htlc_id: msg.htlc_id,
reason
};
pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg));
},
_ => {},
}
}
}
try_chan_phase_entry!(self, peer_state, chan.update_add_htlc(&msg, pending_forward_info, &self.fee_estimator), chan_phase_entry);
} else {
return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
"Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry);
}
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
Ok(())
}
/// Handles a BOLT 2 `update_fulfill_htlc` message, then claims the HTLC backwards using
/// the provided preimage.
///
/// While still holding the per-peer lock, registers an RAA-blocking action so we hold the
/// next `revoke_and_ack` to this peer until the preimage has been durably persisted in
/// the inbound edge's `ChannelMonitor`.
fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
let funding_txo;
let next_user_channel_id;
// Extract what we need under the lock; the backwards claim happens after release.
let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
// Message handlers should only be invoked for connected peers.
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let res = try_chan_phase_entry!(self, peer_state, chan.update_fulfill_htlc(&msg), chan_phase_entry);
if let HTLCSource::PreviousHopData(prev_hop) = &res.0 {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger,
"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
msg.channel_id);
peer_state.actions_blocking_raa_monitor_updates.entry(msg.channel_id)
.or_insert_with(Vec::new)
.push(RAAMonitorUpdateBlockingAction::from_prev_hop_data(&prev_hop));
}
funding_txo = chan.context.get_funding_txo().expect("We won't accept a fulfill until funded");
next_user_channel_id = chan.context.get_user_id();
res
} else {
return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
"Got an update_fulfill_htlc message for an unfunded channel!".into())), chan_phase_entry);
}
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
// Claim backwards toward the previous hop, outside the per-peer lock.
self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(),
Some(forwarded_htlc_value), skimmed_fee_msat, false, false, Some(*counterparty_node_id),
funding_txo, msg.channel_id, Some(next_user_channel_id),
);
Ok(())
}
/// Handles a BOLT 2 `update_fail_htlc` message, recording the failure on the
/// corresponding funded channel. The HTLC is only failed backwards once the commitment
/// dance completes.
///
/// Returns an error (without closing anything) if the peer or channel is unknown;
/// receipt on an unfunded channel closes that channel.
fn internal_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
	let per_peer_state = self.per_peer_state.read().unwrap();
	let peer_mtx = per_peer_state.get(counterparty_node_id).ok_or_else(|| {
		// Message handlers should only be invoked for connected peers.
		debug_assert!(false);
		MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
	})?;
	let mut peer_lock = peer_mtx.lock().unwrap();
	let peer_state = &mut *peer_lock;
	let mut chan_phase_entry = match peer_state.channel_by_id.entry(msg.channel_id) {
		hash_map::Entry::Occupied(entry) => entry,
		hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)),
	};
	if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
		try_chan_phase_entry!(self, peer_state, chan.update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan_phase_entry);
		Ok(())
	} else {
		return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
			"Got an update_fail_htlc message for an unfunded channel!".into())), chan_phase_entry);
	}
}
/// Handles a BOLT 2 `update_fail_malformed_htlc` message.
///
/// Closes the channel if the failure code is missing the required BADONION bit (0x8000),
/// or if the channel is not yet funded; otherwise records the malformed-failure on the
/// channel.
fn internal_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
// Message handlers should only be invoked for connected peers.
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
// BOLT 2 requires the BADONION bit (0x8000) to be set in this message's failure
// code; if it is missing, the macro below closes the channel and returns.
if (msg.failure_code & 0x8000) == 0 {
let chan_err = ChannelError::close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
try_chan_phase_entry!(self, peer_state, Err(chan_err), chan_phase_entry);
}
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
try_chan_phase_entry!(self, peer_state, chan.update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan_phase_entry);
} else {
return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
"Got an update_fail_malformed_htlc message for an unfunded channel!".into())), chan_phase_entry);
}
Ok(())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
}
/// Handles a BOLT 2 `commitment_signed` message.
///
/// For a channel with an in-progress interactive-tx signing session (V2/dual-funded),
/// this is the initial commitment: we validate it and register the freshly-created
/// `ChannelMonitor` with the chain monitor. For an established channel, we apply the
/// commitment and queue the resulting `ChannelMonitorUpdate`.
fn internal_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> {
let best_block = *self.best_block.read().unwrap();
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
// Message handlers should only be invoked for connected peers.
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
let funding_txo = chan.context.get_funding_txo();
if chan.interactive_tx_signing_session.is_some() {
// Initial commitment_signed for an interactively-funded channel: build
// and register the channel's first monitor.
let monitor = try_chan_phase_entry!(
self, peer_state, chan.commitment_signed_initial_v2(msg, best_block, &self.signer_provider, &&logger),
chan_phase_entry);
let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
if let Ok(persist_state) = monitor_res {
handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
per_peer_state, chan, INITIAL_MONITOR);
} else {
// watch_channel refusing the monitor implies the funding outpoint is
// already tracked, i.e. a duplicate; close the channel.
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_error!(logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
try_chan_phase_entry!(self, peer_state, Err(ChannelError::Close(
(
"Channel funding outpoint was a duplicate".to_owned(),
ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) },
)
)), chan_phase_entry)
}
} else {
// Established channel: apply the commitment and persist via a monitor update.
let monitor_update_opt = try_chan_phase_entry!(
self, peer_state, chan.commitment_signed(msg, &&logger), chan_phase_entry);
if let Some(monitor_update) = monitor_update_opt {
handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
peer_state, per_peer_state, chan);
}
}
Ok(())
} else {
return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
"Got a commitment_signed message for an unfunded channel!".into())), chan_phase_entry);
}
},
hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
}
/// Queues `update_add_htlc`s (keyed by the inbound channel's SCID) for later onion
/// decoding, generating a `PendingHTLCsForwardable` event only if both forward queues
/// were empty beforehand.
fn push_decode_update_add_htlcs(&self, update_add_htlcs: (u64, Vec<msgs::UpdateAddHTLC>)) {
	let (scid, mut new_htlcs) = update_add_htlcs;
	// Snapshot emptiness of both queues before inserting; an event is only needed when
	// nothing was previously pending. The forward_htlcs guard is dropped before taking
	// the decode queue lock, matching the usual lock order.
	let forwards_were_empty = {
		let forward_htlcs = self.forward_htlcs.lock().unwrap();
		forward_htlcs.is_empty()
	};
	let mut decode_queue = self.decode_update_add_htlcs.lock().unwrap();
	let push_forward_event = forwards_were_empty && decode_queue.is_empty();
	decode_queue.entry(scid).or_insert_with(Vec::new).append(&mut new_htlcs);
	if push_forward_event {
		self.push_pending_forwards_ev();
	}
}
#[inline]
/// Queues the given per-source HTLC forwards and, when the queues were previously empty,
/// generates a `PendingHTLCsForwardable` event so they get processed.
fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, Option<PublicKey>, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) {
	if self.forward_htlcs_without_forward_event(per_source_pending_forwards) {
		self.push_pending_forwards_ev();
	}
}
#[inline]
/// Queues decoded, accepted HTLC forwards into `forward_htlcs`, or — for unknown SCIDs
/// matching our fake intercept format — into `pending_intercepted_htlcs`, without
/// generating a `PendingHTLCsForwardable` event.
///
/// Returns `true` if the caller should generate such an event (the forward queues were
/// previously empty, or a duplicate-intercept failure was queued for processing).
fn forward_htlcs_without_forward_event(&self, per_source_pending_forwards: &mut [(u64, Option<PublicKey>, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) -> bool {
let mut push_forward_event = false;
for &mut (prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
let mut new_intercept_events = VecDeque::new();
let mut failed_intercept_forwards = Vec::new();
if !pending_forwards.is_empty() {
for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
// Receives (to us) are queued under the pseudo-SCID 0.
let scid = match forward_info.routing {
PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
PendingHTLCRouting::Receive { .. } => 0,
PendingHTLCRouting::ReceiveKeysend { .. } => 0,
};
// Check both queues' emptiness before inserting, so we know whether an
// event is needed afterwards.
let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid);
let decode_update_add_htlcs_empty = self.decode_update_add_htlcs.lock().unwrap().is_empty();
let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
let forward_htlcs_empty = forward_htlcs.is_empty();
match forward_htlcs.entry(scid) {
hash_map::Entry::Occupied(mut entry) => {
entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint,
prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info
}));
},
hash_map::Entry::Vacant(entry) => {
// An SCID we don't know that matches our fake intercept-scid format:
// surface an HTLCIntercepted event instead of forwarding directly.
if !is_our_scid && forward_info.incoming_amt_msat.is_some() &&
fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, scid, &self.chain_hash)
{
let intercept_id = InterceptId(Sha256::hash(&forward_info.incoming_shared_secret).to_byte_array());
let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
match pending_intercepts.entry(intercept_id) {
hash_map::Entry::Vacant(entry) => {
new_intercept_events.push_back((events::Event::HTLCIntercepted {
requested_next_hop_scid: scid,
payment_hash: forward_info.payment_hash,
inbound_amount_msat: forward_info.incoming_amt_msat.unwrap(),
expected_outbound_amount_msat: forward_info.outgoing_amt_msat,
intercept_id
}, None));
entry.insert(PendingAddHTLCInfo {
prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint,
prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info
});
},
hash_map::Entry::Occupied(_) => {
// Duplicate intercept id: fail the new HTLC back instead of
// overwriting the pending one.
let logger = WithContext::from(&self.logger, None, Some(prev_channel_id), Some(forward_info.payment_hash));
log_info!(logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
user_channel_id: Some(prev_user_channel_id),
counterparty_node_id: prev_counterparty_node_id,
outpoint: prev_funding_outpoint,
channel_id: prev_channel_id,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: forward_info.incoming_shared_secret,
phantom_shared_secret: None,
blinded_failure: forward_info.routing.blinded_failure(),
});
failed_intercept_forwards.push((htlc_source, forward_info.payment_hash,
HTLCFailReason::from_failure_code(0x4000 | 10),
HTLCDestination::InvalidForward { requested_forward_scid: scid },
));
}
}
} else {
// Normal forward/receive: only signal an event if both queues were
// empty before this insert.
push_forward_event |= forward_htlcs_empty && decode_update_add_htlcs_empty;
entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint,
prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info
})));
}
}
}
}
}
for (htlc_source, payment_hash, failure_reason, destination) in failed_intercept_forwards.drain(..) {
push_forward_event |= self.fail_htlc_backwards_internal_without_forward_event(&htlc_source, &payment_hash, &failure_reason, destination);
}
if !new_intercept_events.is_empty() {
let mut events = self.pending_events.lock().unwrap();
events.append(&mut new_intercept_events);
}
}
push_forward_event
}
/// Queues a `PendingHTLCsForwardable` event if one is not already pending.
///
/// While the event queue is being processed we allow one extra event in flight (the
/// in-flight one may already have been handed to the user), hence the `<= 1` bound in
/// that case; otherwise at most one such event may be queued.
fn push_pending_forwards_ev(&self) {
	let mut pending_events = self.pending_events.lock().unwrap();
	let is_processing_events = self.pending_events_processor.load(Ordering::Acquire);
	// Idiomatic pattern test (clippy::match_like_matches_macro) replacing the
	// `if let … { true } else { false }` closure.
	let num_forward_events = pending_events.iter()
		.filter(|(ev, _)| matches!(ev, events::Event::PendingHTLCsForwardable { .. }))
		.count();
	if (is_processing_events && num_forward_events <= 1) || num_forward_events < 1 {
		pending_events.push_back((Event::PendingHTLCsForwardable {
			time_forwardable: Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS),
		}, None));
	}
}
/// Returns whether `revoke_and_ack`-triggered monitor updates for the given channel must
/// currently be held back.
///
/// Updates are held if any `RAAMonitorUpdateBlockingAction` is registered for the
/// channel, or if a pending event's completion action will release an RAA monitor update
/// for it.
fn raa_monitor_updates_held(&self,
	actions_blocking_raa_monitor_updates: &BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
	channel_funding_outpoint: OutPoint, channel_id: ChannelId, counterparty_node_id: PublicKey
) -> bool {
	// A registered (non-empty) blocking-action list holds updates; this check is done
	// first so we avoid taking the pending-events lock when it already answers.
	let has_blocking_action = actions_blocking_raa_monitor_updates
		.get(&channel_id)
		.map_or(false, |actions| !actions.is_empty());
	if has_blocking_action {
		return true;
	}
	// Otherwise, look for an in-flight event whose completion releases our RAA update.
	let release_action = Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
		channel_funding_outpoint,
		channel_id,
		counterparty_node_id,
	});
	self.pending_events.lock().unwrap().iter().any(|(_, action)| action == &release_action)
}
#[cfg(any(test, feature = "_test_utils"))]
/// Test-only wrapper around [`Self::raa_monitor_updates_held`]: resolves the channel's
/// funding outpoint from the per-peer state and defers to it. Returns `false` when the
/// peer or channel is unknown.
pub(crate) fn test_raa_monitor_updates_held(&self,
	counterparty_node_id: PublicKey, channel_id: ChannelId
) -> bool {
	let per_peer_state = self.per_peer_state.read().unwrap();
	let peer_state_mtx = match per_peer_state.get(&counterparty_node_id) {
		Some(mtx) => mtx,
		None => return false,
	};
	let mut peer_state_lock = peer_state_mtx.lock().unwrap();
	let peer_state = &mut *peer_state_lock;
	match peer_state.channel_by_id.get(&channel_id) {
		Some(chan) => self.raa_monitor_updates_held(
			&peer_state.actions_blocking_raa_monitor_updates,
			chan.context().get_funding_txo().unwrap(), channel_id, counterparty_node_id),
		None => false,
	}
}
/// Handles a BOLT 2 `revoke_and_ack` message.
///
/// Applies the revocation to the channel and queues the resulting
/// `ChannelMonitorUpdate` — unless monitor updates for this channel are currently
/// blocked — then fails any HTLCs freed from the holding cell, after all locks have been
/// released.
fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
let htlcs_to_fail = {
let per_peer_state = self.per_peer_state.read().unwrap();
let mut peer_state_lock = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
// Message handlers should only be invoked for connected peers.
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
}).map(|mtx| mtx.lock().unwrap())?;
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
let funding_txo_opt = chan.context.get_funding_txo();
// Whether a blocking action or pending event requires deferring the
// monitor update this RAA generates.
let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
self.raa_monitor_updates_held(
&peer_state.actions_blocking_raa_monitor_updates, funding_txo, msg.channel_id,
*counterparty_node_id)
} else { false };
let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self, peer_state,
chan.revoke_and_ack(&msg, &self.fee_estimator, &&logger, mon_update_blocked), chan_phase_entry);
if let Some(monitor_update) = monitor_update_opt {
let funding_txo = funding_txo_opt
.expect("Funding outpoint must have been set for RAA handling to succeed");
handle_new_monitor_update!(self, funding_txo, monitor_update,
peer_state_lock, peer_state, per_peer_state, chan);
}
htlcs_to_fail
} else {
return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
"Got a revoke_and_ack message for an unfunded channel!".into())), chan_phase_entry);
}
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
// Fail holding-cell HTLCs now that the per-peer lock is released.
self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id);
Ok(())
}
/// Handles a BOLT 2 `update_fee` message, passing the counterparty's proposed feerate to
/// the channel.
///
/// Returns an error (without closing anything) if the peer or channel is unknown;
/// receipt on an unfunded channel closes that channel.
fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
	let per_peer_state = self.per_peer_state.read().unwrap();
	let peer_mtx = per_peer_state.get(counterparty_node_id).ok_or_else(|| {
		// Message handlers should only be invoked for connected peers.
		debug_assert!(false);
		MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
	})?;
	let mut peer_lock = peer_mtx.lock().unwrap();
	let peer_state = &mut *peer_lock;
	let mut chan_phase_entry = match peer_state.channel_by_id.entry(msg.channel_id) {
		hash_map::Entry::Occupied(entry) => entry,
		hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)),
	};
	if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
		let logger = WithChannelContext::from(&self.logger, &chan.context, None);
		try_chan_phase_entry!(self, peer_state, chan.update_fee(&self.fee_estimator, &msg, &&logger), chan_phase_entry);
		Ok(())
	} else {
		return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
			"Got an update_fee message for an unfunded channel!".into())), chan_phase_entry);
	}
}
/// Handles a BOLT 7 `announcement_signatures` message, queueing a full
/// `channel_announcement` (plus our `channel_update`) for broadcast.
///
/// Ignored (without error action) if the channel is not yet usable; receipt on an
/// unfunded channel closes that channel.
fn internal_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
// Message handlers should only be invoked for connected peers.
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
if !chan.context.is_usable() {
return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
}
peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
msg: try_chan_phase_entry!(self, peer_state, chan.announcement_signatures(
&self.node_signer, self.chain_hash, self.best_block.read().unwrap().height,
msg, &self.default_configuration
), chan_phase_entry),
// NOTE(review): the unwrap assumes get_channel_update_for_broadcast always
// succeeds for a usable channel that produced announcement signatures —
// confirm against that helper's failure modes.
update_msg: Some(self.get_channel_update_for_broadcast(chan).unwrap()),
});
} else {
return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
"Got an announcement_signatures message for an unfunded channel!".into())), chan_phase_entry);
}
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
Ok(())
}
/// Handles a `channel_update` received directly from the counterparty (rather than via
/// gossip), letting us learn forwarding parameters for channels that aren't announced.
///
/// Returns the persistence behavior for the caller: anything we don't end up storing
/// (unknown SCID, update for our own direction, or no actual change) maps to
/// `SkipPersistNoEvents`.
fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
let (chan_counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&msg.contents.short_channel_id) {
Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
None => {
// Not a channel we track by SCID — nothing to do.
return Ok(NotifyOption::SkipPersistNoEvents)
}
};
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(&chan_counterparty_node_id);
if peer_state_mutex_opt.is_none() {
return Ok(NotifyOption::SkipPersistNoEvents)
}
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(chan_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
if chan.context.get_counterparty_node_id() != *counterparty_node_id {
// For announced channels a third party may legitimately know the SCID,
// so just ignore; for private ones this peer shouldn't know the channel.
if chan.context.should_announce() {
return Ok(NotifyOption::SkipPersistNoEvents);
}
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
}
// Direction is derived from lexicographic pubkey ordering; bit 0 of
// channel_flags says which side the update describes. Ignore updates
// describing our own side.
let were_node_one = self.get_our_node_id().serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
let msg_from_node_one = msg.contents.channel_flags & 1 == 0;
if were_node_one == msg_from_node_one {
return Ok(NotifyOption::SkipPersistNoEvents);
} else {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_debug!(logger, "Received channel_update {:?} for channel {}.", msg, chan_id);
let did_change = try_chan_phase_entry!(self, peer_state, chan.channel_update(&msg), chan_phase_entry);
// Only persist if the stored update actually changed.
if !did_change {
return Ok(NotifyOption::SkipPersistNoEvents);
}
}
} else {
return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
"Got a channel_update for an unfunded channel!".into())), chan_phase_entry);
}
},
hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersistNoEvents)
}
Ok(NotifyOption::DoPersist)
}
/// Handles a BOLT 2 `channel_reestablish` message on peer reconnection.
///
/// Resumes the channel — queueing whatever RAA/commitment/`channel_ready` messages the
/// channel says need re-sending — plus any pending `shutdown` or unicast
/// `channel_update`. For an unknown channel, sends a bogus `channel_reestablish` to
/// induce the peer to close it. Afterwards, applies the lnd bug 4006 workaround by
/// re-delivering `channel_ready` if the channel requested it.
fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<NotifyOption, MsgHandleErrInternal> {
let need_lnd_workaround = {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
// Message handlers should only be invoked for connected peers.
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(
format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
msg.channel_id
)
})?;
let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), None);
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let responses = try_chan_phase_entry!(self, peer_state, chan.channel_reestablish(
msg, &&logger, &self.node_signer, self.chain_hash,
&self.default_configuration, &*self.best_block.read().unwrap()), chan_phase_entry);
let mut channel_update = None;
if let Some(msg) = responses.shutdown_msg {
// Re-send our pending shutdown; otherwise re-send the private
// channel_update for a usable channel.
peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
node_id: counterparty_node_id.clone(),
msg,
});
} else if chan.context.is_usable() {
if let Ok(msg) = self.get_channel_update_for_unicast(chan) {
channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
node_id: chan.context.get_counterparty_node_id(),
msg,
});
}
}
let need_lnd_workaround = chan.context.workaround_lnd_bug_4006.take();
let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption(
&mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.order,
Vec::new(), Vec::new(), None, responses.channel_ready, responses.announcement_sigs, None);
// Reestablish passes no forwards/finalized claims, so resumption can't
// produce any.
debug_assert!(htlc_forwards.is_none());
debug_assert!(decode_update_add_htlcs.is_none());
// Queue the channel_update after the resumption messages above.
if let Some(upd) = channel_update {
peer_state.pending_msg_events.push(upd);
}
need_lnd_workaround
} else {
return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
"Got a channel_reestablish message for an unfunded channel!".into())), chan_phase_entry);
}
},
hash_map::Entry::Vacant(_) => {
// Unknown channel: reply with deliberately bogus reestablish values (zero
// commitment numbers, dummy per-commitment data) to force the peer to close
// the channel on their end.
log_debug!(logger, "Sending bogus ChannelReestablish for unknown channel {} to force channel closure",
msg.channel_id);
peer_state.pending_msg_events.push(MessageSendEvent::SendChannelReestablish {
node_id: *counterparty_node_id,
msg: msgs::ChannelReestablish {
channel_id: msg.channel_id,
next_local_commitment_number: 0,
next_remote_commitment_number: 0,
your_last_per_commitment_secret: [1u8; 32],
my_current_per_commitment_point: PublicKey::from_slice(&[2u8; 33]).unwrap(),
next_funding_txid: None,
},
});
return Err(MsgHandleErrInternal::send_err_msg_no_close(
format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
counterparty_node_id), msg.channel_id)
)
}
}
};
// lnd bug 4006 workaround: re-deliver channel_ready after reestablish.
if let Some(channel_ready_msg) = need_lnd_workaround {
self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
}
Ok(NotifyOption::SkipPersistHandleEvents)
}
/// Processes any events which our [`ChannelMonitor`]s have surfaced via the chain monitor,
/// returning whether there were any events to handle.
///
/// HTLC resolutions observed on-chain are claimed or failed backwards, monitor-initiated
/// force-closes are mirrored by removing the channel locally, and `Completed` events unblock
/// previously-in-flight monitor updates.
fn process_pending_monitor_events(&self) -> bool {
// Callers are expected to already hold the total_consistency_lock in read mode.
debug_assert!(self.total_consistency_lock.try_write().is_err());
let mut failed_channels = Vec::new();
let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
let has_pending_monitor_events = !pending_monitor_events.is_empty();
for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
for monitor_event in monitor_events.drain(..) {
match monitor_event {
MonitorEvent::HTLCEvent(htlc_update) => {
let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id), Some(htlc_update.payment_hash));
if let Some(preimage) = htlc_update.payment_preimage {
// The monitor learned the preimage (e.g. on-chain) - claim the HTLC upstream.
log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
self.claim_funds_internal(htlc_update.source, preimage,
htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
false, counterparty_node_id, funding_outpoint, channel_id, None);
} else {
// The HTLC was resolved without a preimage - fail it backwards with
// 0x4000|8 (`permanent_channel_failure` per BOLT 4).
log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
}
},
MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => {
let counterparty_node_id_opt = match counterparty_node_id {
Some(cp_id) => Some(cp_id),
None => {
// The monitor event didn't tell us which peer this channel belongs
// to, so look it up via the funding outpoint.
let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
outpoint_to_peer.get(&funding_outpoint).cloned()
}
};
if let Some(counterparty_node_id) = counterparty_node_id_opt {
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_id) {
// Prefer the monitor-provided closure reason when it carried one.
let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
reason
} else {
ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
};
let mut shutdown_res = chan_phase_entry.get_mut().context_mut().force_shutdown(false, reason.clone());
let chan_phase = remove_channel_phase!(self, peer_state, chan_phase_entry, shutdown_res);
failed_channels.push(shutdown_res);
if let ChannelPhase::Funded(chan) = chan_phase {
// Queue a channel_update broadcast announcing the channel is gone...
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
// ...and tell the counterparty why we're disconnecting them.
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: chan.context.get_counterparty_node_id(),
action: msgs::ErrorAction::DisconnectPeer {
msg: Some(msgs::ErrorMessage {
channel_id: chan.context.channel_id(),
data: reason.to_string()
})
},
});
}
}
}
}
},
MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id } => {
// An async monitor update finished persisting - unblock the channel.
self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, counterparty_node_id.as_ref());
},
}
}
}
// Finish tearing down removed channels outside of the per-peer locks taken above.
for failure in failed_channels.drain(..) {
self.finish_close_channel(failure);
}
has_pending_monitor_events
}
/// Fuzzing-only entry point which processes pending monitor events while holding the
/// persistence notifier guard, as the production timer/event paths do.
#[cfg(fuzzing)]
pub fn process_monitor_events(&self) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.process_pending_monitor_events();
}
/// Attempts to free any HTLCs sitting in funded channels' holding cells, returning whether
/// any channel state changed (i.e. whether persistence and further processing are needed).
fn check_free_holding_cells(&self) -> bool {
let mut has_monitor_update = false;
let mut failed_htlcs = Vec::new();
// Walk every funded channel on every peer. Freeing a holding cell may produce a monitor
// update whose handling can drop and re-take locks, invalidating our iterators - in that
// case we restart iteration from the top via the labeled loops below.
'peer_loop: loop {
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
'chan_loop: loop {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
for (channel_id, chan) in peer_state.channel_by_id.iter_mut().filter_map(
|(chan_id, phase)| if let ChannelPhase::Funded(chan) = phase { Some((chan_id, chan)) } else { None }
) {
let counterparty_node_id = chan.context.get_counterparty_node_id();
let funding_txo = chan.context.get_funding_txo();
let (monitor_opt, holding_cell_failed_htlcs) =
chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &&WithChannelContext::from(&self.logger, &chan.context, None));
if !holding_cell_failed_htlcs.is_empty() {
// Collect failures to process after all locks are released.
failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
}
if let Some(monitor_update) = monitor_opt {
has_monitor_update = true;
// This macro may release the peer lock, so our iterator is no longer
// valid afterwards - restart the scan from scratch.
handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
peer_state_lock, peer_state, per_peer_state, chan);
continue 'peer_loop;
}
}
break 'chan_loop;
}
}
break 'peer_loop;
}
let has_update = has_monitor_update || !failed_htlcs.is_empty();
// Fail the collected HTLCs backwards now that no per-peer locks are held.
for (failures, channel_id, counterparty_node_id) in failed_htlcs.drain(..) {
self.fail_holding_cell_htlcs(failures, channel_id, &counterparty_node_id);
}
has_update
}
/// Retries channel operations which were previously blocked waiting on an (asynchronous)
/// signer, now that the signer may be able to produce the required signatures.
///
/// If `channel_opt` is set, only the given (counterparty, channel) pair is retried;
/// otherwise every channel with every peer is.
pub fn signer_unblocked(&self, channel_opt: Option<(PublicKey, ChannelId)>) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
// Re-drives a single channel, pushing any now-available messages into
// `pending_msg_events`. Returns a `ShutdownResult` if the channel finished closing.
let unblock_chan = |phase: &mut ChannelPhase<SP>, pending_msg_events: &mut Vec<MessageSendEvent>| -> Option<ShutdownResult> {
let node_id = phase.context().get_counterparty_node_id();
match phase {
ChannelPhase::Funded(chan) => {
let msgs = chan.signer_maybe_unblocked(&self.logger);
let cu_msg = msgs.commitment_update.map(|updates| events::MessageSendEvent::UpdateHTLCs {
node_id,
updates,
});
let raa_msg = msgs.revoke_and_ack.map(|msg| events::MessageSendEvent::SendRevokeAndACK {
node_id,
msg,
});
// If both a commitment update and an RAA are ready, they must be sent in the
// order the channel state machine requires (`msgs.order`).
match (cu_msg, raa_msg) {
(Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::CommitmentFirst => {
pending_msg_events.push(cu);
pending_msg_events.push(raa);
},
(Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::RevokeAndACKFirst => {
pending_msg_events.push(raa);
pending_msg_events.push(cu);
},
(Some(cu), _) => pending_msg_events.push(cu),
(_, Some(raa)) => pending_msg_events.push(raa),
(_, _) => {},
}
if let Some(msg) = msgs.funding_signed {
pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
node_id,
msg,
});
}
if let Some(msg) = msgs.channel_ready {
send_channel_ready!(self, pending_msg_events, chan, msg);
}
if let Some(msg) = msgs.closing_signed {
pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
node_id,
msg,
});
}
if let Some(broadcast_tx) = msgs.signed_closing_tx {
// The signer produced the fully-signed cooperative close transaction -
// broadcast it and queue a channel_update announcing the closure.
let channel_id = chan.context.channel_id();
let counterparty_node_id = chan.context.get_counterparty_node_id();
let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None);
log_info!(logger, "Broadcasting closing tx {}", log_tx!(broadcast_tx));
self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
}
msgs.shutdown_result
}
ChannelPhase::UnfundedOutboundV1(chan) => {
// An outbound channel may have been waiting on a signature for its
// open_channel or funding_created message.
let (open_channel, funding_created) = chan.signer_maybe_unblocked(self.chain_hash.clone(), &self.logger);
if let Some(msg) = open_channel {
pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
node_id,
msg,
});
}
if let Some(msg) = funding_created {
pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
node_id,
msg,
});
}
None
}
ChannelPhase::UnfundedInboundV1(chan) => {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
if let Some(msg) = chan.signer_maybe_unblocked(&&logger) {
pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
node_id,
msg,
});
}
None
},
// Nothing to retry for V2 channel establishment here.
ChannelPhase::UnfundedInboundV2(_) | ChannelPhase::UnfundedOutboundV2(_) => None,
}
};
let mut shutdown_results = Vec::new();
let per_peer_state = self.per_peer_state.read().unwrap();
// When a specific channel was requested, only visit its counterparty's peer state.
let per_peer_state_iter = per_peer_state.iter().filter(|(cp_id, _)| {
if let Some((counterparty_node_id, _)) = channel_opt {
**cp_id == counterparty_node_id
} else { true }
});
for (_cp_id, peer_state_mutex) in per_peer_state_iter {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
peer_state.channel_by_id.retain(|_, chan| {
let shutdown_result = match channel_opt {
Some((_, channel_id)) if chan.context().channel_id() != channel_id => None,
_ => unblock_chan(chan, &mut peer_state.pending_msg_events),
};
if let Some(mut shutdown_result) = shutdown_result {
let context = &chan.context();
let logger = WithChannelContext::from(&self.logger, context, None);
log_trace!(logger, "Removing channel {} now that the signer is unblocked", context.channel_id());
// The channel finished closing - drop it from the map now and finish the
// close below, after the per-peer lock has been released.
locked_close_channel!(self, peer_state, context, shutdown_result);
shutdown_results.push(shutdown_result);
false
} else {
true
}
});
}
drop(per_peer_state);
for shutdown_result in shutdown_results.drain(..) {
self.finish_close_channel(shutdown_result);
}
}
/// Walks all funded channels and, where the shutdown handshake has completed and it is our
/// turn, proposes an initial `closing_signed`. Returns whether any messages were generated
/// (and thus whether persistence is needed).
fn maybe_generate_initial_closing_signed(&self) -> bool {
let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new();
let mut has_update = false;
let mut shutdown_results = Vec::new();
{
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
peer_state.channel_by_id.retain(|channel_id, phase| {
match phase {
ChannelPhase::Funded(chan) => {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
match chan.maybe_propose_closing_signed(&self.fee_estimator, &&logger) {
Ok((msg_opt, tx_opt, shutdown_result_opt)) => {
if let Some(msg) = msg_opt {
has_update = true;
pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
node_id: chan.context.get_counterparty_node_id(), msg,
});
}
// A shutdown result should be returned exactly when the channel
// has fully shut down.
debug_assert_eq!(shutdown_result_opt.is_some(), chan.is_shutdown());
if let Some(mut shutdown_result) = shutdown_result_opt {
locked_close_channel!(self, peer_state, &chan.context, shutdown_result);
shutdown_results.push(shutdown_result);
}
if let Some(tx) = tx_opt {
// Fee negotiation converged - announce the closure and broadcast
// the fully-signed closing transaction, dropping the channel.
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
log_info!(logger, "Broadcasting {}", log_tx!(tx));
self.tx_broadcaster.broadcast_transactions(&[&tx]);
false
} else { true }
},
Err(e) => {
has_update = true;
let (close_channel, res) = convert_chan_phase_err!(self, peer_state, e, chan, channel_id, FUNDED_CHANNEL);
handle_errors.push((chan.context.get_counterparty_node_id(), Err(res)));
!close_channel
}
}
},
_ => true, }
});
}
}
// Handle errors and finish closes outside of the per-peer locks taken above.
for (counterparty_node_id, err) in handle_errors.drain(..) {
let _ = handle_error!(self, err, counterparty_node_id);
}
for shutdown_result in shutdown_results.drain(..) {
self.finish_close_channel(shutdown_result);
}
has_update
}
/// Creates a [`Bolt11Invoice`] payable to us, using the given [`Bolt11InvoiceParameters`].
///
/// If `payment_hash` is provided it is registered for receipt (with a matching payment
/// secret); otherwise a fresh hash/secret pair is generated. Route hints are built from our
/// current channels. Errors with a `CreationError` for invalid parameters or a `SignError`
/// if signing fails.
pub fn create_bolt11_invoice(
&self, params: Bolt11InvoiceParameters,
) -> Result<Bolt11Invoice, SignOrCreationError<()>> {
let Bolt11InvoiceParameters {
amount_msats, description, invoice_expiry_delta_secs, min_final_cltv_expiry_delta,
payment_hash,
} = params;
// Derive the invoice currency from the chain we're running on, defaulting to Bitcoin.
let currency =
Network::from_chain_hash(self.chain_hash).map(Into::into).unwrap_or(Currency::Bitcoin);
#[cfg(feature = "std")]
let duration_since_epoch = {
use std::time::SystemTime;
SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)
.expect("SystemTime::now() should be after SystemTime::UNIX_EPOCH")
};
// Without `std` there is no wall clock; use the highest block timestamp we've seen.
#[cfg(not(feature = "std"))]
let duration_since_epoch =
Duration::from_secs(self.highest_seen_timestamp.load(Ordering::Acquire) as u64);
// Reject deltas which, even with the 3-block buffer added below, fall under the minimum.
if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta {
if min_final_cltv_expiry_delta.saturating_add(3) < MIN_FINAL_CLTV_EXPIRY_DELTA {
return Err(SignOrCreationError::CreationError(CreationError::MinFinalCltvExpiryDeltaTooShort));
}
}
let (payment_hash, payment_secret) = match payment_hash {
Some(payment_hash) => {
let payment_secret = self
.create_inbound_payment_for_hash(
payment_hash, amount_msats,
invoice_expiry_delta_secs.unwrap_or(DEFAULT_EXPIRY_TIME as u32),
min_final_cltv_expiry_delta,
)
.map_err(|()| SignOrCreationError::CreationError(CreationError::InvalidAmount))?;
(payment_hash, payment_secret)
},
None => {
self
.create_inbound_payment(
amount_msats, invoice_expiry_delta_secs.unwrap_or(DEFAULT_EXPIRY_TIME as u32),
min_final_cltv_expiry_delta,
)
.map_err(|()| SignOrCreationError::CreationError(CreationError::InvalidAmount))?
},
};
log_trace!(self.logger, "Creating invoice with payment hash {}", &payment_hash);
let invoice = Bolt11InvoiceBuilder::new(currency);
let invoice = match description {
Bolt11InvoiceDescription::Direct(description) => invoice.description(description.into_inner().0),
Bolt11InvoiceDescription::Hash(hash) => invoice.description_hash(hash.0),
};
let mut invoice = invoice
.duration_since_epoch(duration_since_epoch)
.payee_pub_key(self.get_our_node_id())
.payment_hash(Hash::from_slice(&payment_hash.0).unwrap())
.payment_secret(payment_secret)
.basic_mpp()
.min_final_cltv_expiry_delta(
// Add a 3-block buffer to the caller's delta (matching the check above).
min_final_cltv_expiry_delta.map(|x| x.saturating_add(3)).unwrap_or(MIN_FINAL_CLTV_EXPIRY_DELTA).into()
);
if let Some(invoice_expiry_delta_secs) = invoice_expiry_delta_secs{
invoice = invoice.expiry_time(Duration::from_secs(invoice_expiry_delta_secs.into()));
}
if let Some(amount_msats) = amount_msats {
invoice = invoice.amount_milli_satoshis(amount_msats);
}
// Attach route hints selected from our current channels.
let channels = self.list_channels();
let route_hints = super::invoice_utils::sort_and_filter_channels(channels, amount_msats, &self.logger);
for hint in route_hints {
invoice = invoice.private_route(hint);
}
let raw_invoice = invoice.build_raw().map_err(|e| SignOrCreationError::CreationError(e))?;
let signature = self.node_signer.sign_invoice(&raw_invoice, Recipient::Node);
raw_invoice
.sign(|_| signature)
.map(|invoice| Bolt11Invoice::from_signed(invoice).unwrap())
.map_err(|e| SignOrCreationError::SignError(e))
}
}
/// Parameters used with [`ChannelManager::create_bolt11_invoice`].
pub struct Bolt11InvoiceParameters {
/// The amount for the invoice, if any, in millisatoshis.
pub amount_msats: Option<u64>,
/// The description for what the invoice is for, included in the invoice.
pub description: Bolt11InvoiceDescription,
/// The invoice expiry relative to its creation time, in seconds, if set.
pub invoice_expiry_delta_secs: Option<u32>,
/// The minimum `cltv_expiry` delta for the final HTLC, if set. Note that a buffer of three
/// blocks is added to the given value when building the invoice.
pub min_final_cltv_expiry_delta: Option<u16>,
/// The payment hash to use; if `None`, a fresh payment hash/secret pair is generated.
pub payment_hash: Option<PaymentHash>,
}
impl Default for Bolt11InvoiceParameters {
fn default() -> Self {
Self {
amount_msats: None,
description: Bolt11InvoiceDescription::Direct(Description::empty()),
invoice_expiry_delta_secs: None,
min_final_cltv_expiry_delta: None,
payment_hash: None,
}
}
}
// Macro so that the `c_bindings` build can substitute a concrete builder type for the
// generic one; expanded inside the `ChannelManager` impl below.
macro_rules! create_offer_builder { ($self: ident, $builder: ty) => {
/// Creates a builder for an [`Offer`] whose invoice requests will be recognized by this
/// [`ChannelManager`], using a blinded path back to us as the offer's path.
///
/// Errors with [`Bolt12SemanticError::MissingPaths`] if no blinded path can be created.
pub fn create_offer_builder(
&$self, absolute_expiry: Option<Duration>
) -> Result<$builder, Bolt12SemanticError> {
let node_id = $self.get_our_node_id();
let expanded_key = &$self.inbound_payment_key;
let entropy = &*$self.entropy_source;
let secp_ctx = &$self.secp_ctx;
let nonce = Nonce::from_entropy_source(entropy);
let context = OffersContext::InvoiceRequest { nonce };
// Short-lived offers (per `absolute_expiry`) may get compact blinded paths.
let path = $self.create_blinded_paths_using_absolute_expiry(context, absolute_expiry)
.and_then(|paths| paths.into_iter().next().ok_or(()))
.map_err(|_| Bolt12SemanticError::MissingPaths)?;
let builder = OfferBuilder::deriving_signing_pubkey(node_id, expanded_key, nonce, secp_ctx)
.chain_hash($self.chain_hash)
.path(path);
let builder = match absolute_expiry {
None => builder,
Some(absolute_expiry) => builder.absolute_expiry(absolute_expiry),
};
Ok(builder.into())
}
} }
// Macro so that the `c_bindings` build can substitute a concrete builder type for the
// generic one; expanded inside the `ChannelManager` impl below.
macro_rules! create_refund_builder { ($self: ident, $builder: ty) => {
/// Creates a builder for a [`Refund`] for `amount_msats`, registering a pending outbound
/// payment under `payment_id` which awaits the corresponding invoice until
/// `absolute_expiry`.
///
/// Errors with [`Bolt12SemanticError::MissingPaths`] if no blinded path can be created, or
/// [`Bolt12SemanticError::DuplicatePaymentId`] if `payment_id` is already in use.
pub fn create_refund_builder(
&$self, amount_msats: u64, absolute_expiry: Duration, payment_id: PaymentId,
retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>
) -> Result<$builder, Bolt12SemanticError> {
let node_id = $self.get_our_node_id();
let expanded_key = &$self.inbound_payment_key;
let entropy = &*$self.entropy_source;
let secp_ctx = &$self.secp_ctx;
let nonce = Nonce::from_entropy_source(entropy);
let context = OffersContext::OutboundPayment { payment_id, nonce, hmac: None };
// Short-lived refunds (per `absolute_expiry`) may get compact blinded paths.
let path = $self.create_blinded_paths_using_absolute_expiry(context, Some(absolute_expiry))
.and_then(|paths| paths.into_iter().next().ok_or(()))
.map_err(|_| Bolt12SemanticError::MissingPaths)?;
let builder = RefundBuilder::deriving_signing_pubkey(
node_id, expanded_key, nonce, secp_ctx, amount_msats, payment_id
)?
.chain_hash($self.chain_hash)
.absolute_expiry(absolute_expiry)
.path(path);
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop($self);
// Track the pending payment so we can pay the invoice when it arrives (or time the
// payment out at `absolute_expiry`).
let expiration = StaleExpiration::AbsoluteTimeout(absolute_expiry);
$self.pending_outbound_payments
.add_new_awaiting_invoice(
payment_id, expiration, retry_strategy, max_total_routing_fee_msat, None,
)
.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?;
Ok(builder.into())
}
} }
/// The maximum number of (message, destination/reply-path) pairs we'll enqueue when sending
/// offers-related onion messages for a single request.
const OFFERS_MESSAGE_REQUEST_LIMIT: usize = 10;
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
// The builder methods are macro-generated so the `c_bindings` build can expose concrete
// (non-generic) builder types while the normal build keeps the generic ones.
#[cfg(not(c_bindings))]
create_offer_builder!(self, OfferBuilder<DerivedMetadata, secp256k1::All>);
#[cfg(not(c_bindings))]
create_refund_builder!(self, RefundBuilder<secp256k1::All>);
#[cfg(c_bindings)]
create_offer_builder!(self, OfferWithDerivedMetadataBuilder);
#[cfg(c_bindings)]
create_refund_builder!(self, RefundMaybeWithDerivedMetadataBuilder);
/// Pays for an [`Offer`] by creating and enqueueing an [`InvoiceRequest`], tracking the
/// pending payment under `payment_id` (timing out after one timer tick if no invoice
/// arrives).
///
/// Errors with [`Bolt12SemanticError::DuplicatePaymentId`] if `payment_id` is already in
/// use, or propagates any error from building/enqueueing the invoice request.
pub fn pay_for_offer(
&self, offer: &Offer, quantity: Option<u64>, amount_msats: Option<u64>,
payer_note: Option<String>, payment_id: PaymentId, retry_strategy: Retry,
max_total_routing_fee_msat: Option<u64>
) -> Result<(), Bolt12SemanticError> {
self.pay_for_offer_intern(offer, quantity, amount_msats, payer_note, payment_id, None, |invoice_request, nonce| {
let expiration = StaleExpiration::TimerTicks(1);
// Keep the invoice request around so it can be re-sent on reconnection/retry.
let retryable_invoice_request = RetryableInvoiceRequest {
invoice_request: invoice_request.clone(),
nonce,
};
self.pending_outbound_payments
.add_new_awaiting_invoice(
payment_id, expiration, retry_strategy, max_total_routing_fee_msat,
Some(retryable_invoice_request)
)
.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
})
}
/// Common logic for paying an [`Offer`]: builds and signs the [`InvoiceRequest`], creates
/// reply paths, registers the pending payment via `create_pending_payment`, and enqueues the
/// request for sending.
fn pay_for_offer_intern<CPP: FnOnce(&InvoiceRequest, Nonce) -> Result<(), Bolt12SemanticError>>(
&self, offer: &Offer, quantity: Option<u64>, amount_msats: Option<u64>,
payer_note: Option<String>, payment_id: PaymentId,
human_readable_name: Option<HumanReadableName>, create_pending_payment: CPP,
) -> Result<(), Bolt12SemanticError> {
let expanded_key = &self.inbound_payment_key;
let entropy = &*self.entropy_source;
let secp_ctx = &self.secp_ctx;
let nonce = Nonce::from_entropy_source(entropy);
let builder: InvoiceRequestBuilder<secp256k1::All> = offer
.request_invoice(expanded_key, nonce, secp_ctx, payment_id)?
.into();
let builder = builder.chain_hash(self.chain_hash)?;
// Apply the caller's optional fields, each of which may be rejected by the builder.
let builder = match quantity {
None => builder,
Some(quantity) => builder.quantity(quantity)?,
};
let builder = match amount_msats {
None => builder,
Some(amount_msats) => builder.amount_msats(amount_msats)?,
};
let builder = match payer_note {
None => builder,
Some(payer_note) => builder.payer_note(payer_note),
};
let builder = match human_readable_name {
None => builder,
Some(hrn) => builder.sourced_from_human_readable_name(hrn),
};
let invoice_request = builder.build_and_sign()?;
// Authenticate the reply context so the eventual response can be verified as actually
// corresponding to our request.
let hmac = payment_id.hmac_for_offer_payment(nonce, expanded_key);
let context = MessageContext::Offers(
OffersContext::OutboundPayment { payment_id, nonce, hmac: Some(hmac) }
);
let reply_paths = self.create_blinded_paths(context)
.map_err(|_| Bolt12SemanticError::MissingPaths)?;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
create_pending_payment(&invoice_request, nonce)?;
self.enqueue_invoice_request(invoice_request, reply_paths)
}
/// Queues `invoice_request` onion messages for sending, pairing each of the offer's blinded
/// paths with each of our `reply_paths` (capped at [`OFFERS_MESSAGE_REQUEST_LIMIT`]), or
/// sending directly to the issuer's signing pubkey if the offer has no paths.
fn enqueue_invoice_request(
&self,
invoice_request: InvoiceRequest,
reply_paths: Vec<BlindedMessagePath>,
) -> Result<(), Bolt12SemanticError> {
let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
if !invoice_request.paths().is_empty() {
// Cross every offer path with every reply path, bounded by the request limit.
reply_paths
.iter()
.flat_map(|reply_path| invoice_request.paths().iter().map(move |path| (path, reply_path)))
.take(OFFERS_MESSAGE_REQUEST_LIMIT)
.for_each(|(path, reply_path)| {
let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
destination: Destination::BlindedPath(path.clone()),
reply_path: reply_path.clone(),
};
let message = OffersMessage::InvoiceRequest(invoice_request.clone());
pending_offers_messages.push((message, instructions));
});
} else if let Some(node_id) = invoice_request.issuer_signing_pubkey() {
for reply_path in reply_paths {
let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
destination: Destination::Node(node_id),
reply_path,
};
let message = OffersMessage::InvoiceRequest(invoice_request.clone());
pending_offers_messages.push((message, instructions));
}
} else {
// An offer with neither paths nor an issuer signing pubkey shouldn't be buildable.
debug_assert!(false);
return Err(Bolt12SemanticError::MissingIssuerSigningPubkey);
}
Ok(())
}
/// Creates a [`Bolt12Invoice`] paying the given [`Refund`], enqueues it for sending, and
/// returns it.
///
/// Errors with [`Bolt12SemanticError::UnsupportedChain`] if the refund targets another
/// chain, [`Bolt12SemanticError::MissingPaths`] if no payment/reply paths can be created, or
/// [`Bolt12SemanticError::InvalidAmount`] if registering the inbound payment fails.
pub fn request_refund_payment(
&self, refund: &Refund
) -> Result<Bolt12Invoice, Bolt12SemanticError> {
let expanded_key = &self.inbound_payment_key;
let entropy = &*self.entropy_source;
let secp_ctx = &self.secp_ctx;
let amount_msats = refund.amount_msats();
let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
if refund.chain() != self.chain_hash {
return Err(Bolt12SemanticError::UnsupportedChain);
}
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
match self.create_inbound_payment(Some(amount_msats), relative_expiry, None) {
Ok((payment_hash, payment_secret)) => {
let payment_context = PaymentContext::Bolt12Refund(Bolt12RefundContext {});
let payment_paths = self.create_blinded_payment_paths(
amount_msats, payment_secret, payment_context
)
.map_err(|_| Bolt12SemanticError::MissingPaths)?;
#[cfg(feature = "std")]
let builder = refund.respond_using_derived_keys(
payment_paths, payment_hash, expanded_key, entropy
)?;
// Without `std` there's no wall clock, so use the highest block timestamp seen.
#[cfg(not(feature = "std"))]
let created_at = Duration::from_secs(
self.highest_seen_timestamp.load(Ordering::Acquire) as u64
);
#[cfg(not(feature = "std"))]
let builder = refund.respond_using_derived_keys_no_std(
payment_paths, payment_hash, created_at, expanded_key, entropy
)?;
let builder: InvoiceBuilder<DerivedSigningPubkey> = builder.into();
let invoice = builder.allow_mpp().build_and_sign(secp_ctx)?;
// Authenticate the reply context so any response can be verified as ours.
let nonce = Nonce::from_entropy_source(entropy);
let hmac = payment_hash.hmac_for_offer_payment(nonce, expanded_key);
let context = MessageContext::Offers(OffersContext::InboundPayment {
payment_hash: invoice.payment_hash(), nonce, hmac
});
let reply_paths = self.create_blinded_paths(context)
.map_err(|_| Bolt12SemanticError::MissingPaths)?;
let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
if refund.paths().is_empty() {
// No blinded paths in the refund - send directly to the payer's pubkey.
for reply_path in reply_paths {
let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
destination: Destination::Node(refund.payer_signing_pubkey()),
reply_path,
};
let message = OffersMessage::Invoice(invoice.clone());
pending_offers_messages.push((message, instructions));
}
} else {
// Cross every refund path with every reply path, bounded by the request limit.
reply_paths
.iter()
.flat_map(|reply_path| refund.paths().iter().map(move |path| (path, reply_path)))
.take(OFFERS_MESSAGE_REQUEST_LIMIT)
.for_each(|(path, reply_path)| {
let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
destination: Destination::BlindedPath(path.clone()),
reply_path: reply_path.clone(),
};
let message = OffersMessage::Invoice(invoice.clone());
pending_offers_messages.push((message, instructions));
});
}
Ok(invoice)
},
Err(()) => Err(Bolt12SemanticError::InvalidAmount),
}
}
/// Starts resolving a DNS-based human readable name into an [`Offer`] via the given
/// `dns_resolvers`, registering a pending payment under `payment_id` so that the offer, once
/// resolved (asynchronously), is paid for `amount_msats` with the given retry/fee limits.
#[cfg(feature = "dnssec")]
pub fn pay_for_offer_from_human_readable_name(
&self, name: HumanReadableName, amount_msats: u64, payment_id: PaymentId,
retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>,
dns_resolvers: Vec<Destination>,
) -> Result<(), ()> {
let (onion_message, context) =
self.hrn_resolver.resolve_name(payment_id, name, &*self.entropy_source)?;
let reply_paths = self.create_blinded_paths(MessageContext::DNSResolver(context))?;
let expiration = StaleExpiration::TimerTicks(1);
self.pending_outbound_payments.add_new_awaiting_offer(payment_id, expiration, retry_strategy, max_total_routing_fee_msat, amount_msats)?;
// Send the DNSSEC query to every resolver over each of our reply paths, up to the limit.
let message_params = dns_resolvers
.iter()
.flat_map(|destination| reply_paths.iter().map(move |path| (path, destination)))
.take(OFFERS_MESSAGE_REQUEST_LIMIT);
for (reply_path, destination) in message_params {
self.pending_dns_onion_messages.lock().unwrap().push((
DNSResolverMessage::DNSSECQuery(onion_message.clone()),
MessageSendInstructions::WithSpecifiedReplyPath {
destination: destination.clone(),
reply_path: reply_path.clone(),
},
));
}
Ok(())
}
/// Creates a fresh payment hash and secret for a new inbound payment expiring in
/// `invoice_expiry_delta_secs` seconds, via [`inbound_payment::create`]. Errors if the
/// parameters are rejected there.
pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32,
min_final_cltv_expiry_delta: Option<u16>) -> Result<(PaymentHash, PaymentSecret), ()> {
inbound_payment::create(&self.inbound_payment_key, min_value_msat, invoice_expiry_delta_secs,
&self.entropy_source, self.highest_seen_timestamp.load(Ordering::Acquire) as u64,
min_final_cltv_expiry_delta)
}
/// Creates a payment secret for a new inbound payment with a caller-provided
/// `payment_hash`, via [`inbound_payment::create_from_hash`]. Errors if the parameters are
/// rejected there.
pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>,
invoice_expiry_delta_secs: u32, min_final_cltv_expiry: Option<u16>) -> Result<PaymentSecret, ()> {
inbound_payment::create_from_hash(&self.inbound_payment_key, min_value_msat, payment_hash,
invoice_expiry_delta_secs, self.highest_seen_timestamp.load(Ordering::Acquire) as u64,
min_final_cltv_expiry)
}
/// Gets the payment preimage corresponding to a `payment_hash`/`payment_secret` pair
/// previously created via [`Self::create_inbound_payment`], delegating to
/// [`inbound_payment::get_payment_preimage`].
pub fn get_payment_preimage(&self, payment_hash: PaymentHash, payment_secret: PaymentSecret) -> Result<PaymentPreimage, APIError> {
inbound_payment::get_payment_preimage(payment_hash, payment_secret, &self.inbound_payment_key)
}
fn create_blinded_paths_using_absolute_expiry(
	&self, context: OffersContext, absolute_expiry: Option<Duration>,
) -> Result<Vec<BlindedMessagePath>, ()> {
	// Treat a missing expiry as "effectively forever" for the comparison below.
	let expiry = absolute_expiry.unwrap_or(Duration::MAX);
	let short_lived_cutoff =
		self.duration_since_epoch().saturating_add(MAX_SHORT_LIVED_RELATIVE_EXPIRY);
	if expiry <= short_lived_cutoff {
		// Short-lived contexts can use the compact path form.
		self.create_compact_blinded_paths(context)
	} else {
		self.create_blinded_paths(MessageContext::Offers(context))
	}
}
pub(super) fn duration_since_epoch(&self) -> Duration {
	// Without `std` we have no wall clock, so fall back to the most recent block header
	// timestamp we have seen.
	#[cfg(not(feature = "std"))]
	let since_epoch =
		Duration::from_secs(self.highest_seen_timestamp.load(Ordering::Acquire) as u64);
	// With `std` available, read the system clock directly.
	#[cfg(feature = "std")]
	let since_epoch = std::time::SystemTime::now()
		.duration_since(std::time::SystemTime::UNIX_EPOCH)
		.expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
	since_epoch
}
/// Creates a collection of blinded message paths terminating at us, offering our currently
/// connected, onion-message-supporting peers to the router as introduction candidates.
/// Errors if the router fails or returns no paths.
fn create_blinded_paths(&self, context: MessageContext) -> Result<Vec<BlindedMessagePath>, ()> {
let recipient = self.get_our_node_id();
let secp_ctx = &self.secp_ctx;
// Candidate introduction nodes: connected peers which support onion messages.
let peers = self.per_peer_state.read().unwrap()
.iter()
.map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
.filter(|(_, peer)| peer.is_connected)
.filter(|(_, peer)| peer.latest_features.supports_onion_messages())
.map(|(node_id, _)| *node_id)
.collect::<Vec<_>>();
self.message_router
.create_blinded_paths(recipient, context, peers, secp_ctx)
// Treat an empty path list as a failure.
.and_then(|paths| (!paths.is_empty()).then(|| paths).ok_or(()))
}
/// Like [`Self::create_blinded_paths`] but requests compact paths from the router, providing
/// for each candidate peer the short channel id of its oldest (by creation height) usable
/// channel with us, when one exists.
fn create_compact_blinded_paths(&self, context: OffersContext) -> Result<Vec<BlindedMessagePath>, ()> {
let recipient = self.get_our_node_id();
let secp_ctx = &self.secp_ctx;
// Candidate introduction nodes: connected peers which support onion messages, each
// annotated with an SCID the router may use for a more compact encoding.
let peers = self.per_peer_state.read().unwrap()
.iter()
.map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
.filter(|(_, peer)| peer.is_connected)
.filter(|(_, peer)| peer.latest_features.supports_onion_messages())
.map(|(node_id, peer)| MessageForwardNode {
node_id: *node_id,
short_channel_id: peer.channel_by_id
.iter()
.filter(|(_, channel)| channel.context().is_usable())
.min_by_key(|(_, channel)| channel.context().channel_creation_height)
.and_then(|(_, channel)| channel.context().get_short_channel_id()),
})
.collect::<Vec<_>>();
self.message_router
.create_compact_blinded_paths(recipient, MessageContext::Offers(context), peers, secp_ctx)
// Treat an empty path list as a failure.
.and_then(|paths| (!paths.is_empty()).then(|| paths).ok_or(()))
}
/// Creates blinded payment paths for receiving `amount_msats` with the given
/// `payment_secret`, authenticating the receive TLVs so we can later verify they are ours.
fn create_blinded_payment_paths(
&self, amount_msats: u64, payment_secret: PaymentSecret, payment_context: PaymentContext
) -> Result<Vec<BlindedPaymentPath>, ()> {
let expanded_key = &self.inbound_payment_key;
let entropy = &*self.entropy_source;
let secp_ctx = &self.secp_ctx;
let first_hops = self.list_usable_channels();
let payee_node_id = self.get_our_node_id();
// Bound how far in the future the final hop's HTLC may expire.
let max_cltv_expiry = self.best_block.read().unwrap().height + CLTV_FAR_FAR_AWAY
+ LATENCY_GRACE_PERIOD_BLOCKS;
let payee_tlvs = UnauthenticatedReceiveTlvs {
payment_secret,
payment_constraints: PaymentConstraints {
max_cltv_expiry,
htlc_minimum_msat: 1,
},
payment_context,
};
let nonce = Nonce::from_entropy_source(entropy);
let payee_tlvs = payee_tlvs.authenticate(nonce, expanded_key);
self.router.create_blinded_payment_paths(
payee_node_id, first_hops, payee_tlvs, amount_msats, secp_ctx
)
}
pub fn get_phantom_scid(&self) -> u64 {
	let best_block_height = self.best_block.read().unwrap().height;
	let short_to_chan_info = self.short_to_chan_info.read().unwrap();
	loop {
		let candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
		// Re-roll in the (astronomically unlikely) case the fake SCID collides with a
		// real channel we know about.
		if !short_to_chan_info.contains_key(&candidate) {
			return candidate;
		}
	}
}
pub fn get_phantom_route_hints(&self) -> PhantomRouteHints {
	// Gather everything needed to build route hints terminating at a phantom node
	// behind us: our usable channels, a fake SCID for the final hop, and our pubkey.
	let channels = self.list_usable_channels();
	let phantom_scid = self.get_phantom_scid();
	let real_node_pubkey = self.get_our_node_id();
	PhantomRouteHints { channels, phantom_scid, real_node_pubkey }
}
/// Returns a fake short channel id in the intercept namespace, re-rolling in the
/// (astronomically unlikely) case the candidate collides with a real channel we know about.
pub fn get_intercept_scid(&self) -> u64 {
let best_block_height = self.best_block.read().unwrap().height;
let short_to_chan_info = self.short_to_chan_info.read().unwrap();
loop {
let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
if short_to_chan_info.contains_key(&scid_candidate) { continue }
return scid_candidate
}
}
/// Builds an [`InFlightHtlcs`] snapshot covering every outbound-routed HTLC currently
/// in flight on our funded channels.
pub fn compute_inflight_htlcs(&self) -> InFlightHtlcs {
let mut inflight_htlcs = InFlightHtlcs::new();
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for chan in peer_state.channel_by_id.values().filter_map(
|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
) {
for (htlc_source, _) in chan.inflight_htlc_sources() {
// Only HTLCs we originated/forwarded along a route are counted.
if let HTLCSource::OutboundRoute { path, .. } = htlc_source {
inflight_htlcs.process_path(path, self.get_our_node_id());
}
}
}
}
inflight_htlcs
}
/// Test-only: drains all pending events via [`Self::process_pending_events`] into a `Vec`.
#[cfg(any(test, feature = "_test_utils"))]
pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
let events = core::cell::RefCell::new(Vec::new());
let event_handler = |event: events::Event| Ok(events.borrow_mut().push(event));
self.process_pending_events(&event_handler);
events.into_inner()
}
/// Test-only: pushes an event onto the pending-events queue with no completion action.
#[cfg(feature = "_test_utils")]
pub fn push_pending_event(&self, event: events::Event) {
let mut events = self.pending_events.lock().unwrap();
events.push_back((event, None));
}
/// Test-only: pops the oldest pending event, discarding its completion action (if any).
#[cfg(test)]
pub fn pop_pending_event(&self) -> Option<events::Event> {
	self.pending_events.lock().unwrap().pop_front().map(|(event, _action)| event)
}
/// Test-only: returns whether any outbound payments are still pending.
#[cfg(test)]
pub fn has_pending_payments(&self) -> bool {
self.pending_outbound_payments.has_pending_payments()
}
/// Test-only: drops all pending outbound payment state.
#[cfg(test)]
pub fn clear_pending_payments(&self) {
self.pending_outbound_payments.clear_pending_payments()
}
/// Removes `completed_blocker` (if any) from the set of actions blocking RAA monitor updates
/// for the given channel and, if nothing else is blocking it, replays the channel's next
/// blocked monitor update(s), looping while further updates remain.
fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey,
channel_funding_outpoint: OutPoint, channel_id: ChannelId,
mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
let logger = WithContext::from(
&self.logger, Some(counterparty_node_id), Some(channel_id), None
);
loop {
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state_lck = peer_state_mtx.lock().unwrap();
let peer_state = &mut *peer_state_lck;
// On the first pass, drop the now-completed blocker from the channel's set.
if let Some(blocker) = completed_blocker.take() {
if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
.get_mut(&channel_id)
{
blockers.retain(|iter| iter != &blocker);
}
}
// If something else still blocks this channel's monitor updates, stop here.
if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
channel_funding_outpoint, channel_id, counterparty_node_id) {
log_trace!(logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
&channel_id);
break;
}
if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(
channel_id) {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint);
if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
channel_id);
handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
peer_state_lck, peer_state, per_peer_state, chan);
// More updates were queued behind this one - go around again (the
// locks were released by the macro above).
if further_update_exists {
continue;
}
} else {
log_trace!(logger, "Unlocked monitor updating for channel {} without monitors to update",
channel_id);
}
}
}
} else {
log_debug!(logger,
"Got a release post-RAA monitor update for peer {} but the channel is gone",
log_pubkey!(counterparty_node_id));
}
break;
}
}
/// Runs the [`EventCompletionAction`]s attached to events which have now been handled.
fn handle_post_event_actions(&self, actions: Vec<EventCompletionAction>) {
	for action in actions {
		match action {
			EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
				channel_funding_outpoint: funding_outpoint,
				channel_id: chan_id,
				counterparty_node_id: cp_node_id,
			} => {
				// Release the RAA monitor-update hold; no specific blocker to clear here.
				self.handle_monitor_update_release(cp_node_id, funding_outpoint, chan_id, None);
			},
		}
	}
}
/// Processes pending events asynchronously, awaiting `handler` on each event in turn.
pub async fn process_pending_events_async<Future: core::future::Future<Output = Result<(), ReplayEvent>>, H: Fn(Event) -> Future>(
	&self, handler: H
) {
	let mut event;
	process_events_body!(self, event, { handler(event).await });
}
}
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
let events = RefCell::new(Vec::new());
PersistenceNotifierGuard::optionally_notify(self, || {
let mut result = NotifyOption::SkipPersistNoEvents;
if self.process_pending_monitor_events() {
result = NotifyOption::DoPersist;
}
if self.check_free_holding_cells() {
result = NotifyOption::DoPersist;
}
if self.maybe_generate_initial_closing_signed() {
result = NotifyOption::DoPersist;
}
let mut is_any_peer_connected = false;
let mut pending_events = Vec::new();
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
if peer_state.pending_msg_events.len() > 0 {
pending_events.append(&mut peer_state.pending_msg_events);
}
if peer_state.is_connected {
is_any_peer_connected = true
}
}
if is_any_peer_connected {
let mut broadcast_msgs = self.pending_broadcast_messages.lock().unwrap();
pending_events.append(&mut broadcast_msgs);
}
if !pending_events.is_empty() {
events.replace(pending_events);
}
result
});
events.into_inner()
}
}
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> EventsProvider for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	/// Processes pending events synchronously, passing each to `handler`.
	fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
		let mut event;
		process_events_body!(self, event, handler.handle_event(event));
	}
}
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> chain::Listen for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	// Connects a (possibly filtered) block on top of our current best tip.
	fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
		{
			// Blocks must arrive strictly in chain order: the new header must build directly
			// on the tip we last saw, at exactly the next height.
			let best_block = self.best_block.read().unwrap();
			assert_eq!(best_block.block_hash, header.prev_blockhash,
				"Blocks must be connected in chain-order - the connected header must build on the last connected header");
			assert_eq!(best_block.height, height - 1,
				"Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
		}

		// Delegate to the `Confirm` methods; `best_block_updated` also moves our tip forward.
		self.transactions_confirmed(header, txdata, height);
		self.best_block_updated(header, height);
	}

	// Disconnects (reorgs out) the current tip block.
	fn block_disconnected(&self, header: &Header, height: u32) {
		let _persistence_guard =
			PersistenceNotifierGuard::optionally_notify_skipping_background_events(
				self, || -> NotifyOption { NotifyOption::DoPersist });
		let new_height = height - 1;
		{
			// Disconnection must also be in chain order: only the current tip may be removed.
			let mut best_block = self.best_block.write().unwrap();
			assert_eq!(best_block.block_hash, header.block_hash(),
				"Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
			assert_eq!(best_block.height, height,
				"Blocks must be disconnected in chain-order - the disconnected block must have the correct height");
			// Roll the tip back to the disconnected block's parent.
			*best_block = BestBlock::new(header.prev_blockhash, new_height)
		}

		// Let every channel react to the (now lower) best block.
		self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None)));
	}
}
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> chain::Confirm for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	// Notifies all channels that the given transactions confirmed in the given block.
	fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) {
		let block_hash = header.block_hash();
		log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);

		let _persistence_guard =
			PersistenceNotifierGuard::optionally_notify_skipping_background_events(
				self, || -> NotifyOption { NotifyOption::DoPersist });
		// The per-channel callback returns (channel_ready, announcement_sigs); map it into
		// the three-tuple shape `do_chain_event` expects (no timed-out HTLCs here).
		self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None))
			.map(|(a, b)| (a, Vec::new(), b)));

		// If the confirmations landed in a block below our current tip, re-run best-block
		// processing at the tip height so channels observe the up-to-date height.
		let last_best_block_height = self.best_block.read().unwrap().height;
		if height < last_best_block_height {
			let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
			self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None)));
		}
	}

	// Updates our view of the best chain tip and notifies all channels.
	fn best_block_updated(&self, header: &Header, height: u32) {
		let block_hash = header.block_hash();
		log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);

		let _persistence_guard =
			PersistenceNotifierGuard::optionally_notify_skipping_background_events(
				self, || -> NotifyOption { NotifyOption::DoPersist });
		*self.best_block.write().unwrap() = BestBlock::new(block_hash, height);

		// Track the minimum acceptable feerates over the last FEERATE_TRACKING_BLOCKS blocks
		// (one sample per connected block), used below to detect stale channel feerates.
		let mut min_anchor_feerate = None;
		let mut min_non_anchor_feerate = None;
		if self.background_events_processed_since_startup.load(Ordering::Relaxed) {
			// Sliding window: drop the oldest sample once the window is full.
			let mut last_days_feerates = self.last_days_feerates.lock().unwrap();
			if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS {
				last_days_feerates.pop_front();
			}
			let anchor_feerate = self.fee_estimator
				.bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedAnchorChannelRemoteFee);
			let non_anchor_feerate = self.fee_estimator
				.bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee);
			last_days_feerates.push_back((anchor_feerate, non_anchor_feerate));
			// Only act on the minimums once we have a full window of samples.
			if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS {
				min_anchor_feerate = last_days_feerates.iter().map(|(f, _)| f).min().copied();
				min_non_anchor_feerate = last_days_feerates.iter().map(|(_, f)| f).min().copied();
			}
		}

		self.do_chain_event(Some(height), |channel| {
			let logger = WithChannelContext::from(&self.logger, &channel.context, None);
			// Check against the feerate floor matching the channel's commitment type.
			if channel.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				if let Some(feerate) = min_anchor_feerate {
					channel.check_for_stale_feerate(&logger, feerate)?;
				}
			} else {
				if let Some(feerate) = min_non_anchor_feerate {
					channel.check_for_stale_feerate(&logger, feerate)?;
				}
			}
			channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None))
		});

		// Monotonically ratchet a timestamp forward via compare-exchange, never decreasing it
		// (header timestamps are not guaranteed to be monotonic across blocks).
		macro_rules! max_time {
			($timestamp: expr) => {
				loop {
					let old_serial = $timestamp.load(Ordering::Acquire);
					if old_serial >= header.time as usize { break; }
					if $timestamp.compare_exchange(old_serial, header.time as usize, Ordering::AcqRel, Ordering::Relaxed).is_ok() {
						break;
					}
				}
			}
		}
		max_time!(self.highest_seen_timestamp);

		#[cfg(feature = "dnssec")] {
			let timestamp = self.highest_seen_timestamp.load(Ordering::Relaxed) as u32;
			self.hrn_resolver.new_best_block(height, timestamp);
		}
	}

	// Returns the funding txid, confirmation height, and containing block hash for each
	// confirmed funded channel, so callers can detect reorgs of those transactions.
	fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option<BlockHash>)> {
		let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
		for (_cp_id, peer_state_mutex) in self.per_peer_state.read().unwrap().iter() {
			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			let peer_state = &mut *peer_state_lock;
			for chan in peer_state.channel_by_id.values().filter_map(|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }) {
				let txid_opt = chan.context.get_funding_txo();
				let height_opt = chan.context.get_funding_tx_confirmation_height();
				let hash_opt = chan.context.get_funding_tx_confirmed_in();
				// Only channels whose funding tx has actually confirmed are relevant.
				if let (Some(funding_txo), Some(conf_height), Some(block_hash)) = (txid_opt, height_opt, hash_opt) {
					res.push((funding_txo.txid, conf_height, Some(block_hash)));
				}
			}
		}
		res
	}

	// Called when a previously-confirmed transaction was reorged out; only funding
	// transactions matter to us here.
	fn transaction_unconfirmed(&self, txid: &Txid) {
		let _persistence_guard =
			PersistenceNotifierGuard::optionally_notify_skipping_background_events(
				self, || -> NotifyOption { NotifyOption::DoPersist });
		// No height available for an unconfirmation, hence `None`.
		self.do_chain_event(None, |channel| {
			if let Some(funding_txo) = channel.context.get_funding_txo() {
				if funding_txo.txid == *txid {
					channel.funding_transaction_unconfirmed(&&WithChannelContext::from(&self.logger, &channel.context, None)).map(|()| (None, Vec::new(), None))
				} else { Ok((None, Vec::new(), None)) }
			} else { Ok((None, Vec::new(), None)) }
		});
	}
}
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	/// Applies a chain-event callback `f` to every funded channel, then acts on the results:
	/// sends any resulting `channel_ready`/announcement messages, fails HTLCs the callback
	/// timed out, force-closes channels for which `f` returned an error, and — when a height
	/// is given — times out expiring claimable and intercepted HTLCs.
	fn do_chain_event<FN: Fn(&mut Channel<SP>) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
			(&self, height_opt: Option<u32>, f: FN) {
		// Closures and HTLC failures are collected here and processed after all channel locks
		// are released (see the loops at the bottom).
		let mut failed_channels = Vec::new();
		let mut timed_out_htlcs = Vec::new();
		{
			let per_peer_state = self.per_peer_state.read().unwrap();
			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
				let peer_state = &mut *peer_state_lock;
				let pending_msg_events = &mut peer_state.pending_msg_events;
				// `retain` drops channels which `f` decided must close (the `return false`
				// in the error branch below).
				peer_state.channel_by_id.retain(|_, phase| {
					match phase {
						// Unfunded channels have no on-chain state; chain events don't apply.
						ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) |
						ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => true,
						ChannelPhase::Funded(channel) => {
							let res = f(channel);
							if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
								for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
									// 0x1000|14 is the BOLT 4 UPDATE|expiry_too_soon failure code.
									let failure_code = 0x1000|14;
									let data = self.get_htlc_inbound_temp_fail_data(failure_code);
									timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
										HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() }));
								}
								let logger = WithChannelContext::from(&self.logger, &channel.context, None);
								if let Some(channel_ready) = channel_ready_opt {
									send_channel_ready!(self, pending_msg_events, channel, channel_ready);
									if channel.context.is_usable() {
										log_trace!(logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", channel.context.channel_id());
										if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
											pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
												node_id: channel.context.get_counterparty_node_id(),
												msg,
											});
										}
									} else {
										log_trace!(logger, "Sending channel_ready WITHOUT channel_update for {}", channel.context.channel_id());
									}
								}

								{
									let mut pending_events = self.pending_events.lock().unwrap();
									emit_channel_ready_event!(pending_events, channel);
								}

								if let Some(height) = height_opt {
									// Periodically re-broadcast our channel announcement: every
									// sixth block within ~1008 blocks of funding confirmation.
									let funding_conf_height =
										channel.context.get_funding_tx_confirmation_height().unwrap_or(height);
									let rebroadcast_announcement = funding_conf_height < height + 1008
										&& funding_conf_height % 6 == height % 6;
									#[allow(unused_mut, unused_assignments)]
									let mut should_announce = announcement_sigs.is_some() || rebroadcast_announcement;
									// In tests, suppress the periodic rebroadcast to keep message
									// expectations deterministic.
									#[cfg(test)]
									{
										should_announce = announcement_sigs.is_some();
									}
									if should_announce {
										if let Some(announcement) = channel.get_signed_channel_announcement(
											&self.node_signer, self.chain_hash, height, &self.default_configuration,
										) {
											pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
												msg: announcement,
												update_msg: Some(self.get_channel_update_for_broadcast(channel).unwrap()),
											});
										}
									}
								}
								if let Some(announcement_sigs) = announcement_sigs {
									log_trace!(logger, "Sending announcement_signatures for channel {}", channel.context.channel_id());
									pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
										node_id: channel.context.get_counterparty_node_id(),
										msg: announcement_sigs,
									});
								}
								if channel.is_our_channel_ready() {
									if let Some(real_scid) = channel.context.get_short_channel_id() {
										// Register the channel's real SCID for forwarding lookups.
										let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
										let scid_insert = short_to_chan_info.insert(real_scid, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
										assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.context.get_counterparty_node_id(), channel.context.channel_id()),
											"SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
											fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
									}
								}
							} else if let Err(reason) = res {
								// The callback demanded closure: shut the channel down, queue a
								// broadcast channel_update, and tell the peer why we disconnected.
								let reason_message = format!("{}", reason);
								let mut close_res = channel.context.force_shutdown(true, reason);
								locked_close_channel!(self, peer_state, &channel.context, close_res);
								failed_channels.push(close_res);
								if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
									let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
									pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
										msg: update
									});
								}
								pending_msg_events.push(events::MessageSendEvent::HandleError {
									node_id: channel.context.get_counterparty_node_id(),
									action: msgs::ErrorAction::DisconnectPeer {
										msg: Some(msgs::ErrorMessage {
											channel_id: channel.context.channel_id(),
											data: reason_message,
										})
									},
								});
								// Drop the channel from the map.
								return false;
							}
							true
						}
					}
				});
			}
		}

		if let Some(height) = height_opt {
			// Fail claimable (inbound) HTLCs which are too close to expiry to safely claim.
			self.claimable_payments.lock().unwrap().claimable_payments.retain(|payment_hash, payment| {
				payment.htlcs.retain(|htlc| {
					if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
						// 0x4000|15 is BOLT 4 incorrect_or_unknown_payment_details; its data is
						// the HTLC amount followed by the current height.
						let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
						htlc_msat_height_data.extend_from_slice(&height.to_be_bytes());
						timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(),
							HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
							HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
						false
					} else { true }
				});
				// Drop the payment entry entirely once all its HTLCs have been failed.
				!payment.htlcs.is_empty() });

			// Likewise fail intercepted HTLCs which the user never acted on before expiry.
			let mut intercepted_htlcs = self.pending_intercepted_htlcs.lock().unwrap();
			intercepted_htlcs.retain(|_, htlc| {
				if height >= htlc.forward_info.outgoing_cltv_value - HTLC_FAIL_BACK_BUFFER {
					let prev_hop_data = HTLCSource::PreviousHopData(HTLCPreviousHopData {
						short_channel_id: htlc.prev_short_channel_id,
						user_channel_id: Some(htlc.prev_user_channel_id),
						htlc_id: htlc.prev_htlc_id,
						incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret,
						phantom_shared_secret: None,
						counterparty_node_id: htlc.prev_counterparty_node_id,
						outpoint: htlc.prev_funding_outpoint,
						channel_id: htlc.prev_channel_id,
						blinded_failure: htlc.forward_info.routing.blinded_failure(),
					});
					// Intercepted HTLCs are, by construction, always forwards.
					let requested_forward_scid = match htlc.forward_info.routing {
						PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
						_ => unreachable!(),
					};
					// 0x2000|2 is the BOLT 4 NODE|temporary_node_failure code.
					timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash,
						HTLCFailReason::from_failure_code(0x2000 | 2),
						HTLCDestination::InvalidForward { requested_forward_scid }));
					let logger = WithContext::from(
						&self.logger, None, Some(htlc.prev_channel_id), Some(htlc.forward_info.payment_hash)
					);
					log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
					false
				} else { true }
			});
		}

		// Now that all locks are dropped, finish closures and fail HTLCs backwards.
		for failure in failed_channels {
			self.finish_close_channel(failure);
		}

		for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) {
			self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, destination);
		}
	}

	/// Returns a [`Future`] which completes when an event is available or persistence is needed.
	pub fn get_event_or_persistence_needed_future(&self) -> Future {
		self.event_persist_notifier.get_future()
	}

	/// Atomically reads and clears the needs-persistence flag.
	pub fn get_and_clear_needs_persistence(&self) -> bool {
		self.needs_persist_flag.swap(false, Ordering::AcqRel)
	}

	// Test-only: exposes whether the event/persist notifier currently has a pending notify.
	#[cfg(any(test, feature = "_test_utils"))]
	pub fn get_event_or_persist_condvar_value(&self) -> bool {
		self.event_persist_notifier.notify_pending()
	}

	/// Returns our current view of the best chain tip.
	pub fn current_best_block(&self) -> BestBlock {
		self.best_block.read().unwrap().clone()
	}

	/// Returns the [`NodeFeatures`] we advertise, per our current configuration.
	pub fn node_features(&self) -> NodeFeatures {
		provided_node_features(&self.default_configuration)
	}

	// Test-only: the BOLT 11 invoice features we support, per our current configuration.
	#[cfg(any(feature = "_test_utils", test))]
	pub fn bolt11_invoice_features(&self) -> Bolt11InvoiceFeatures {
		provided_bolt11_invoice_features(&self.default_configuration)
	}

	// The BOLT 12 invoice features we support, per our current configuration.
	fn bolt12_invoice_features(&self) -> Bolt12InvoiceFeatures {
		provided_bolt12_invoice_features(&self.default_configuration)
	}

	/// Returns the [`ChannelFeatures`] we support, per our current configuration.
	pub fn channel_features(&self) -> ChannelFeatures {
		provided_channel_features(&self.default_configuration)
	}

	/// Returns the [`ChannelTypeFeatures`] we support, per our current configuration.
	pub fn channel_type_features(&self) -> ChannelTypeFeatures {
		provided_channel_type_features(&self.default_configuration)
	}

	/// Returns the [`InitFeatures`] we advertise on connection, per our current configuration.
	pub fn init_features(&self) -> InitFeatures {
		provided_init_features(&self.default_configuration)
	}
}
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
ChannelMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
// Handles an inbound `open_channel`, treating it as the V1 variant of channel-open.
fn handle_open_channel(&self, counterparty_node_id: PublicKey, msg: &msgs::OpenChannel) {
	let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
		let result = self.internal_open_channel(&counterparty_node_id, OpenChannelMessageRef::V1(msg));
		// A channel-closing error for a channel which doesn't exist yet indicates a bug, but
		// persist defensively in that case; otherwise only surface any generated events.
		let notify = if let Err(e) = &result {
			if e.closes_channel() {
				debug_assert!(false, "We shouldn't close a new channel");
				NotifyOption::DoPersist
			} else {
				NotifyOption::SkipPersistHandleEvents
			}
		} else {
			NotifyOption::SkipPersistHandleEvents
		};
		let _ = handle_error!(self, result, counterparty_node_id);
		notify
	});
}
// Handles an inbound `open_channel2`. Only supported when built with `dual_funding`.
fn handle_open_channel_v2(&self, counterparty_node_id: PublicKey, msg: &msgs::OpenChannelV2) {
	// With `dual_funding`: process like a V1 open. An error which closes the (not yet
	// existing) channel indicates a bug, but we persist defensively in that case.
	#[cfg(dual_funding)]
	let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
		let res = self.internal_open_channel(&counterparty_node_id, OpenChannelMessageRef::V2(msg));
		let persist = match &res {
			Err(e) if e.closes_channel() => {
				debug_assert!(false, "We shouldn't close a new channel");
				NotifyOption::DoPersist
			},
			_ => NotifyOption::SkipPersistHandleEvents,
		};
		let _ = handle_error!(self, res, counterparty_node_id);
		persist
	});
	// Without `dual_funding`: reject outright, scoped to the temporary channel id.
	#[cfg(not(dual_funding))]
	let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
		"Dual-funded channels not supported".to_owned(),
		msg.common_fields.temporary_channel_id.clone())), counterparty_node_id);
}
// Handles an inbound `accept_channel`. Never requires persistence itself; we only surface
// any events the internal handler generated.
fn handle_accept_channel(&self, counterparty_node_id: PublicKey, msg: &msgs::AcceptChannel) {
	let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
		let _ = handle_error!(self, self.internal_accept_channel(&counterparty_node_id, msg), counterparty_node_id);
		NotifyOption::SkipPersistHandleEvents
	});
}
// `accept_channel2` is not supported: reply with an error scoped to the temporary channel
// id, without closing any existing channel.
fn handle_accept_channel_v2(&self, counterparty_node_id: PublicKey, msg: &msgs::AcceptChannelV2) {
	let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
		"Dual-funded channels not supported".to_owned(),
		msg.common_fields.temporary_channel_id.clone())), counterparty_node_id);
}
// Handles an inbound `funding_created`; always persists (unconditional notify-on-drop guard).
fn handle_funding_created(&self, counterparty_node_id: PublicKey, msg: &msgs::FundingCreated) {
	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
	let _ = handle_error!(self, self.internal_funding_created(&counterparty_node_id, msg), counterparty_node_id);
}
// Handles an inbound `funding_signed`; always persists (unconditional notify-on-drop guard).
fn handle_funding_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::FundingSigned) {
	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
	let _ = handle_error!(self, self.internal_funding_signed(&counterparty_node_id, msg), counterparty_node_id);
}
// Handles an inbound `channel_ready`, persisting only when an error forces channel closure.
fn handle_channel_ready(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelReady) {
	let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
		let result = self.internal_channel_ready(&counterparty_node_id, msg);
		// Channel-closing errors must be persisted; anything else only needs event handling.
		let notify = if matches!(&result, Err(e) if e.closes_channel()) {
			NotifyOption::DoPersist
		} else {
			NotifyOption::SkipPersistHandleEvents
		};
		let _ = handle_error!(self, result, counterparty_node_id);
		notify
	});
}
// Quiescence (`stfu`) is not supported: reply with an error without closing the channel.
fn handle_stfu(&self, counterparty_node_id: PublicKey, msg: &msgs::Stfu) {
	let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
		"Quiescence not supported".to_owned(),
		msg.channel_id.clone())), counterparty_node_id);
}
// Splicing negotiation is not yet supported even when built with `splicing`: reply with an
// error without closing the channel.
#[cfg(splicing)]
fn handle_splice_init(&self, counterparty_node_id: PublicKey, msg: &msgs::SpliceInit) {
	let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
		"Splicing not supported".to_owned(),
		msg.channel_id.clone())), counterparty_node_id);
}
// `splice_ack` is unsupported for the same reason as `splice_init` above.
#[cfg(splicing)]
fn handle_splice_ack(&self, counterparty_node_id: PublicKey, msg: &msgs::SpliceAck) {
	let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
		"Splicing not supported (splice_ack)".to_owned(),
		msg.channel_id.clone())), counterparty_node_id);
}
// `splice_locked` is unsupported for the same reason as `splice_init` above.
#[cfg(splicing)]
fn handle_splice_locked(&self, counterparty_node_id: PublicKey, msg: &msgs::SpliceLocked) {
	let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
		"Splicing not supported (splice_locked)".to_owned(),
		msg.channel_id.clone())), counterparty_node_id);
}
// Handles an inbound `shutdown`; always persists (unconditional notify-on-drop guard).
fn handle_shutdown(&self, counterparty_node_id: PublicKey, msg: &msgs::Shutdown) {
	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
	let _ = handle_error!(self, self.internal_shutdown(&counterparty_node_id, msg), counterparty_node_id);
}
// Handles an inbound `closing_signed`; always persists (unconditional notify-on-drop guard).
fn handle_closing_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::ClosingSigned) {
	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
	let _ = handle_error!(self, self.internal_closing_signed(&counterparty_node_id, msg), counterparty_node_id);
}
// Handles an inbound `update_add_htlc`, deciding persistence from how handling went.
fn handle_update_add_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateAddHTLC) {
	let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
		let result = self.internal_update_add_htlc(&counterparty_node_id, msg);
		let notify = match &result {
			// A channel-closing error must be persisted.
			Err(e) if e.closes_channel() => NotifyOption::DoPersist,
			// Other errors may generate events to surface but need no persistence.
			Err(_) => NotifyOption::SkipPersistHandleEvents,
			// A clean success needs neither persistence nor event processing here.
			Ok(()) => NotifyOption::SkipPersistNoEvents,
		};
		let _ = handle_error!(self, result, counterparty_node_id);
		notify
	});
}
// Handles an inbound `update_fulfill_htlc`; always persists (we learned a preimage).
fn handle_update_fulfill_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFulfillHTLC) {
	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
	let _ = handle_error!(self, self.internal_update_fulfill_htlc(&counterparty_node_id, msg), counterparty_node_id);
}
// Handles an inbound `update_fail_htlc`. Persists only on a channel-closing error; other
// errors just surface events, and success requires nothing further here.
fn handle_update_fail_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFailHTLC) {
	let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
		let res = self.internal_update_fail_htlc(&counterparty_node_id, msg);
		let persist = match &res {
			Err(e) if e.closes_channel() => NotifyOption::DoPersist,
			Err(_) => NotifyOption::SkipPersistHandleEvents,
			Ok(()) => NotifyOption::SkipPersistNoEvents,
		};
		let _ = handle_error!(self, res, counterparty_node_id);
		persist
	});
}
// Handles an inbound `update_fail_malformed_htlc`; persistence decided as in
// `handle_update_fail_htlc`.
fn handle_update_fail_malformed_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFailMalformedHTLC) {
	let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
		let res = self.internal_update_fail_malformed_htlc(&counterparty_node_id, msg);
		let persist = match &res {
			Err(e) if e.closes_channel() => NotifyOption::DoPersist,
			Err(_) => NotifyOption::SkipPersistHandleEvents,
			Ok(()) => NotifyOption::SkipPersistNoEvents,
		};
		let _ = handle_error!(self, res, counterparty_node_id);
		persist
	});
}
// Handles an inbound `commitment_signed`; always persists (unconditional notify-on-drop guard).
fn handle_commitment_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::CommitmentSigned) {
	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
	let _ = handle_error!(self, self.internal_commitment_signed(&counterparty_node_id, msg), counterparty_node_id);
}
// Handles an inbound `revoke_and_ack`; always persists (unconditional notify-on-drop guard).
fn handle_revoke_and_ack(&self, counterparty_node_id: PublicKey, msg: &msgs::RevokeAndACK) {
	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
	let _ = handle_error!(self, self.internal_revoke_and_ack(&counterparty_node_id, msg), counterparty_node_id);
}
// Handles an inbound `update_fee`. Persists only on a channel-closing error; other errors
// just surface events, and success requires nothing further here.
fn handle_update_fee(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFee) {
	let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
		let res = self.internal_update_fee(&counterparty_node_id, msg);
		let persist = match &res {
			Err(e) if e.closes_channel() => NotifyOption::DoPersist,
			Err(_) => NotifyOption::SkipPersistHandleEvents,
			Ok(()) => NotifyOption::SkipPersistNoEvents,
		};
		let _ = handle_error!(self, res, counterparty_node_id);
		persist
	});
}
// Handles inbound `announcement_signatures`; always persists (unconditional notify-on-drop guard).
fn handle_announcement_signatures(&self, counterparty_node_id: PublicKey, msg: &msgs::AnnouncementSignatures) {
	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
	let _ = handle_error!(self, self.internal_announcement_signatures(&counterparty_node_id, msg), counterparty_node_id);
}
// Handles an inbound `channel_update`; the internal handler reports whether a persist is
// needed on success, and any error results in a persist.
fn handle_channel_update(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelUpdate) {
	// The guard returned by `optionally_notify` is dropped at the end of this statement.
	PersistenceNotifierGuard::optionally_notify(self, || {
		match handle_error!(self, self.internal_channel_update(&counterparty_node_id, msg), counterparty_node_id) {
			Ok(persist) => persist,
			Err(_) => NotifyOption::DoPersist,
		}
	});
}
// Handles an inbound `channel_reestablish`. On success the internal handler itself reports
// the required persistence; a channel-closing error forces a persist, other errors just
// surface events.
fn handle_channel_reestablish(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelReestablish) {
	let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
		let res = self.internal_channel_reestablish(&counterparty_node_id, msg);
		let persist = match &res {
			Err(e) if e.closes_channel() => NotifyOption::DoPersist,
			Err(_) => NotifyOption::SkipPersistHandleEvents,
			Ok(persist) => *persist,
		};
		let _ = handle_error!(self, res, counterparty_node_id);
		persist
	});
}
// Handles a peer disconnecting: funded channels are paused for later reestablish,
// unfunded (non-resumable) channels are force-closed, and any queued direct messages
// to the peer are dropped.
fn peer_disconnected(&self, counterparty_node_id: PublicKey) {
// Disconnection itself never requires persisting the ChannelManager, but closing
// unfunded channels below may generate events.
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(
self, || NotifyOption::SkipPersistHandleEvents);
// Collected shutdown results; processed after all locks are released (see bottom).
let mut failed_channels = Vec::new();
let mut per_peer_state = self.per_peer_state.write().unwrap();
let remove_peer = {
log_debug!(
WithContext::from(&self.logger, Some(counterparty_node_id), None, None),
"Marking channels with {} disconnected and generating channel_updates.",
log_pubkey!(counterparty_node_id)
);
if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
// Keep channels that can survive a disconnect (funded channels that were
// successfully paused, resumable outbound-V1 channels); force-close the rest.
peer_state.channel_by_id.retain(|_, phase| {
let context = match phase {
ChannelPhase::Funded(chan) => {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
// A funded channel is retained if we can drop its uncommitted HTLCs
// and mark it paused until reestablish.
if chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok() {
return true;
}
&mut chan.context
},
ChannelPhase::UnfundedOutboundV1(chan) if chan.is_resumable() => return true,
ChannelPhase::UnfundedOutboundV1(chan) => &mut chan.context,
ChannelPhase::UnfundedInboundV1(chan) => {
&mut chan.context
},
ChannelPhase::UnfundedOutboundV2(chan) => {
&mut chan.context
},
ChannelPhase::UnfundedInboundV2(chan) => {
&mut chan.context
},
};
// Reaching here means the channel cannot survive the disconnect: shut it
// down and queue the result for processing outside the locks.
let mut close_res = context.force_shutdown(false, ClosureReason::DisconnectedPeer);
locked_close_channel!(self, peer_state, &context, close_res);
failed_channels.push(close_res);
false
});
// Pending inbound channel requests are meaningless once the peer is gone.
peer_state.inbound_channel_request_by_id.clear();
// Drop all channel-level messages addressed directly to the now-disconnected
// peer; only broadcast-type gossip events are kept.
pending_msg_events.retain(|msg| {
match msg {
&events::MessageSendEvent::SendAcceptChannel { .. } => false,
&events::MessageSendEvent::SendOpenChannel { .. } => false,
&events::MessageSendEvent::SendFundingCreated { .. } => false,
&events::MessageSendEvent::SendFundingSigned { .. } => false,
&events::MessageSendEvent::SendAcceptChannelV2 { .. } => false,
&events::MessageSendEvent::SendOpenChannelV2 { .. } => false,
&events::MessageSendEvent::SendChannelReady { .. } => false,
&events::MessageSendEvent::SendAnnouncementSignatures { .. } => false,
&events::MessageSendEvent::SendStfu { .. } => false,
&events::MessageSendEvent::SendSpliceInit { .. } => false,
&events::MessageSendEvent::SendSpliceAck { .. } => false,
&events::MessageSendEvent::SendSpliceLocked { .. } => false,
&events::MessageSendEvent::SendTxAddInput { .. } => false,
&events::MessageSendEvent::SendTxAddOutput { .. } => false,
&events::MessageSendEvent::SendTxRemoveInput { .. } => false,
&events::MessageSendEvent::SendTxRemoveOutput { .. } => false,
&events::MessageSendEvent::SendTxComplete { .. } => false,
&events::MessageSendEvent::SendTxSignatures { .. } => false,
&events::MessageSendEvent::SendTxInitRbf { .. } => false,
&events::MessageSendEvent::SendTxAckRbf { .. } => false,
&events::MessageSendEvent::SendTxAbort { .. } => false,
&events::MessageSendEvent::UpdateHTLCs { .. } => false,
&events::MessageSendEvent::SendRevokeAndACK { .. } => false,
&events::MessageSendEvent::SendClosingSigned { .. } => false,
&events::MessageSendEvent::SendShutdown { .. } => false,
&events::MessageSendEvent::SendChannelReestablish { .. } => false,
&events::MessageSendEvent::HandleError { .. } => false,
&events::MessageSendEvent::SendChannelAnnouncement { .. } => false,
&events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
&events::MessageSendEvent::BroadcastChannelUpdate { .. } => {
debug_assert!(false, "This event shouldn't have been here");
false
},
&events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
&events::MessageSendEvent::SendChannelUpdate { .. } => false,
&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
&events::MessageSendEvent::SendShortIdsQuery { .. } => false,
&events::MessageSendEvent::SendReplyChannelRange { .. } => false,
&events::MessageSendEvent::SendGossipTimestampFilter { .. } => false,
}
});
debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect");
peer_state.is_connected = false;
// Only drop the PeerState entirely if nothing in it still needs tracking.
peer_state.ok_to_remove(true)
} else { debug_assert!(false, "Unconnected peer disconnected"); true }
};
if remove_peer {
per_peer_state.remove(&counterparty_node_id);
}
// Release the per_peer_state write lock before finishing channel closure, which may
// take other locks and generate events.
mem::drop(per_peer_state);
for failure in failed_channels.drain(..) {
self.finish_close_channel(failure);
}
}
// Handles a peer (re)connecting: creates or refreshes its PeerState and queues
// channel_reestablish / open_channel retransmissions for its channels. Returns Err(())
// to request a disconnect (unsupported features or too many channel-less peers).
fn peer_connected(&self, counterparty_node_id: PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> {
let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
// We require option_static_remotekey from all peers; refuse the connection otherwise.
if !init_msg.features.supports_static_remote_key() {
log_debug!(logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
return Err(());
}
let mut res = Ok(());
PersistenceNotifierGuard::optionally_notify(self, || {
// Limit the number of inbound connections from peers with no funded channels.
let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected);
let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
{
let mut peer_state_lock = self.per_peer_state.write().unwrap();
match peer_state_lock.entry(counterparty_node_id.clone()) {
hash_map::Entry::Vacant(e) => {
// Brand-new peer: reject if we're over the no-channel-peer limit,
// otherwise build a fresh, empty PeerState for it.
if inbound_peer_limited {
res = Err(());
return NotifyOption::SkipPersistNoEvents;
}
e.insert(Mutex::new(PeerState {
channel_by_id: new_hash_map(),
inbound_channel_request_by_id: new_hash_map(),
latest_features: init_msg.features.clone(),
pending_msg_events: Vec::new(),
in_flight_monitor_updates: BTreeMap::new(),
monitor_update_blocked_actions: BTreeMap::new(),
actions_blocking_raa_monitor_updates: BTreeMap::new(),
closed_channel_monitor_update_ids: BTreeMap::new(),
is_connected: true,
}));
},
hash_map::Entry::Occupied(e) => {
let mut peer_state = e.get().lock().unwrap();
peer_state.latest_features = init_msg.features.clone();
let best_block_height = self.best_block.read().unwrap().height;
// A known peer still counts against the limit if every channel it has
// is unfunded.
if inbound_peer_limited &&
Self::unfunded_channel_count(&*peer_state, best_block_height) ==
peer_state.channel_by_id.len()
{
res = Err(());
return NotifyOption::SkipPersistNoEvents;
}
debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
peer_state.is_connected = true;
},
}
}
log_debug!(logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
// Re-acquire as a read lock and queue retransmissions for each of the peer's channels.
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
for (_, phase) in peer_state.channel_by_id.iter_mut() {
match phase {
// Funded channels resume via channel_reestablish.
ChannelPhase::Funded(chan) => {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
node_id: chan.context.get_counterparty_node_id(),
msg: chan.get_channel_reestablish(&&logger),
});
}
// Unfunded outbound channels restart negotiation by re-sending open_channel.
ChannelPhase::UnfundedOutboundV1(chan) => {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
if let Some(msg) = chan.get_open_channel(self.chain_hash, &&logger) {
pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
node_id: chan.context.get_counterparty_node_id(),
msg,
});
}
}
ChannelPhase::UnfundedOutboundV2(chan) => {
pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 {
node_id: chan.context.get_counterparty_node_id(),
msg: chan.get_open_channel_v2(self.chain_hash),
});
},
// Unfunded inbound channels should have been dropped on disconnect, so
// none should remain here.
ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedInboundV2(_) => {
debug_assert!(false);
}
}
}
}
return NotifyOption::SkipPersistHandleEvents;
});
res
}
// Handles an incoming `error` message from a peer. Special-cases two known LND error
// strings (bug 6039) by re-sending shutdown rather than closing; otherwise force-closes
// the referenced channel (or all channels, for an all-zero channel_id).
fn handle_error(&self, counterparty_node_id: PublicKey, msg: &msgs::ErrorMessage) {
match &msg.data as &str {
// These exact strings are produced by LND bug 6039: it errors instead of
// handling our shutdown. Work around it by retransmitting shutdown and warning,
// instead of treating the error as fatal.
"cannot co-op close channel w/ active htlcs"|
"link failed to shutdown" =>
{
if !msg.channel_id.is_zero() {
PersistenceNotifierGuard::optionally_notify(
self,
|| -> NotifyOption {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
if peer_state_mutex_opt.is_none() { return NotifyOption::SkipPersistNoEvents; }
let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
// Re-send our shutdown (if we have one outstanding) so the peer
// can eventually process it.
if let Some(msg) = chan.get_outbound_shutdown() {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
node_id: counterparty_node_id,
msg,
});
}
peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: counterparty_node_id,
action: msgs::ErrorAction::SendWarningMessage {
msg: msgs::WarningMessage {
channel_id: msg.channel_id,
data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
},
log_level: Level::Trace,
}
});
return NotifyOption::SkipPersistHandleEvents;
}
NotifyOption::SkipPersistNoEvents
}
);
}
return;
}
_ => {}
}
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
if msg.channel_id.is_zero() {
// An all-zero channel_id means the error applies to every channel with this peer.
// Snapshot the ids first so force-closing doesn't race the peer-state lock.
let channel_ids: Vec<ChannelId> = {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
if peer_state_mutex_opt.is_none() { return; }
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
peer_state.inbound_channel_request_by_id.clear();
peer_state.channel_by_id.keys().cloned().collect()
};
for channel_id in channel_ids {
let _ = self.force_close_channel_with_peer(&channel_id, &counterparty_node_id, Some(&msg.data), true);
}
} else {
{
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
if peer_state_mutex_opt.is_none() { return; }
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.get_mut(&msg.channel_id) {
// For unfunded outbound channels the error may just indicate the peer
// dislikes our parameters; try re-negotiating instead of closing.
Some(ChannelPhase::UnfundedOutboundV1(ref mut chan)) => {
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator, &&logger) {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
node_id: counterparty_node_id,
msg,
});
return;
}
},
Some(ChannelPhase::UnfundedOutboundV2(ref mut chan)) => {
if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 {
node_id: counterparty_node_id,
msg,
});
return;
}
},
None | Some(ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedInboundV2(_) | ChannelPhase::Funded(_)) => (),
}
}
let _ = self.force_close_channel_with_peer(&msg.channel_id, &counterparty_node_id, Some(&msg.data), true);
}
}
fn provided_node_features(&self) -> NodeFeatures {
	// Node features are derived entirely from our current configuration.
	let config = &self.default_configuration;
	provided_node_features(config)
}
fn provided_init_features(&self, _their_init_features: PublicKey) -> InitFeatures {
	// Init features depend only on our configuration; the peer's identity is ignored.
	let config = &self.default_configuration;
	provided_init_features(config)
}
fn get_chain_hashes(&self) -> Option<Vec<ChainHash>> {
	// We only ever operate on a single chain, so advertise exactly one hash.
	let mut hashes = Vec::with_capacity(1);
	hashes.push(self.chain_hash);
	Some(hashes)
}
fn handle_tx_add_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddInput) {
	// Interactive-tx construction messages never require persisting the ChannelManager,
	// but may queue response messages, so only notify for event handling.
	let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
		let res = self.internal_tx_add_input(counterparty_node_id, msg);
		let _ = handle_error!(self, res, counterparty_node_id);
		NotifyOption::SkipPersistHandleEvents
	});
}
fn handle_tx_add_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddOutput) {
	// Interactive-tx construction messages never require persisting the ChannelManager,
	// but may queue response messages, so only notify for event handling.
	let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
		let res = self.internal_tx_add_output(counterparty_node_id, msg);
		let _ = handle_error!(self, res, counterparty_node_id);
		NotifyOption::SkipPersistHandleEvents
	});
}
fn handle_tx_remove_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveInput) {
	// Interactive-tx construction messages never require persisting the ChannelManager,
	// but may queue response messages, so only notify for event handling.
	let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
		let res = self.internal_tx_remove_input(counterparty_node_id, msg);
		let _ = handle_error!(self, res, counterparty_node_id);
		NotifyOption::SkipPersistHandleEvents
	});
}
fn handle_tx_remove_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveOutput) {
	// Interactive-tx construction messages never require persisting the ChannelManager,
	// but may queue response messages, so only notify for event handling.
	let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
		let res = self.internal_tx_remove_output(counterparty_node_id, msg);
		let _ = handle_error!(self, res, counterparty_node_id);
		NotifyOption::SkipPersistHandleEvents
	});
}
fn handle_tx_complete(&self, counterparty_node_id: PublicKey, msg: &msgs::TxComplete) {
	// tx_complete only advances interactive construction; no ChannelManager persist is
	// needed, though responses may be queued.
	let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
		let res = self.internal_tx_complete(counterparty_node_id, msg);
		let _ = handle_error!(self, res, counterparty_node_id);
		NotifyOption::SkipPersistHandleEvents
	});
}
fn handle_tx_signatures(&self, counterparty_node_id: PublicKey, msg: &msgs::TxSignatures) {
	// Unlike the other interactive-tx messages, tx_signatures always persists on drop.
	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
	let res = self.internal_tx_signatures(&counterparty_node_id, msg);
	let _ = handle_error!(self, res, counterparty_node_id);
}
fn handle_tx_init_rbf(&self, counterparty_node_id: PublicKey, msg: &msgs::TxInitRbf) {
let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
"Dual-funded channels not supported".to_owned(),
msg.channel_id.clone())), counterparty_node_id);
}
fn handle_tx_ack_rbf(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAckRbf) {
let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
"Dual-funded channels not supported".to_owned(),
msg.channel_id.clone())), counterparty_node_id);
}
fn handle_tx_abort(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAbort) {
	// tx_abort tears down an in-progress interactive construction; no ChannelManager
	// persist is required, though responses may be queued.
	let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
		let res = self.internal_tx_abort(&counterparty_node_id, msg);
		let _ = handle_error!(self, res, counterparty_node_id);
		NotifyOption::SkipPersistHandleEvents
	});
}
// Called when any onion message is received: uses the opportunity to retry any
// invoice_requests that are still awaiting a Bolt12 invoice response.
fn message_received(&self) {
for (payment_id, retryable_invoice_request) in self
.pending_outbound_payments
.release_invoice_requests_awaiting_invoice()
{
let RetryableInvoiceRequest { invoice_request, nonce } = retryable_invoice_request;
// Authenticate the eventual reply by binding the payment_id to an HMAC carried in
// the reply path's context.
let hmac = payment_id.hmac_for_offer_payment(nonce, &self.inbound_payment_key);
let context = MessageContext::Offers(OffersContext::OutboundPayment {
payment_id,
nonce,
hmac: Some(hmac)
});
// Build fresh reply paths and re-enqueue the invoice_request; failures are only
// logged since the payment can be retried on a later message.
match self.create_blinded_paths(context) {
Ok(reply_paths) => match self.enqueue_invoice_request(invoice_request, reply_paths) {
Ok(_) => {}
Err(_) => {
log_warn!(self.logger,
"Retry failed for an invoice request with payment_id: {}",
payment_id
);
}
},
Err(_) => {
log_warn!(self.logger,
"Retry failed for an invoice request with payment_id: {}. \
Reason: router could not find a blinded path to include as the reply path",
payment_id
);
}
}
}
}
}
// BOLT 12 Offers onion-message handling: responds to invoice_requests with invoices,
// pays received invoices, and processes invoice_errors.
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
OffersMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
// Handles one incoming Offers message, optionally returning a response to send over
// the given responder's reply path.
fn handle_message(
&self, message: OffersMessage, context: Option<OffersContext>, responder: Option<Responder>,
) -> Option<(OffersMessage, ResponseInstruction)> {
let secp_ctx = &self.secp_ctx;
let expanded_key = &self.inbound_payment_key;
// Converts a pay-invoice result into either an InvoiceError response (when there is
// a responder) or an early `return None`. Shared by the Invoice and StaticInvoice arms.
macro_rules! handle_pay_invoice_res {
($res: expr, $invoice: expr, $logger: expr) => {{
let error = match $res {
Err(Bolt12PaymentError::UnknownRequiredFeatures) => {
log_trace!(
$logger, "Invoice requires unknown features: {:?}",
$invoice.invoice_features()
);
InvoiceError::from(Bolt12SemanticError::UnknownRequiredFeatures)
},
Err(Bolt12PaymentError::SendingFailed(e)) => {
log_trace!($logger, "Failed paying invoice: {:?}", e);
InvoiceError::from_string(format!("{:?}", e))
},
#[cfg(async_payments)]
Err(Bolt12PaymentError::BlindedPathCreationFailed) => {
let err_msg = "Failed to create a blinded path back to ourselves";
log_trace!($logger, "{}", err_msg);
InvoiceError::from_string(err_msg.to_string())
},
Err(Bolt12PaymentError::UnexpectedInvoice)
| Err(Bolt12PaymentError::DuplicateInvoice)
| Ok(()) => return None,
};
match responder {
Some(responder) => return Some((OffersMessage::InvoiceError(error), responder.respond())),
None => {
log_trace!($logger, "No reply path to send error: {:?}", error);
return None
},
}
}}
}
match message {
OffersMessage::InvoiceRequest(invoice_request) => {
// We can only respond to an invoice_request over a reply path.
let responder = match responder {
Some(responder) => responder,
None => return None,
};
// Determine how to authenticate the request: via the nonce in our blinded
// path context, or via the request's own metadata (legacy offers).
let nonce = match context {
None if invoice_request.metadata().is_some() => None,
Some(OffersContext::InvoiceRequest { nonce }) => Some(nonce),
_ => return None,
};
// Verify the request actually corresponds to one of our offers; silently
// drop unverifiable requests.
let invoice_request = match nonce {
Some(nonce) => match invoice_request.verify_using_recipient_data(
nonce, expanded_key, secp_ctx,
) {
Ok(invoice_request) => invoice_request,
Err(()) => return None,
},
None => match invoice_request.verify_using_metadata(expanded_key, secp_ctx) {
Ok(invoice_request) => invoice_request,
Err(()) => return None,
},
};
let amount_msats = match InvoiceBuilder::<DerivedSigningPubkey>::amount_msats(
&invoice_request.inner
) {
Ok(amount_msats) => amount_msats,
Err(error) => return Some((OffersMessage::InvoiceError(error.into()), responder.respond())),
};
// Register an inbound payment for the invoice we are about to issue.
let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
let (payment_hash, payment_secret) = match self.create_inbound_payment(
Some(amount_msats), relative_expiry, None
) {
Ok((payment_hash, payment_secret)) => (payment_hash, payment_secret),
Err(()) => {
let error = Bolt12SemanticError::InvalidAmount;
return Some((OffersMessage::InvoiceError(error.into()), responder.respond()));
},
};
let payment_context = PaymentContext::Bolt12Offer(Bolt12OfferContext {
offer_id: invoice_request.offer_id,
invoice_request: invoice_request.fields(),
});
// The invoice must carry blinded payment paths to us.
let payment_paths = match self.create_blinded_payment_paths(
amount_msats, payment_secret, payment_context
) {
Ok(payment_paths) => payment_paths,
Err(()) => {
let error = Bolt12SemanticError::MissingPaths;
return Some((OffersMessage::InvoiceError(error.into()), responder.respond()));
},
};
// Without std we cannot read the wall clock; use the highest timestamp seen
// on-chain as the invoice creation time.
#[cfg(not(feature = "std"))]
let created_at = Duration::from_secs(
self.highest_seen_timestamp.load(Ordering::Acquire) as u64
);
// Sign with derived keys when the offer used them; otherwise sign explicitly
// via the node signer.
let response = if invoice_request.keys.is_some() {
#[cfg(feature = "std")]
let builder = invoice_request.respond_using_derived_keys(
payment_paths, payment_hash
);
#[cfg(not(feature = "std"))]
let builder = invoice_request.respond_using_derived_keys_no_std(
payment_paths, payment_hash, created_at
);
builder
.map(InvoiceBuilder::<DerivedSigningPubkey>::from)
.and_then(|builder| builder.allow_mpp().build_and_sign(secp_ctx))
.map_err(InvoiceError::from)
} else {
#[cfg(feature = "std")]
let builder = invoice_request.respond_with(payment_paths, payment_hash);
#[cfg(not(feature = "std"))]
let builder = invoice_request.respond_with_no_std(
payment_paths, payment_hash, created_at
);
builder
.map(InvoiceBuilder::<ExplicitSigningPubkey>::from)
.and_then(|builder| builder.allow_mpp().build())
.map_err(InvoiceError::from)
.and_then(|invoice| {
#[cfg(c_bindings)]
let mut invoice = invoice;
invoice
.sign(|invoice: &UnsignedBolt12Invoice|
self.node_signer.sign_bolt12_invoice(invoice)
)
.map_err(InvoiceError::from)
})
};
match response {
Ok(invoice) => {
// Attach an authenticated InboundPayment context so any later
// invoice_error reply can be tied back to this payment_hash.
let nonce = Nonce::from_entropy_source(&*self.entropy_source);
let hmac = payment_hash.hmac_for_offer_payment(nonce, expanded_key);
let context = MessageContext::Offers(OffersContext::InboundPayment { payment_hash, nonce, hmac });
Some((OffersMessage::Invoice(invoice), responder.respond_with_reply_path(context)))
},
Err(error) => Some((OffersMessage::InvoiceError(error.into()), responder.respond())),
}
},
OffersMessage::Invoice(invoice) => {
// Verify the invoice matches one of our pending outbound payments.
let payment_id = match self.verify_bolt12_invoice(&invoice, context.as_ref()) {
Ok(payment_id) => payment_id,
Err(()) => return None,
};
let logger = WithContext::from(
&self.logger, None, None, Some(invoice.payment_hash()),
);
// Surface the invoice as an event instead of paying it, if so configured.
if self.default_configuration.manually_handle_bolt12_invoices {
let event = Event::InvoiceReceived {
payment_id, invoice, context, responder,
};
self.pending_events.lock().unwrap().push_back((event, None));
return None;
}
let res = self.send_payment_for_verified_bolt12_invoice(&invoice, payment_id);
handle_pay_invoice_res!(res, invoice, logger);
},
#[cfg(async_payments)]
OffersMessage::StaticInvoice(invoice) => {
// Static invoices are only accepted with an authenticated OutboundPayment
// context proving we requested them.
let payment_id = match context {
Some(OffersContext::OutboundPayment { payment_id, nonce, hmac: Some(hmac) }) => {
if payment_id.verify_for_offer_payment(hmac, nonce, expanded_key).is_err() {
return None
}
payment_id
},
_ => return None
};
let res = self.initiate_async_payment(&invoice, payment_id);
handle_pay_invoice_res!(res, invoice, self.logger);
},
OffersMessage::InvoiceError(invoice_error) => {
// Recover the payment_hash for logging, if the context authenticates.
let payment_hash = match context {
Some(OffersContext::InboundPayment { payment_hash, nonce, hmac }) => {
match payment_hash.verify_for_offer_payment(hmac, nonce, expanded_key) {
Ok(_) => Some(payment_hash),
Err(_) => None,
}
},
_ => None,
};
let logger = WithContext::from(&self.logger, None, None, payment_hash);
log_trace!(logger, "Received invoice_error: {}", invoice_error);
// If the error refers to one of our outbound payments (authenticated via
// HMAC), abandon that payment as rejected.
match context {
Some(OffersContext::OutboundPayment { payment_id, nonce, hmac: Some(hmac) }) => {
if let Ok(()) = payment_id.verify_for_offer_payment(hmac, nonce, expanded_key) {
self.abandon_payment_with_reason(
payment_id, PaymentFailureReason::InvoiceRequestRejected,
);
}
},
_ => {},
}
None
},
}
}
// Drains and returns all queued outbound Offers messages.
fn release_pending_messages(&self) -> Vec<(OffersMessage, MessageSendInstructions)> {
core::mem::take(&mut self.pending_offers_messages.lock().unwrap())
}
}
// Async-payments onion-message handling. Most functionality is gated on the
// `async_payments` cfg flag.
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
AsyncPaymentsMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
// We never act as the recipient's counterparty holding HTLCs, so this is a no-op.
fn handle_held_htlc_available(
&self, _message: HeldHtlcAvailable, _responder: Option<Responder>
) -> Option<(ReleaseHeldHtlc, ResponseInstruction)> {
None
}
// Releases a payment we were holding until the async recipient came online. No-op
// unless built with cfg(async_payments).
fn handle_release_held_htlc(&self, _message: ReleaseHeldHtlc, _context: AsyncPaymentsContext) {
#[cfg(async_payments)] {
let AsyncPaymentsContext::OutboundPayment { payment_id, hmac, nonce } = _context;
// Only act on contexts we authenticated ourselves.
if payment_id.verify_for_async_payment(hmac, nonce, &self.inbound_payment_key).is_err() { return }
if let Err(e) = self.send_payment_for_static_invoice(payment_id) {
log_trace!(
self.logger, "Failed to release held HTLC with payment id {}: {:?}", payment_id, e
);
}
}
}
// Drains and returns all queued outbound async-payments messages.
fn release_pending_messages(&self) -> Vec<(AsyncPaymentsMessage, MessageSendInstructions)> {
core::mem::take(&mut self.pending_async_payments_messages.lock().unwrap())
}
}
// BIP 353 human-readable-name resolution: handles DNSSEC proofs that resolve a name to
// an offer, then kicks off payment for any payments awaiting that offer.
#[cfg(feature = "dnssec")]
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
DNSResolverMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
// We act as a resolution client, not a resolver, so queries are ignored.
fn handle_dnssec_query(
&self, _message: DNSSECQuery, _responder: Option<Responder>,
) -> Option<(DNSResolverMessage, ResponseInstruction)> {
None
}
fn handle_dnssec_proof(&self, message: DNSSECProof, context: DNSResolverContext) {
let offer_opt = self.hrn_resolver.handle_dnssec_proof_for_offer(message, context);
#[cfg_attr(not(feature = "_test_utils"), allow(unused_mut))]
if let Some((completed_requests, mut offer)) = offer_opt {
for (name, payment_id) in completed_requests {
// Test hook: allow overriding the resolved offer per-name.
#[cfg(feature = "_test_utils")]
if let Some(replacement_offer) = self.testing_dnssec_proof_offer_resolution_override.lock().unwrap().remove(&name) {
offer = replacement_offer;
}
if let Ok(amt_msats) = self.pending_outbound_payments.amt_msats_for_payment_awaiting_offer(payment_id) {
let offer_pay_res =
self.pay_for_offer_intern(&offer, None, Some(amt_msats), None, payment_id, Some(name),
|invoice_request, nonce| {
// Record the request so it can be retried if no invoice arrives.
let retryable_invoice_request = RetryableInvoiceRequest {
invoice_request: invoice_request.clone(),
nonce,
};
self.pending_outbound_payments
.received_offer(payment_id, Some(retryable_invoice_request))
.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
});
if offer_pay_res.is_err() {
// The offer was resolved but we couldn't start paying it; give up on
// this payment rather than leaving it pending forever.
self.pending_outbound_payments.abandon_payment(
payment_id, PaymentFailureReason::RouteNotFound, &self.pending_events,
);
}
}
}
}
}
// Drains and returns all queued outbound DNS-resolution messages.
fn release_pending_messages(&self) -> Vec<(DNSResolverMessage, MessageSendInstructions)> {
core::mem::take(&mut self.pending_dns_onion_messages.lock().unwrap())
}
}
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
NodeIdLookUp for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	// Looks up the counterparty node id for the channel with the given short channel id,
	// if we have one, via the SCID-to-channel index.
	fn next_node_id(&self, short_channel_id: u64) -> Option<PublicKey> {
		let scid_map = self.short_to_chan_info.read().unwrap();
		scid_map.get(&short_channel_id).map(|entry| entry.0)
	}
}
// Builds the NodeFeatures we advertise: our init features re-contexted to node scope,
// plus optional keysend support (a node-level-only feature).
pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures {
	let mut features: NodeFeatures = provided_init_features(config).to_context();
	features.set_keysend_optional();
	features
}
// Test-only helper: our init features re-contexted for BOLT 11 invoices.
#[cfg(any(feature = "_test_utils", test))]
pub(crate) fn provided_bolt11_invoice_features(config: &UserConfig) -> Bolt11InvoiceFeatures {
	let init = provided_init_features(config);
	init.to_context()
}
// Our init features re-contexted for BOLT 12 invoices.
pub(crate) fn provided_bolt12_invoice_features(config: &UserConfig) -> Bolt12InvoiceFeatures {
	let init = provided_init_features(config);
	init.to_context()
}
// Our init features re-contexted to channel scope.
pub(crate) fn provided_channel_features(config: &UserConfig) -> ChannelFeatures {
	let init = provided_init_features(config);
	init.to_context()
}
// The channel-type features derivable from our init features.
pub(crate) fn provided_channel_type_features(config: &UserConfig) -> ChannelTypeFeatures {
	let init = provided_init_features(config);
	ChannelTypeFeatures::from_init(&init)
}
// The full set of init features we advertise to peers. "required" flags are those we
// refuse to operate without; "optional" flags are supported but not demanded.
pub fn provided_init_features(config: &UserConfig) -> InitFeatures {
let mut features = InitFeatures::empty();
features.set_data_loss_protect_required();
features.set_upfront_shutdown_script_optional();
features.set_variable_length_onion_required();
features.set_static_remote_key_required();
features.set_payment_secret_required();
features.set_basic_mpp_optional();
features.set_wumbo_optional();
features.set_shutdown_any_segwit_optional();
features.set_channel_type_optional();
features.set_scid_privacy_optional();
features.set_zero_conf_optional();
features.set_route_blinding_optional();
// Anchors are only advertised when the user opted in via their handshake config.
if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx {
features.set_anchors_zero_fee_htlc_tx_optional();
}
// Dual funding support is behind a compile-time cfg flag.
#[cfg(dual_funding)]
features.set_dual_fund_optional();
features
}
// Version written at the head of a serialized ChannelManager.
const SERIALIZATION_VERSION: u8 = 1;
// Oldest serialization version this code can still read.
const MIN_SERIALIZATION_VERSION: u8 = 1;
// TLV serialization for phantom-node route hints. Note: these TLV type numbers are wire
// format and must never change.
impl_writeable_tlv_based!(PhantomRouteHints, {
(2, channels, required_vec),
(4, phantom_scid, required),
(6, real_node_pubkey, required),
});
// TLV serialization for blinded-path forwarding state; `failure` defaults to
// FromIntroductionNode when absent in older serializations.
impl_writeable_tlv_based!(BlindedForward, {
(0, inbound_blinding_point, required),
(1, failure, (default_value, BlindedFailure::FromIntroductionNode)),
(3, next_blinding_override, option),
});
// TLV enum serialization for how a decoded HTLC should be routed: forwarded onward,
// received as an invoice payment, or received as a keysend/spontaneous payment.
impl_writeable_tlv_based_enum!(PendingHTLCRouting,
(0, Forward) => {
(0, onion_packet, required),
(1, blinded, option),
(2, short_channel_id, required),
},
(1, Receive) => {
(0, payment_data, required),
(1, phantom_shared_secret, option),
(2, incoming_cltv_expiry, required),
(3, payment_metadata, option),
(5, custom_tlvs, optional_vec),
(7, requires_blinded_error, (default_value, false)),
(9, payment_context, option),
},
(2, ReceiveKeysend) => {
(0, payment_preimage, required),
(1, requires_blinded_error, (default_value, false)),
(2, incoming_cltv_expiry, required),
(3, payment_metadata, option),
(4, payment_data, option), (5, custom_tlvs, optional_vec),
(7, has_recipient_created_payment_secret, (default_value, false)),
},
);
// TLV serialization for a fully-decoded pending HTLC.
impl_writeable_tlv_based!(PendingHTLCInfo, {
(0, routing, required),
(2, incoming_shared_secret, required),
(4, payment_hash, required),
(6, outgoing_amt_msat, required),
(8, outgoing_cltv_value, required),
(9, incoming_amt_msat, option),
(10, skimmed_fee_msat, option),
});
// Serialization of an HTLC failure message: a one-byte variant tag (0 = Relay,
// 1 = Malformed) followed by the message fields in order.
impl Writeable for HTLCFailureMsg {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
match self {
HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id, htlc_id, reason }) => {
0u8.write(writer)?;
channel_id.write(writer)?;
htlc_id.write(writer)?;
reason.write(writer)?;
},
HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
channel_id, htlc_id, sha256_of_onion, failure_code
}) => {
1u8.write(writer)?;
channel_id.write(writer)?;
htlc_id.write(writer)?;
sha256_of_onion.write(writer)?;
failure_code.write(writer)?;
},
}
Ok(())
}
}
// Deserialization of an HTLC failure message. Tags 0/1 are the fixed-field encodings
// written above; tags 2/3 are length-prefixed variants (read through a bounded reader,
// discarding any trailing bytes). Anything else is unknown and rejected.
impl Readable for HTLCFailureMsg {
fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
let id: u8 = Readable::read(reader)?;
match id {
0 => {
Ok(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
channel_id: Readable::read(reader)?,
htlc_id: Readable::read(reader)?,
reason: Readable::read(reader)?,
}))
},
1 => {
Ok(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
channel_id: Readable::read(reader)?,
htlc_id: Readable::read(reader)?,
sha256_of_onion: Readable::read(reader)?,
failure_code: Readable::read(reader)?,
}))
},
2 => {
let length: BigSize = Readable::read(reader)?;
let mut s = FixedLengthReader::new(reader, length.0);
let res = Readable::read(&mut s)?;
// Skip any bytes beyond what the message itself consumed.
s.eat_remaining()?; Ok(HTLCFailureMsg::Relay(res))
},
3 => {
let length: BigSize = Readable::read(reader)?;
let mut s = FixedLengthReader::new(reader, length.0);
let res = Readable::read(&mut s)?;
s.eat_remaining()?; Ok(HTLCFailureMsg::Malformed(res))
},
_ => Err(DecodeError::UnknownRequiredFeature),
}
}
}
// Legacy (non-length-prefixed) enum serialization for HTLC processing outcome.
impl_writeable_tlv_based_enum_legacy!(PendingHTLCStatus, ;
(0, Forward),
(1, Fail),
);
// Which node in a blinded path a failure originated from.
impl_writeable_tlv_based_enum!(BlindedFailure,
(0, FromIntroductionNode) => {},
(2, FromBlindedNode) => {},
);
// TLV serialization for the previous-hop data of a received HTLC. `channel_id` defaults
// to the v1 channel id derived from the funding outpoint for older serializations that
// lacked it.
impl_writeable_tlv_based!(HTLCPreviousHopData, {
(0, short_channel_id, required),
(1, phantom_shared_secret, option),
(2, outpoint, required),
(3, blinded_failure, option),
(4, htlc_id, required),
(6, incoming_packet_shared_secret, required),
(7, user_channel_id, option),
(9, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(outpoint.0.unwrap()))),
(11, counterparty_node_id, option),
});
// TLV serialization for a claimable HTLC. The onion payload is flattened: invoice
// payments write their legacy hop data at type 4, keysends write the preimage at type 8.
impl Writeable for ClaimableHTLC {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
let (payment_data, keysend_preimage) = match &self.onion_payload {
OnionPayload::Invoice { _legacy_hop_data } => {
(_legacy_hop_data.as_ref(), None)
},
OnionPayload::Spontaneous(preimage) => (None, Some(preimage)),
};
write_tlv_fields!(writer, {
(0, self.prev_hop, required),
(1, self.total_msat, required),
(2, self.value, required),
(3, self.sender_intended_value, required),
(4, payment_data, option),
(5, self.total_value_received, option),
(6, self.cltv_expiry, required),
(8, keysend_preimage, option),
(10, self.counterparty_skimmed_fee_msat, option),
});
Ok(())
}
}
// Deserialization of a claimable HTLC, reconstructing the onion payload from the
// flattened TLVs: a present keysend preimage (type 8) means Spontaneous (and must not
// coexist with payment_data); otherwise it is an Invoice payment.
impl Readable for ClaimableHTLC {
fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
_init_and_read_len_prefixed_tlv_fields!(reader, {
(0, prev_hop, required),
(1, total_msat, option),
(2, value_ser, required),
(3, sender_intended_value, option),
(4, payment_data_opt, option),
(5, total_value_received, option),
(6, cltv_expiry, required),
(8, keysend_preimage, option),
(10, counterparty_skimmed_fee_msat, option),
});
let payment_data: Option<msgs::FinalOnionHopData> = payment_data_opt;
let value = value_ser.0.unwrap();
let onion_payload = match keysend_preimage {
Some(p) => {
// A keysend HTLC carrying payment_data is malformed.
if payment_data.is_some() {
return Err(DecodeError::InvalidValue)
}
// Older keysend serializations lack total_msat; it equals the HTLC value.
if total_msat.is_none() {
total_msat = Some(value);
}
OnionPayload::Spontaneous(p)
},
None => {
// Invoice payments must carry a total, either directly (newer) or via
// the payment_data hop field (older serializations).
if total_msat.is_none() {
if payment_data.is_none() {
return Err(DecodeError::InvalidValue)
}
total_msat = Some(payment_data.as_ref().unwrap().total_msat);
}
OnionPayload::Invoice { _legacy_hop_data: payment_data }
},
};
Ok(Self {
prev_hop: prev_hop.0.unwrap(),
// In-memory-only counter; always restarts at zero on deserialization.
timer_ticks: 0,
value,
sender_intended_value: sender_intended_value.unwrap_or(value),
total_value_received,
total_msat: total_msat.unwrap(),
onion_payload,
cltv_expiry: cltv_expiry.0.unwrap(),
counterparty_skimmed_fee_msat,
})
}
}
// Deserialization of an HTLC's origin: tag 0 = our own outbound payment (TLV-encoded),
// tag 1 = forwarded from a previous hop.
impl Readable for HTLCSource {
fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
let id: u8 = Readable::read(reader)?;
match id {
0 => {
let mut session_priv: crate::util::ser::RequiredWrapper<SecretKey> = crate::util::ser::RequiredWrapper(None);
let mut first_hop_htlc_msat: u64 = 0;
let mut path_hops = Vec::new();
let mut payment_id = None;
let mut payment_params: Option<PaymentParameters> = None;
let mut blinded_tail: Option<BlindedTail> = None;
read_tlv_fields!(reader, {
(0, session_priv, required),
(1, payment_id, option),
(2, first_hop_htlc_msat, required),
(4, path_hops, required_vec),
(5, payment_params, (option: ReadableArgs, 0)),
(6, blinded_tail, option),
});
if payment_id.is_none() {
// Legacy serializations (pre-payment_id) used the session_priv bytes as
// the payment id; reconstruct that here.
payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref()));
}
let path = Path { hops: path_hops, blinded_tail };
if path.hops.len() == 0 {
return Err(DecodeError::InvalidValue);
}
// A zero final_cltv_expiry_delta in old data is a placeholder; recompute it
// from the path's final hop.
if let Some(params) = payment_params.as_mut() {
if let Payee::Clear { ref mut final_cltv_expiry_delta, .. } = params.payee {
if final_cltv_expiry_delta == &0 {
*final_cltv_expiry_delta = path.final_cltv_expiry_delta().ok_or(DecodeError::InvalidValue)?;
}
}
}
Ok(HTLCSource::OutboundRoute {
session_priv: session_priv.0.unwrap(),
first_hop_htlc_msat,
path,
payment_id: payment_id.unwrap(),
})
}
1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
_ => Err(DecodeError::UnknownRequiredFeature),
}
}
}
/// Serializes an `HTLCSource` behind a one-byte variant id (0 = `OutboundRoute`,
/// 1 = `PreviousHopData`), mirroring `Readable for HTLCSource` above.
impl Writeable for HTLCSource {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), crate::io::Error> {
match self {
HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id } => {
0u8.write(writer)?;
// Always written as Some; the reader backfills it from session_priv only for
// objects serialized before (1) existed.
let payment_id_opt = Some(payment_id);
write_tlv_fields!(writer, {
(0, session_priv, required),
(1, payment_id_opt, option),
(2, first_hop_htlc_msat, required),
(4, path.hops, required_vec),
// Type (5) is always written as None here (payment parameters are no longer
// stored on the HTLC source); the type number stays reserved for readers
// of older serializations.
(5, None::<PaymentParameters>, option), (6, path.blinded_tail, option),
});
}
HTLCSource::PreviousHopData(ref field) => {
1u8.write(writer)?;
field.write(writer)?;
}
}
Ok(())
}
}
// TLV (de)serialization for PendingAddHTLCInfo. Note that (7) defaults to the v1 channel id
// derived from the funding outpoint, covering serializations written before channel ids were
// stored explicitly.
impl_writeable_tlv_based!(PendingAddHTLCInfo, {
(0, forward_info, required),
(1, prev_user_channel_id, (default_value, 0)),
(2, prev_short_channel_id, required),
(4, prev_htlc_id, required),
(6, prev_funding_outpoint, required),
(7, prev_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(prev_funding_outpoint.0.unwrap()))),
(9, prev_counterparty_node_id, option),
});
/// Serializes an `HTLCForwardInfo` behind a one-byte variant id. Both fail variants share
/// variant id 1 and are distinguished on read by the presence of the odd TLV type (1).
impl Writeable for HTLCForwardInfo {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
const FAIL_HTLC_VARIANT_ID: u8 = 1;
match self {
Self::AddHTLC(info) => {
0u8.write(w)?;
info.write(w)?;
},
Self::FailHTLC { htlc_id, err_packet } => {
FAIL_HTLC_VARIANT_ID.write(w)?;
write_tlv_fields!(w, {
(0, htlc_id, required),
(2, err_packet, required),
});
},
Self::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
FAIL_HTLC_VARIANT_ID.write(w)?;
// An empty error packet is written at the required type (2) so that readers
// which do not understand the odd types (1)/(3) can still parse this as a
// plain FailHTLC (the reader below requires (2) under variant 1).
let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
write_tlv_fields!(w, {
(0, htlc_id, required),
(1, failure_code, required),
(2, dummy_err_packet, required),
(3, sha256_of_onion, required),
});
},
}
Ok(())
}
}
/// Deserialization counterpart to `Writeable for HTLCForwardInfo` above. Variant id 1 covers
/// both fail variants: the presence of TLV type (1) selects `FailMalformedHTLC`.
impl Readable for HTLCForwardInfo {
fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
let id: u8 = Readable::read(r)?;
Ok(match id {
0 => Self::AddHTLC(Readable::read(r)?),
1 => {
_init_and_read_len_prefixed_tlv_fields!(r, {
(0, htlc_id, required),
(1, malformed_htlc_failure_code, option),
(2, err_packet, required),
(3, sha256_of_onion, option),
});
if let Some(failure_code) = malformed_htlc_failure_code {
// (1) present: this is a malformed-HTLC failure; (3) must accompany it and
// the required-but-dummy err_packet at (2) is discarded.
Self::FailMalformedHTLC {
htlc_id: _init_tlv_based_struct_field!(htlc_id, required),
failure_code,
sha256_of_onion: sha256_of_onion.ok_or(DecodeError::InvalidValue)?,
}
} else {
Self::FailHTLC {
htlc_id: _init_tlv_based_struct_field!(htlc_id, required),
err_packet: _init_tlv_based_struct_field!(err_packet, required),
}
}
},
_ => return Err(DecodeError::InvalidValue),
})
}
}
// TLV (de)serialization for PendingInboundPayment. Kept so that objects written by older
// versions remain readable; the ChannelManager read path below logs and drops these.
impl_writeable_tlv_based!(PendingInboundPayment, {
(0, payment_secret, required),
(2, expiry_time, required),
(4, user_payment_id, required),
(6, payment_preimage, required),
(8, min_value_msat, required),
});
/// Serializes the full `ChannelManager` state. The field order here is the wire format and
/// must match the `ReadableArgs` implementation below exactly; several fixed-position legacy
/// fields are written as zero/duplicated values purely for backwards compatibility.
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> Writeable for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
// Hold the consistency lock exclusively so no other operation mutates state while we
// snapshot it.
let _consistency_lock = self.total_consistency_lock.write().unwrap();
write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
self.chain_hash.write(writer)?;
{
let best_block = self.best_block.read().unwrap();
best_block.height.write(writer)?;
best_block.block_hash.write(writer)?;
}
// Taking the per_peer_state write lock up front also makes the later
// unsafe_well_ordered_double_lock_self calls on the peer mutexes sound.
let per_peer_state = self.per_peer_state.write().unwrap();
let mut serializable_peer_count: u64 = 0;
{
// Only channels which are funded and whose funding has been broadcast are
// serialized; everything else is recreated or abandoned on reload.
let mut number_of_funded_channels = 0;
for (_, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
if !peer_state.ok_to_remove(false) {
serializable_peer_count += 1;
}
number_of_funded_channels += peer_state.channel_by_id.iter().filter(
|(_, phase)| if let ChannelPhase::Funded(chan) = phase { chan.context.is_funding_broadcast() } else { false }
).count();
}
(number_of_funded_channels as u64).write(writer)?;
for (_, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for channel in peer_state.channel_by_id.iter().filter_map(
|(_, phase)| if let ChannelPhase::Funded(channel) = phase {
if channel.context.is_funding_broadcast() { Some(channel) } else { None }
} else { None }
) {
channel.write(writer)?;
}
}
}
{
// Pending HTLC forwards, keyed by outgoing short channel id.
let forward_htlcs = self.forward_htlcs.lock().unwrap();
(forward_htlcs.len() as u64).write(writer)?;
for (short_channel_id, pending_forwards) in forward_htlcs.iter() {
short_channel_id.write(writer)?;
(pending_forwards.len() as u64).write(writer)?;
for forward in pending_forwards {
forward.write(writer)?;
}
}
}
// Pending update_add_htlc decodes go into odd TLV (14), and only when non-empty so older
// readers are unaffected.
let mut decode_update_add_htlcs_opt = None;
let decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
if !decode_update_add_htlcs.is_empty() {
decode_update_add_htlcs_opt = Some(decode_update_add_htlcs);
}
let claimable_payments = self.claimable_payments.lock().unwrap();
let pending_outbound_payments = self.pending_outbound_payments.pending_outbound_payments.lock().unwrap();
// Payment hashes and their HTLCs are written inline (legacy format); the matching
// purposes and onion fields are collected here and written index-aligned in TLVs (9)
// and (13) below.
let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
let mut htlc_onion_fields: Vec<&_> = Vec::new();
(claimable_payments.claimable_payments.len() as u64).write(writer)?;
for (payment_hash, payment) in claimable_payments.claimable_payments.iter() {
payment_hash.write(writer)?;
(payment.htlcs.len() as u64).write(writer)?;
for htlc in payment.htlcs.iter() {
htlc.write(writer)?;
}
htlc_purposes.push(&payment.purpose);
htlc_onion_fields.push(&payment.onion_fields);
}
let mut monitor_update_blocked_actions_per_peer = None;
let mut peer_states = Vec::new();
for (_, peer_state_mutex) in per_peer_state.iter() {
// Double-locking each peer mutex here is well-ordered because we still hold the
// per_peer_state write lock taken above.
peer_states.push(peer_state_mutex.unsafe_well_ordered_double_lock_self());
}
// Only peers we cannot simply forget (ok_to_remove == false) are serialized.
(serializable_peer_count).write(writer)?;
for ((peer_pubkey, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
if !peer_state.ok_to_remove(false) {
peer_pubkey.write(writer)?;
peer_state.latest_features.write(writer)?;
if !peer_state.monitor_update_blocked_actions.is_empty() {
monitor_update_blocked_actions_per_peer
.get_or_insert_with(Vec::new)
.push((*peer_pubkey, &peer_state.monitor_update_blocked_actions));
}
}
}
let events = self.pending_events.lock().unwrap();
// If any pending event carries a completion action, older readers (which drop the
// actions) must not see the events here: write a zero count inline and ship the full
// queue in odd TLV (8) instead.
let events_not_backwards_compatible = events.iter().any(|(_, action)| action.is_some());
if events_not_backwards_compatible {
0u64.write(writer)?;
} else {
(events.len() as u64).write(writer)?;
for (event, _) in events.iter() {
event.write(writer)?;
}
}
// Legacy background-event count; always empty now (the reader skips any entries).
0u64.write(writer)?;
// Two u32 timestamps: the first fills the legacy last_node_announcement_serial slot,
// the second is the actual highest_seen_timestamp.
(self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?;
(self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?;
// Deprecated pending-inbound-payment count; always zero (reader logs and drops any).
(0 as u64).write(writer)?;
// Legacy flat list of outbound session privs, for readers predating the TLV-encoded
// pending_outbound_payments below.
let mut num_pending_outbounds_compat: u64 = 0;
for (_, outbound) in pending_outbound_payments.iter() {
if !outbound.is_fulfilled() && !outbound.abandoned() {
num_pending_outbounds_compat += outbound.remaining_parts() as u64;
}
}
num_pending_outbounds_compat.write(writer)?;
for (_, outbound) in pending_outbound_payments.iter() {
match outbound {
PendingOutboundPayment::Legacy { session_privs } |
PendingOutboundPayment::Retryable { session_privs, .. } => {
for session_priv in session_privs.iter() {
session_priv.write(writer)?;
}
}
PendingOutboundPayment::AwaitingInvoice { .. } => {},
PendingOutboundPayment::AwaitingOffer { .. } => {},
PendingOutboundPayment::InvoiceReceived { .. } => {},
PendingOutboundPayment::StaticInvoiceReceived { .. } => {},
PendingOutboundPayment::Fulfilled { .. } => {},
PendingOutboundPayment::Abandoned { .. } => {},
}
}
// Odd TLV (1): per-payment session privs without retry data, for intermediate-version
// readers that understand payment ids but not full retry state.
let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = new_hash_map();
for (id, outbound) in pending_outbound_payments.iter() {
match outbound {
PendingOutboundPayment::Legacy { session_privs } |
PendingOutboundPayment::Retryable { session_privs, .. } => {
pending_outbound_payments_no_retry.insert(*id, session_privs.clone());
},
_ => {},
}
}
let mut pending_intercepted_htlcs = None;
let our_pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
if our_pending_intercepts.len() != 0 {
pending_intercepted_htlcs = Some(our_pending_intercepts);
}
let mut pending_claiming_payments = Some(&claimable_payments.pending_claiming_payments);
if pending_claiming_payments.as_ref().unwrap().is_empty() {
// LDK versions prior to 0.0.113 do not know how to read the implicit-length TLV
// written by `Some(empty map)`, so elide the field entirely when empty.
pending_claiming_payments = None;
}
// Collect non-empty in-flight monitor update queues, keyed by (peer, funding outpoint),
// for odd TLV (10).
let mut in_flight_monitor_updates: Option<HashMap<(&PublicKey, &OutPoint), &Vec<ChannelMonitorUpdate>>> = None;
for ((counterparty_id, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
for (funding_outpoint, updates) in peer_state.in_flight_monitor_updates.iter() {
if !updates.is_empty() {
if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(new_hash_map()); }
in_flight_monitor_updates.as_mut().unwrap().insert((counterparty_id, funding_outpoint), updates);
}
}
}
write_tlv_fields!(writer, {
(1, pending_outbound_payments_no_retry, required),
(2, pending_intercepted_htlcs, option),
(3, pending_outbound_payments, required),
(4, pending_claiming_payments, option),
(5, self.our_network_pubkey, required),
(6, monitor_update_blocked_actions_per_peer, option),
(7, self.fake_scid_rand_bytes, required),
(8, if events_not_backwards_compatible { Some(&*events) } else { None }, option),
(9, htlc_purposes, required_vec),
(10, in_flight_monitor_updates, option),
(11, self.probing_cookie_secret, required),
(13, htlc_onion_fields, optional_vec),
(14, decode_update_add_htlcs_opt, option),
(15, self.inbound_payment_id_secret, required),
});
Ok(())
}
}
impl Writeable for VecDeque<(Event, Option<EventCompletionAction>)> {
	/// Serializes the pending-events queue as a length-prefixed list of
	/// (event, completion-action) pairs.
	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
		let count = self.len() as u64;
		count.write(w)?;
		for (ev, completion_action) in self.iter() {
			ev.write(w)?;
			completion_action.write(w)?;
			// In debug builds, sanity-check that any event carrying a completion action
			// survives a serialization round-trip (i.e. is re-read as Some, not skipped).
			#[cfg(debug_assertions)] {
				let encoded = ev.encode();
				let reread: Option<Event> = MaybeReadable::read(&mut &encoded[..]).unwrap();
				if completion_action.is_some() {
					assert!(reread.is_some());
				}
			}
		}
		Ok(())
	}
}
impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
	/// Reads back the pending-events queue written by the `Writeable` impl above.
	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
		let len: u64 = Readable::read(reader)?;
		// Cap the up-front allocation so a corrupt or malicious length prefix cannot force
		// a huge reservation before any data has actually been read.
		const MAX_ALLOC_SIZE: u64 = 1024 * 16;
		let entry_size = mem::size_of::<(events::Event, Option<EventCompletionAction>)>() as u64;
		let capacity = cmp::min(MAX_ALLOC_SIZE / entry_size, len) as usize;
		let mut events: Self = VecDeque::with_capacity(capacity);
		for _ in 0..len {
			let maybe_event: Option<Event> = MaybeReadable::read(reader)?;
			let action: Option<EventCompletionAction> = Readable::read(reader)?;
			match maybe_event {
				Some(event) => events.push_back((event, action)),
				// An event we failed to understand (and thus skipped) must not carry a
				// completion action — we'd have nothing to complete it against.
				None if action.is_some() => return Err(DecodeError::InvalidValue),
				None => {},
			}
		}
		Ok(events)
	}
}
/// Arguments required to deserialize a [`ChannelManager`] via the `ReadableArgs` impl below,
/// bundling the runtime dependencies (signers, chain access, routing, logging) plus the set of
/// [`ChannelMonitor`]s the read path validates channels against.
pub struct ChannelManagerReadArgs<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
// Source of randomness; used during read e.g. to regenerate missing secrets
// (fake_scid_rand_bytes, probing_cookie_secret, inbound_payment_id_secret).
pub entropy_source: ES,
// Signer for node-level operations.
pub node_signer: NS,
// Provider of per-channel signers; also used when deserializing each Channel.
pub signer_provider: SP,
// Fee estimation source for the reconstructed manager.
pub fee_estimator: F,
// chain::Watch implementation the reconstructed manager reports monitors to.
pub chain_monitor: M,
// Broadcaster used for (re-)broadcasting transactions.
pub tx_broadcaster: T,
// Payment router.
pub router: R,
// Onion-message router.
pub message_router: MR,
// Logger used throughout the read path.
pub logger: L,
// Configuration applied to the reconstructed manager (e.g. channel type features used
// when deserializing channels).
pub default_config: UserConfig,
// All known channel monitors, keyed by funding outpoint. The read path force-closes
// channels whose manager state is stale relative to these, and rejects deserialization
// entirely if a needed monitor is missing.
pub channel_monitors: HashMap<OutPoint, &'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
}
impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
	ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	/// Builds a new argument set for deserializing a [`ChannelManager`], indexing the given
	/// channel monitors by their funding outpoint.
	pub fn new(
		entropy_source: ES, node_signer: NS, signer_provider: SP, fee_estimator: F,
		chain_monitor: M, tx_broadcaster: T, router: R, message_router: MR, logger: L,
		default_config: UserConfig,
		mut channel_monitors: Vec<&'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
	) -> Self {
		// Key each monitor by its funding outpoint up front; the read path looks channels up
		// by outpoint when validating them against their monitors.
		let channel_monitors = hash_map_from_iter(
			channel_monitors.drain(..).map(|monitor| (monitor.get_funding_txo().0, monitor)),
		);
		Self {
			entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor,
			tx_broadcaster, router, message_router, logger, default_config, channel_monitors,
		}
	}
}
impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
	ReadableArgs<ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>> for (BlockHash, Arc<ChannelManager<M, T, ES, NS, SP, F, R, MR, L>>)
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	/// Convenience wrapper: deserializes via the non-`Arc` impl below and wraps the resulting
	/// manager in an [`Arc`].
	fn read<Reader: io::Read>(reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>) -> Result<Self, DecodeError> {
		<(BlockHash, ChannelManager<M, T, ES, NS, SP, F, R, MR, L>)>::read(reader, args)
			.map(|(block_hash, manager)| (block_hash, Arc::new(manager)))
	}
}
impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
ReadableArgs<ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>> for (BlockHash, ChannelManager<M, T, ES, NS, SP, F, R, MR, L>)
where
M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: BroadcasterInterface,
ES::Target: EntropySource,
NS::Target: NodeSigner,
SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
MR::Target: MessageRouter,
L::Target: Logger,
{
fn read<Reader: io::Read>(reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>) -> Result<Self, DecodeError> {
let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
let chain_hash: ChainHash = Readable::read(reader)?;
let best_block_height: u32 = Readable::read(reader)?;
let best_block_hash: BlockHash = Readable::read(reader)?;
let empty_peer_state = || {
PeerState {
channel_by_id: new_hash_map(),
inbound_channel_request_by_id: new_hash_map(),
latest_features: InitFeatures::empty(),
pending_msg_events: Vec::new(),
in_flight_monitor_updates: BTreeMap::new(),
monitor_update_blocked_actions: BTreeMap::new(),
actions_blocking_raa_monitor_updates: BTreeMap::new(),
closed_channel_monitor_update_ids: BTreeMap::new(),
is_connected: false,
}
};
let mut failed_htlcs = Vec::new();
let channel_count: u64 = Readable::read(reader)?;
let mut funding_txo_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128));
let mut per_peer_state = hash_map_with_capacity(cmp::min(channel_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
let mut outpoint_to_peer = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
let mut channel_closures = VecDeque::new();
let mut close_background_events = Vec::new();
let mut funding_txo_to_channel_id = hash_map_with_capacity(channel_count as usize);
for _ in 0..channel_count {
let mut channel: Channel<SP> = Channel::read(reader, (
&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
))?;
let logger = WithChannelContext::from(&args.logger, &channel.context, None);
let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id());
funding_txo_set.insert(funding_txo.clone());
if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() ||
channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
log_error!(logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
log_error!(logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
if channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
&channel.context.channel_id(), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
}
if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() {
log_error!(logger, " The ChannelMonitor for channel {} is at holder commitment number {} but the ChannelManager is at holder commitment number {}.",
&channel.context.channel_id(), monitor.get_cur_holder_commitment_number(), channel.get_cur_holder_commitment_transaction_number());
}
if channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() {
log_error!(logger, " The ChannelMonitor for channel {} is at revoked counterparty transaction number {} but the ChannelManager is at revoked counterparty transaction number {}.",
&channel.context.channel_id(), monitor.get_min_seen_secret(), channel.get_revoked_counterparty_commitment_transaction_number());
}
if channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() {
log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
&channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
}
let mut shutdown_result = channel.context.force_shutdown(true, ClosureReason::OutdatedChannelManager);
if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
return Err(DecodeError::InvalidValue);
}
if let Some((counterparty_node_id, funding_txo, channel_id, mut update)) = shutdown_result.monitor_update {
let latest_update_id = monitor.get_latest_update_id().saturating_add(1);
update.update_id = latest_update_id;
per_peer_state.entry(counterparty_node_id)
.or_insert_with(|| Mutex::new(empty_peer_state()))
.lock().unwrap()
.closed_channel_monitor_update_ids.entry(channel_id)
.and_modify(|v| *v = cmp::max(latest_update_id, *v))
.or_insert(latest_update_id);
close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id, funding_txo, channel_id, update
});
}
failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
channel_closures.push_back((events::Event::ChannelClosed {
channel_id: channel.context.channel_id(),
user_channel_id: channel.context.get_user_id(),
reason: ClosureReason::OutdatedChannelManager,
counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
channel_capacity_sats: Some(channel.context.get_value_satoshis()),
channel_funding_txo: channel.context.get_funding_txo(),
last_local_balance_msat: Some(channel.context.get_value_to_self_msat()),
}, None));
for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
let mut found_htlc = false;
for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() {
if *channel_htlc_source == monitor_htlc_source { found_htlc = true; break; }
}
if !found_htlc {
let logger = WithChannelContext::from(&args.logger, &channel.context, Some(*payment_hash));
log_info!(logger,
"Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
&channel.context.channel_id(), &payment_hash);
failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.context.get_counterparty_node_id(), channel.context.channel_id()));
}
}
} else {
channel.on_startup_drop_completed_blocked_mon_updates_through(&logger, monitor.get_latest_update_id());
log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {} with {} blocked updates",
&channel.context.channel_id(), channel.context.get_latest_monitor_update_id(),
monitor.get_latest_update_id(), channel.blocked_monitor_updates_pending());
if let Some(short_channel_id) = channel.context.get_short_channel_id() {
short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
}
if let Some(funding_txo) = channel.context.get_funding_txo() {
outpoint_to_peer.insert(funding_txo, channel.context.get_counterparty_node_id());
}
per_peer_state.entry(channel.context.get_counterparty_node_id())
.or_insert_with(|| Mutex::new(empty_peer_state()))
.get_mut().unwrap()
.channel_by_id.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
}
} else if channel.is_awaiting_initial_mon_persist() {
let _ = channel.context.force_shutdown(false, ClosureReason::DisconnectedPeer);
channel_closures.push_back((events::Event::ChannelClosed {
channel_id: channel.context.channel_id(),
user_channel_id: channel.context.get_user_id(),
reason: ClosureReason::DisconnectedPeer,
counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
channel_capacity_sats: Some(channel.context.get_value_satoshis()),
channel_funding_txo: channel.context.get_funding_txo(),
last_local_balance_msat: Some(channel.context.get_value_to_self_msat()),
}, None));
} else {
log_error!(logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id());
log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
log_error!(logger, " Without the ChannelMonitor we cannot continue without risking funds.");
log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
return Err(DecodeError::InvalidValue);
}
}
for (funding_txo, monitor) in args.channel_monitors.iter() {
if !funding_txo_set.contains(funding_txo) {
let mut should_queue_fc_update = false;
if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
if !monitor.offchain_closed() || monitor.get_latest_update_id() > 1 {
should_queue_fc_update = !monitor.offchain_closed();
let mut latest_update_id = monitor.get_latest_update_id();
if should_queue_fc_update {
latest_update_id += 1;
}
per_peer_state.entry(counterparty_node_id)
.or_insert_with(|| Mutex::new(empty_peer_state()))
.lock().unwrap()
.closed_channel_monitor_update_ids.entry(monitor.channel_id())
.and_modify(|v| *v = cmp::max(latest_update_id, *v))
.or_insert(latest_update_id);
}
}
if !should_queue_fc_update {
continue;
}
let logger = WithChannelMonitor::from(&args.logger, monitor, None);
let channel_id = monitor.channel_id();
log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
&channel_id);
let mut monitor_update = ChannelMonitorUpdate {
update_id: monitor.get_latest_update_id().saturating_add(1),
counterparty_node_id: None,
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
channel_id: Some(monitor.channel_id()),
};
if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id,
funding_txo: *funding_txo,
channel_id,
update: monitor_update,
};
close_background_events.push(update);
} else {
monitor_update.update_id = u64::MAX;
close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, channel_id, monitor_update)));
}
}
}
const MAX_ALLOC_SIZE: usize = 1024 * 64;
let forward_htlcs_count: u64 = Readable::read(reader)?;
let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128));
for _ in 0..forward_htlcs_count {
let short_channel_id = Readable::read(reader)?;
let pending_forwards_count: u64 = Readable::read(reader)?;
let mut pending_forwards = Vec::with_capacity(cmp::min(pending_forwards_count as usize, MAX_ALLOC_SIZE/mem::size_of::<HTLCForwardInfo>()));
for _ in 0..pending_forwards_count {
pending_forwards.push(Readable::read(reader)?);
}
forward_htlcs.insert(short_channel_id, pending_forwards);
}
let claimable_htlcs_count: u64 = Readable::read(reader)?;
let mut claimable_htlcs_list = Vec::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
for _ in 0..claimable_htlcs_count {
let payment_hash = Readable::read(reader)?;
let previous_hops_len: u64 = Readable::read(reader)?;
let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, MAX_ALLOC_SIZE/mem::size_of::<ClaimableHTLC>()));
for _ in 0..previous_hops_len {
previous_hops.push(<ClaimableHTLC as Readable>::read(reader)?);
}
claimable_htlcs_list.push((payment_hash, previous_hops));
}
let peer_count: u64 = Readable::read(reader)?;
for _ in 0..peer_count {
let peer_pubkey: PublicKey = Readable::read(reader)?;
let latest_features = Readable::read(reader)?;
if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) {
peer_state.get_mut().unwrap().latest_features = latest_features;
}
}
let event_count: u64 = Readable::read(reader)?;
let mut pending_events_read: VecDeque<(events::Event, Option<EventCompletionAction>)> =
VecDeque::with_capacity(cmp::min(event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(events::Event, Option<EventCompletionAction>)>()));
for _ in 0..event_count {
match MaybeReadable::read(reader)? {
Some(event) => pending_events_read.push_back((event, None)),
None => continue,
}
}
let background_event_count: u64 = Readable::read(reader)?;
for _ in 0..background_event_count {
match <u8 as Readable>::read(reader)? {
0 => {
let _: OutPoint = Readable::read(reader)?;
let _: ChannelMonitorUpdate = Readable::read(reader)?;
}
_ => return Err(DecodeError::InvalidValue),
}
}
let _last_node_announcement_serial: u32 = Readable::read(reader)?; let highest_seen_timestamp: u32 = Readable::read(reader)?;
let pending_inbound_payment_count: u64 = Readable::read(reader)?;
for _ in 0..pending_inbound_payment_count {
let payment_hash: PaymentHash = Readable::read(reader)?;
let logger = WithContext::from(&args.logger, None, None, Some(payment_hash));
let inbound: PendingInboundPayment = Readable::read(reader)?;
log_warn!(logger, "Ignoring deprecated pending inbound payment with payment hash {}: {:?}", payment_hash, inbound);
}
let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?;
let mut pending_outbound_payments_compat: HashMap<PaymentId, PendingOutboundPayment> =
hash_map_with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
for _ in 0..pending_outbound_payments_count_compat {
let session_priv = Readable::read(reader)?;
let payment = PendingOutboundPayment::Legacy {
session_privs: hash_set_from_iter([session_priv]),
};
if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() {
return Err(DecodeError::InvalidValue)
};
}
let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = None;
let mut pending_outbound_payments = None;
let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = Some(new_hash_map());
let mut received_network_pubkey: Option<PublicKey> = None;
let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
let mut probing_cookie_secret: Option<[u8; 32]> = None;
let mut claimable_htlc_purposes = None;
let mut claimable_htlc_onion_fields = None;
let mut pending_claiming_payments = Some(new_hash_map());
let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> = Some(Vec::new());
let mut events_override = None;
let mut in_flight_monitor_updates: Option<HashMap<(PublicKey, OutPoint), Vec<ChannelMonitorUpdate>>> = None;
let mut decode_update_add_htlcs: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> = None;
let mut inbound_payment_id_secret = None;
read_tlv_fields!(reader, {
(1, pending_outbound_payments_no_retry, option),
(2, pending_intercepted_htlcs, option),
(3, pending_outbound_payments, option),
(4, pending_claiming_payments, option),
(5, received_network_pubkey, option),
(6, monitor_update_blocked_actions_per_peer, option),
(7, fake_scid_rand_bytes, option),
(8, events_override, option),
(9, claimable_htlc_purposes, optional_vec),
(10, in_flight_monitor_updates, option),
(11, probing_cookie_secret, option),
(13, claimable_htlc_onion_fields, optional_vec),
(14, decode_update_add_htlcs, option),
(15, inbound_payment_id_secret, option),
});
let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map());
if fake_scid_rand_bytes.is_none() {
fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
}
if probing_cookie_secret.is_none() {
probing_cookie_secret = Some(args.entropy_source.get_secure_random_bytes());
}
if inbound_payment_id_secret.is_none() {
inbound_payment_id_secret = Some(args.entropy_source.get_secure_random_bytes());
}
if let Some(events) = events_override {
pending_events_read = events;
}
if !channel_closures.is_empty() {
pending_events_read.append(&mut channel_closures);
}
if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
pending_outbound_payments = Some(pending_outbound_payments_compat);
} else if pending_outbound_payments.is_none() {
let mut outbounds = new_hash_map();
for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() {
outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
}
pending_outbound_payments = Some(outbounds);
}
let pending_outbounds = OutboundPayments::new(pending_outbound_payments.unwrap());
let mut pending_background_events = Vec::new();
// Replays `ChannelMonitorUpdate`s which were "in-flight" (handed to the `chain::Watch`
// implementation but not yet confirmed persisted) when the `ChannelManager` was written.
// Expanded with the peer's node id, the mutable list of in-flight updates for one channel,
// that channel's funding outpoint, its `ChannelMonitor`, the peer's state, a logger, and a
// string prefix for log lines (e.g. "closed "). Evaluates to the highest `update_id` still
// pending for the channel (0 if the monitor already contained every update we had in
// flight). Defined as a macro (rather than a fn) so it can capture and mutate
// `pending_background_events` from the enclosing deserialization scope.
macro_rules! handle_in_flight_updates {
($counterparty_node_id: expr, $chan_in_flight_upds: expr, $funding_txo: expr,
$monitor: expr, $peer_state: expr, $logger: expr, $channel_info_log: expr
) => { {
let mut max_in_flight_update_id = 0;
// Drop any updates the monitor has already applied — only strictly-newer
// updates need to be replayed on startup.
$chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
for update in $chan_in_flight_upds.iter() {
log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
update.update_id, $channel_info_log, &$monitor.channel_id());
max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
// Queue each remaining update to be re-applied once startup completes.
pending_background_events.push(
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id: $counterparty_node_id,
funding_txo: $funding_txo,
channel_id: $monitor.channel_id(),
update: update.clone(),
});
}
if $chan_in_flight_upds.is_empty() {
// The monitor was already up-to-date with all updates we had in flight;
// queue an event noting that monitor updating for this channel is complete.
pending_background_events.push(
BackgroundEvent::MonitorUpdatesComplete {
counterparty_node_id: $counterparty_node_id,
channel_id: $monitor.channel_id(),
});
} else {
// Record the highest pending update id, keeping any larger value already
// tracked for this channel.
$peer_state.closed_channel_monitor_update_ids.entry($monitor.channel_id())
.and_modify(|v| *v = cmp::max(max_in_flight_update_id, *v))
.or_insert(max_in_flight_update_id);
}
// Each channel's in-flight updates must only be processed once; a pre-existing
// entry for the same funding outpoint indicates corrupt/duplicated data.
if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() {
log_error!($logger, "Duplicate in-flight monitor update set for the same channel!");
return Err(DecodeError::InvalidValue);
}
max_in_flight_update_id
} }
}
for (counterparty_id, peer_state_mtx) in per_peer_state.iter_mut() {
let mut peer_state_lock = peer_state_mtx.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for phase in peer_state.channel_by_id.values() {
if let ChannelPhase::Funded(chan) = phase {
let logger = WithChannelContext::from(&args.logger, &chan.context, None);
let funding_txo = chan.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
let monitor = args.channel_monitors.get(&funding_txo)
.expect("We already checked for monitor presence when loading channels");
let mut max_in_flight_update_id = monitor.get_latest_update_id();
if let Some(in_flight_upds) = &mut in_flight_monitor_updates {
if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, funding_txo)) {
max_in_flight_update_id = cmp::max(max_in_flight_update_id,
handle_in_flight_updates!(*counterparty_id, chan_in_flight_upds,
funding_txo, monitor, peer_state, logger, ""));
}
}
if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id {
log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id);
log_error!(logger, " but the ChannelManager is at update_id {}.", chan.get_latest_unblocked_monitor_update_id());
log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
return Err(DecodeError::DangerousValue);
}
} else {
debug_assert!(false);
return Err(DecodeError::InvalidValue);
}
}
}
if let Some(in_flight_upds) = in_flight_monitor_updates {
for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds {
let channel_id = funding_txo_to_channel_id.get(&funding_txo).copied();
let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id, None);
if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
let peer_state_mutex = per_peer_state.entry(counterparty_id).or_insert_with(|| {
Mutex::new(empty_peer_state())
});
let mut peer_state = peer_state_mutex.lock().unwrap();
handle_in_flight_updates!(counterparty_id, chan_in_flight_updates,
funding_txo, monitor, peer_state, logger, "closed ");
} else {
log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
log_error!(logger, " The ChannelMonitor for channel {} is missing.", if let Some(channel_id) =
channel_id { channel_id.to_string() } else { format!("with outpoint {}", funding_txo) } );
log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
log_error!(logger, " Pending in-flight updates are: {:?}", chan_in_flight_updates);
return Err(DecodeError::InvalidValue);
}
}
}
pending_background_events.reserve(close_background_events.len());
'each_bg_event: for mut new_event in close_background_events {
if let BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id, funding_txo, channel_id, update,
} = &mut new_event {
debug_assert_eq!(update.updates.len(), 1);
debug_assert!(matches!(update.updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. }));
let mut updated_id = false;
for pending_event in pending_background_events.iter() {
if let BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id: pending_cp, funding_txo: pending_funding,
channel_id: pending_chan_id, update: pending_update,
} = pending_event {
let for_same_channel = counterparty_node_id == pending_cp
&& funding_txo == pending_funding
&& channel_id == pending_chan_id;
if for_same_channel {
debug_assert!(update.update_id >= pending_update.update_id);
if pending_update.updates.iter().any(|upd| matches!(upd, ChannelMonitorUpdateStep::ChannelForceClosed { .. })) {
continue 'each_bg_event;
}
update.update_id = pending_update.update_id.saturating_add(1);
updated_id = true;
}
}
}
let mut per_peer_state = per_peer_state.get(counterparty_node_id)
.expect("If we have pending updates for a channel it must have an entry")
.lock().unwrap();
if updated_id {
per_peer_state
.closed_channel_monitor_update_ids.entry(*channel_id)
.and_modify(|v| *v = cmp::max(update.update_id, *v))
.or_insert(update.update_id);
}
let in_flight_updates = per_peer_state.in_flight_monitor_updates
.entry(*funding_txo)
.or_insert_with(Vec::new);
debug_assert!(!in_flight_updates.iter().any(|upd| upd == update));
in_flight_updates.push(update.clone());
}
pending_background_events.push(new_event);
}
let mut pending_claims_to_replay = Vec::new();
{
for (_, monitor) in args.channel_monitors.iter() {
let counterparty_opt = outpoint_to_peer.get(&monitor.get_funding_txo().0);
if counterparty_opt.is_none() {
for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
let logger = WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash));
if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source {
if path.hops.is_empty() {
log_error!(logger, "Got an empty path for a pending payment");
return Err(DecodeError::InvalidValue);
}
let mut session_priv_bytes = [0; 32];
session_priv_bytes[..].copy_from_slice(&session_priv[..]);
pending_outbounds.insert_from_monitor_on_startup(
payment_id, htlc.payment_hash, session_priv_bytes, &path, best_block_height, logger
);
}
}
for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() {
let logger = WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash));
match htlc_source {
HTLCSource::PreviousHopData(prev_hop_data) => {
let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
info.prev_funding_outpoint == prev_hop_data.outpoint &&
info.prev_htlc_id == prev_hop_data.htlc_id
};
decode_update_add_htlcs.retain(|scid, update_add_htlcs| {
update_add_htlcs.retain(|update_add_htlc| {
let matches = *scid == prev_hop_data.short_channel_id &&
update_add_htlc.htlc_id == prev_hop_data.htlc_id;
if matches {
log_info!(logger, "Removing pending to-decode HTLC with hash {} as it was forwarded to the closed channel {}",
&htlc.payment_hash, &monitor.channel_id());
}
!matches
});
!update_add_htlcs.is_empty()
});
forward_htlcs.retain(|_, forwards| {
forwards.retain(|forward| {
if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
if pending_forward_matches_htlc(&htlc_info) {
log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
&htlc.payment_hash, &monitor.channel_id());
false
} else { true }
} else { true }
});
!forwards.is_empty()
});
pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
if pending_forward_matches_htlc(&htlc_info) {
log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
&htlc.payment_hash, &monitor.channel_id());
pending_events_read.retain(|(event, _)| {
if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
intercepted_id != ev_id
} else { true }
});
false
} else { true }
});
},
HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } => {
if let Some(preimage) = preimage_opt {
let pending_events = Mutex::new(pending_events_read);
let compl_action =
EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
channel_funding_outpoint: monitor.get_funding_txo().0,
channel_id: monitor.channel_id(),
counterparty_node_id: path.hops[0].pubkey,
};
pending_outbounds.claim_htlc(payment_id, preimage, session_priv,
path, false, compl_action, &pending_events, &&logger);
pending_events_read = pending_events.into_inner().unwrap();
}
},
}
}
}
let mut fail_read = false;
let outbound_claimed_htlcs_iter = monitor.get_all_current_outbound_htlcs()
.into_iter()
.filter_map(|(htlc_source, (htlc, preimage_opt))| {
if let HTLCSource::PreviousHopData(prev_hop) = &htlc_source {
if let Some(payment_preimage) = preimage_opt {
let inbound_edge_monitor = args.channel_monitors.get(&prev_hop.outpoint);
let inbound_edge_monitor = if let Some(monitor) = inbound_edge_monitor {
monitor
} else {
return None;
};
let inbound_edge_balances = inbound_edge_monitor.get_claimable_balances();
if inbound_edge_balances.is_empty() {
return None;
}
if prev_hop.counterparty_node_id.is_none() {
let htlc_payment_hash: PaymentHash = payment_preimage.into();
let balance_could_incl_htlc = |bal| match bal {
&Balance::ClaimableOnChannelClose { .. } => {
true
},
&Balance::MaybePreimageClaimableHTLC { payment_hash, .. } => {
payment_hash == htlc_payment_hash
},
_ => false,
};
let htlc_may_be_in_balances =
inbound_edge_balances.iter().any(balance_could_incl_htlc);
if !htlc_may_be_in_balances {
return None;
}
if short_to_chan_info.get(&prev_hop.short_channel_id).is_none() {
log_error!(args.logger,
"We need to replay the HTLC claim for payment_hash {} (preimage {}) but cannot do so as the HTLC was forwarded prior to LDK 0.0.124.\
All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1",
htlc_payment_hash,
payment_preimage,
);
fail_read = true;
}
log_error!(args.logger,
"We need to replay the HTLC claim for payment_hash {} (preimage {}) but don't have all the required information to do so reliably.\
As long as the channel for the inbound edge of the forward remains open, this may work okay, but we may panic at runtime!\
All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1\
Continuing anyway, though panics may occur!",
htlc_payment_hash,
payment_preimage,
);
}
Some((htlc_source, payment_preimage, htlc.amount_msat,
counterparty_opt.is_none(),
counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
monitor.get_funding_txo().0, monitor.channel_id()))
} else { None }
} else {
None
}
});
for tuple in outbound_claimed_htlcs_iter {
pending_claims_to_replay.push(tuple);
}
if fail_read {
return Err(DecodeError::InvalidValue);
}
}
}
if !forward_htlcs.is_empty() || !decode_update_add_htlcs.is_empty() || pending_outbounds.needs_abandon() {
pending_events_read.push_back((events::Event::PendingHTLCsForwardable {
time_forwardable: Duration::from_secs(2),
}, None));
}
let expanded_inbound_key = args.node_signer.get_inbound_payment_key();
let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len());
if let Some(purposes) = claimable_htlc_purposes {
if purposes.len() != claimable_htlcs_list.len() {
return Err(DecodeError::InvalidValue);
}
if let Some(onion_fields) = claimable_htlc_onion_fields {
if onion_fields.len() != claimable_htlcs_list.len() {
return Err(DecodeError::InvalidValue);
}
for (purpose, (onion, (payment_hash, htlcs))) in
purposes.into_iter().zip(onion_fields.into_iter().zip(claimable_htlcs_list.into_iter()))
{
let existing_payment = claimable_payments.insert(payment_hash, ClaimablePayment {
purpose, htlcs, onion_fields: onion,
});
if existing_payment.is_some() { return Err(DecodeError::InvalidValue); }
}
} else {
for (purpose, (payment_hash, htlcs)) in purposes.into_iter().zip(claimable_htlcs_list.into_iter()) {
let existing_payment = claimable_payments.insert(payment_hash, ClaimablePayment {
purpose, htlcs, onion_fields: None,
});
if existing_payment.is_some() { return Err(DecodeError::InvalidValue); }
}
}
} else {
for (payment_hash, htlcs) in claimable_htlcs_list.drain(..) {
if htlcs.is_empty() {
return Err(DecodeError::InvalidValue);
}
let purpose = match &htlcs[0].onion_payload {
OnionPayload::Invoice { _legacy_hop_data } => {
if let Some(hop_data) = _legacy_hop_data {
events::PaymentPurpose::Bolt11InvoicePayment {
payment_preimage:
match inbound_payment::verify(
payment_hash, &hop_data, 0, &expanded_inbound_key, &args.logger
) {
Ok((payment_preimage, _)) => payment_preimage,
Err(()) => {
log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", &payment_hash);
return Err(DecodeError::InvalidValue);
}
},
payment_secret: hop_data.payment_secret,
}
} else { return Err(DecodeError::InvalidValue); }
},
OnionPayload::Spontaneous(payment_preimage) =>
events::PaymentPurpose::SpontaneousPayment(*payment_preimage),
};
claimable_payments.insert(payment_hash, ClaimablePayment {
purpose, htlcs, onion_fields: None,
});
}
}
for (payment_hash, payment) in claimable_payments.iter() {
for htlc in payment.htlcs.iter() {
if htlc.prev_hop.counterparty_node_id.is_some() {
continue;
}
if short_to_chan_info.get(&htlc.prev_hop.short_channel_id).is_some() {
log_error!(args.logger,
"We do not have the required information to claim a pending payment with payment hash {} reliably.\
As long as the channel for the inbound edge of the forward remains open, this may work okay, but we may panic at runtime!\
All HTLCs that were received by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1\
Continuing anyway, though panics may occur!",
payment_hash,
);
} else {
log_error!(args.logger,
"We do not have the required information to claim a pending payment with payment hash {}.\
All HTLCs that were received by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1",
payment_hash,
);
return Err(DecodeError::InvalidValue);
}
}
}
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&args.entropy_source.get_secure_random_bytes());
let our_network_pubkey = match args.node_signer.get_node_id(Recipient::Node) {
Ok(key) => key,
Err(()) => return Err(DecodeError::InvalidValue)
};
if let Some(network_pubkey) = received_network_pubkey {
if network_pubkey != our_network_pubkey {
log_error!(args.logger, "Key that was generated does not match the existing key.");
return Err(DecodeError::InvalidValue);
}
}
let mut outbound_scid_aliases = new_hash_set();
for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for (chan_id, phase) in peer_state.channel_by_id.iter_mut() {
if let ChannelPhase::Funded(chan) = phase {
let logger = WithChannelContext::from(&args.logger, &chan.context, None);
if chan.context.outbound_scid_alias() == 0 {
let mut outbound_scid_alias;
loop {
outbound_scid_alias = fake_scid::Namespace::OutboundAlias
.get_fake_scid(best_block_height, &chain_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
}
chan.context.set_outbound_scid_alias(outbound_scid_alias);
} else if !outbound_scid_aliases.insert(chan.context.outbound_scid_alias()) {
log_error!(logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
return Err(DecodeError::InvalidValue);
}
if chan.context.is_usable() {
if short_to_chan_info.insert(chan.context.outbound_scid_alias(), (chan.context.get_counterparty_node_id(), *chan_id)).is_some() {
log_error!(logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
return Err(DecodeError::InvalidValue);
}
}
} else {
debug_assert!(false);
return Err(DecodeError::InvalidValue);
}
}
}
let bounded_fee_estimator = LowerBoundedFeeEstimator::new(args.fee_estimator);
for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
if let Some(peer_state) = per_peer_state.get(&node_id) {
for (channel_id, actions) in monitor_update_blocked_actions.iter() {
let logger = WithContext::from(&args.logger, Some(node_id), Some(*channel_id), None);
for action in actions.iter() {
if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
downstream_counterparty_and_funding_outpoint:
Some(EventUnblockedChannel {
counterparty_node_id: blocked_node_id,
funding_txo: _,
channel_id: blocked_channel_id,
blocking_action,
}), ..
} = action {
if let Some(blocked_peer_state) = per_peer_state.get(blocked_node_id) {
log_trace!(logger,
"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
blocked_channel_id);
blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
.entry(*blocked_channel_id)
.or_insert_with(Vec::new).push(blocking_action.clone());
} else {
}
}
if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately { .. } = action {
debug_assert!(false, "Non-event-generating channel freeing should not appear in our queue");
}
}
}
peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
} else {
log_error!(WithContext::from(&args.logger, Some(node_id), None, None), "Got blocked actions without a per-peer-state for {}", node_id);
return Err(DecodeError::InvalidValue);
}
}
let channel_manager = ChannelManager {
chain_hash,
fee_estimator: bounded_fee_estimator,
chain_monitor: args.chain_monitor,
tx_broadcaster: args.tx_broadcaster,
router: args.router,
message_router: args.message_router,
best_block: RwLock::new(BestBlock::new(best_block_hash, best_block_height)),
inbound_payment_key: expanded_inbound_key,
pending_outbound_payments: pending_outbounds,
pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
forward_htlcs: Mutex::new(forward_htlcs),
decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs),
claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap() }),
outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
outpoint_to_peer: Mutex::new(outpoint_to_peer),
short_to_chan_info: FairRwLock::new(short_to_chan_info),
fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(),
probing_cookie_secret: probing_cookie_secret.unwrap(),
inbound_payment_id_secret: inbound_payment_id_secret.unwrap(),
our_network_pubkey,
secp_ctx,
highest_seen_timestamp: AtomicUsize::new(highest_seen_timestamp as usize),
per_peer_state: FairRwLock::new(per_peer_state),
pending_events: Mutex::new(pending_events_read),
pending_events_processor: AtomicBool::new(false),
pending_background_events: Mutex::new(pending_background_events),
total_consistency_lock: RwLock::new(()),
background_events_processed_since_startup: AtomicBool::new(false),
event_persist_notifier: Notifier::new(),
needs_persist_flag: AtomicBool::new(false),
funding_batch_states: Mutex::new(BTreeMap::new()),
pending_offers_messages: Mutex::new(Vec::new()),
pending_async_payments_messages: Mutex::new(Vec::new()),
pending_broadcast_messages: Mutex::new(Vec::new()),
entropy_source: args.entropy_source,
node_signer: args.node_signer,
signer_provider: args.signer_provider,
last_days_feerates: Mutex::new(VecDeque::new()),
logger: args.logger,
default_configuration: args.default_config,
#[cfg(feature = "dnssec")]
hrn_resolver: OMNameResolver::new(highest_seen_timestamp, best_block_height),
#[cfg(feature = "dnssec")]
pending_dns_onion_messages: Mutex::new(Vec::new()),
#[cfg(feature = "_test_utils")]
testing_dnssec_proof_offer_resolution_override: Mutex::new(new_hash_map()),
};
let mut processed_claims: HashSet<Vec<MPPClaimHTLCSource>> = new_hash_set();
for (_, monitor) in args.channel_monitors.iter() {
for (payment_hash, (payment_preimage, payment_claims)) in monitor.get_stored_preimages() {
if !payment_claims.is_empty() {
for payment_claim in payment_claims {
if processed_claims.contains(&payment_claim.mpp_parts) {
continue;
}
if payment_claim.mpp_parts.is_empty() {
return Err(DecodeError::InvalidValue);
}
let pending_claims = PendingMPPClaim {
channels_without_preimage: payment_claim.mpp_parts.clone(),
channels_with_preimage: Vec::new(),
};
let pending_claim_ptr_opt = Some(Arc::new(Mutex::new(pending_claims)));
let claim_found =
channel_manager.claimable_payments.lock().unwrap().begin_claiming_payment(
payment_hash, &channel_manager.node_signer, &channel_manager.logger,
&channel_manager.inbound_payment_id_secret, true,
);
if claim_found.is_err() {
let mut claimable_payments = channel_manager.claimable_payments.lock().unwrap();
match claimable_payments.pending_claiming_payments.entry(payment_hash) {
hash_map::Entry::Occupied(_) => {
debug_assert!(false, "Entry was added in begin_claiming_payment");
return Err(DecodeError::InvalidValue);
},
hash_map::Entry::Vacant(entry) => {
entry.insert(payment_claim.claiming_payment);
},
}
}
for part in payment_claim.mpp_parts.iter() {
let pending_mpp_claim = pending_claim_ptr_opt.as_ref().map(|ptr| (
part.counterparty_node_id, part.channel_id, part.htlc_id,
PendingMPPClaimPointer(Arc::clone(&ptr))
));
let pending_claim_ptr = pending_claim_ptr_opt.as_ref().map(|ptr|
RAAMonitorUpdateBlockingAction::ClaimedMPPPayment {
pending_claim: PendingMPPClaimPointer(Arc::clone(&ptr)),
}
);
channel_manager.claim_mpp_part(
part.into(), payment_preimage, None,
|_, _|
(Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim }), pending_claim_ptr)
);
}
processed_claims.insert(payment_claim.mpp_parts);
}
} else {
let per_peer_state = channel_manager.per_peer_state.read().unwrap();
let mut claimable_payments = channel_manager.claimable_payments.lock().unwrap();
let payment = claimable_payments.claimable_payments.remove(&payment_hash);
mem::drop(claimable_payments);
if let Some(payment) = payment {
log_info!(channel_manager.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", &payment_hash);
let mut claimable_amt_msat = 0;
let mut receiver_node_id = Some(our_network_pubkey);
let phantom_shared_secret = payment.htlcs[0].prev_hop.phantom_shared_secret;
if phantom_shared_secret.is_some() {
let phantom_pubkey = channel_manager.node_signer.get_node_id(Recipient::PhantomNode)
.expect("Failed to get node_id for phantom node recipient");
receiver_node_id = Some(phantom_pubkey)
}
for claimable_htlc in &payment.htlcs {
claimable_amt_msat += claimable_htlc.value;
let previous_channel_id = claimable_htlc.prev_hop.channel_id;
let peer_node_id_opt = channel_manager.outpoint_to_peer.lock().unwrap()
.get(&claimable_htlc.prev_hop.outpoint).cloned();
if let Some(peer_node_id) = peer_node_id_opt {
let peer_state_mutex = per_peer_state.get(&peer_node_id).unwrap();
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let Some(ChannelPhase::Funded(channel)) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
let logger = WithChannelContext::from(&channel_manager.logger, &channel.context, Some(payment_hash));
channel.claim_htlc_while_disconnected_dropping_mon_update_legacy(
claimable_htlc.prev_hop.htlc_id, payment_preimage, &&logger
);
}
}
if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
previous_hop_monitor.provide_payment_preimage_unsafe_legacy(
&payment_hash, &payment_preimage, &channel_manager.tx_broadcaster,
&channel_manager.fee_estimator, &channel_manager.logger
);
}
}
let mut pending_events = channel_manager.pending_events.lock().unwrap();
let payment_id = payment.inbound_payment_id(&inbound_payment_id_secret.unwrap());
pending_events.push_back((events::Event::PaymentClaimed {
receiver_node_id,
payment_hash,
purpose: payment.purpose,
amount_msat: claimable_amt_msat,
htlcs: payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(),
sender_intended_total_msat: payment.htlcs.first().map(|htlc| htlc.total_msat),
onion_fields: payment.onion_fields,
payment_id: Some(payment_id),
}, None));
}
}
}
}
for htlc_source in failed_htlcs.drain(..) {
let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
}
for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding, downstream_channel_id) in pending_claims_to_replay {
channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None,
downstream_closed, true, downstream_node_id, downstream_funding,
downstream_channel_id, None
);
}
Ok((best_block_hash.clone(), channel_manager))
}
}
#[cfg(test)]
mod tests {
use bitcoin::hashes::Hash;
use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
use core::sync::atomic::Ordering;
use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
use crate::ln::types::ChannelId;
use crate::types::payment::{PaymentPreimage, PaymentHash, PaymentSecret};
use crate::ln::channelmanager::{create_recv_pending_htlc_info, HTLCForwardInfo, inbound_payment, PaymentId, PaymentSendFailure, RecipientOnionFields, InterceptId};
use crate::ln::functional_test_utils::*;
use crate::ln::msgs::{self, ErrorAction};
use crate::ln::msgs::ChannelMessageHandler;
use crate::ln::outbound_payment::Retry;
use crate::prelude::*;
use crate::routing::router::{PaymentParameters, RouteParameters, find_route};
use crate::util::errors::APIError;
use crate::util::ser::Writeable;
use crate::util::test_utils;
use crate::util::config::{ChannelConfig, ChannelConfigUpdate};
use crate::sign::EntropySource;
#[test]
fn test_notify_limits() {
// Checks that `get_event_or_persistence_needed_future` completes exactly when
// something actually changed (events pending or persistence needed), and stays
// incomplete when irrelevant or redundant `channel_update`s are delivered.
let chanmon_cfgs = create_chanmon_cfgs(3);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
// Freshly-constructed managers report needing persistence on first poll.
assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
assert!(nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1);
// Double the fee in both directions' updates so that applying them later will
// genuinely change the nodes' channel info (and thus require persistence).
chan.0.contents.fee_base_msat *= 2;
chan.1.contents.fee_base_msat *= 2;
let node_a_chan_info = nodes[0].node.list_channels_with_counterparty(
&nodes[1].node.get_our_node_id()).pop().unwrap();
let node_b_chan_info = nodes[1].node.list_channels_with_counterparty(
&nodes[0].node.get_our_node_id()).pop().unwrap();
// Opening the channel required persistence for the two channel parties...
assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
// ...but not for the uninvolved node, and a second poll of the parties' futures
// is incomplete since nothing new has happened.
assert!(!nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
// Node 2 is not a party to the channel, so updates about it shouldn't require it
// to persist anything.
nodes[2].node.handle_channel_update(nodes[1].node.get_our_node_id(), &chan.0);
nodes[2].node.handle_channel_update(nodes[1].node.get_our_node_id(), &chan.1);
assert!(!nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
// Likewise, the channel parties should ignore these updates when delivered by
// the unrelated node 2.
nodes[0].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.0);
nodes[0].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.1);
nodes[1].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.0);
nodes[1].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.1);
assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
// Pick, via pubkey ordering and the update's direction flag (bit 0 of
// channel_flags), which update each side originated: `as_update` is presumably
// node A's own update and `bs_update` node B's.
let as_node_one = nodes[0].node.get_our_node_id().serialize()[..] < nodes[1].node.get_our_node_id().serialize()[..];
let as_update = if as_node_one == (chan.0.contents.channel_flags & 1 == 0 ) { &chan.0 } else { &chan.1 };
let bs_update = if as_node_one == (chan.0.contents.channel_flags & 1 == 0 ) { &chan.1 } else { &chan.0 };
// Delivering a node's own update back to it (spurious reflection) must be
// ignored: no persistence needed and the channel info stays unchanged.
nodes[0].node.handle_channel_update(nodes[1].node.get_our_node_id(), &as_update);
nodes[1].node.handle_channel_update(nodes[0].node.get_our_node_id(), &bs_update);
assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
// Delivering the counterparty's (fee-doubled) update must take effect: the
// futures complete and the locally-stored channel info changes.
nodes[0].node.handle_channel_update(nodes[1].node.get_our_node_id(), &bs_update);
nodes[1].node.handle_channel_update(nodes[0].node.get_our_node_id(), &as_update);
assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
}
// Test that a keysend payment whose payment hash collides with the hash of a
// partially-received (pending) MPP payment is failed back by the recipient, and
// that the MPP payment can still be completed and claimed afterwards.
#[test]
fn test_keysend_dup_hash_partial_mpp() {
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes(&nodes, 0, 1);
// Build a two-path MPP route by duplicating the single path of the base route.
let (route, our_payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
let mut mpp_route = route.clone();
mpp_route.paths.push(mpp_route.paths[0].clone());
let payment_id = PaymentId([42; 32]);
let cur_height = CHAN_CONFIRM_DEPTH + 1; let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
RecipientOnionFields::secret_only(payment_secret), payment_id, &mpp_route).unwrap();
// Send only the first MPP shard; the recipient now holds a partial payment for
// `our_payment_hash` (hence `false` for payment-claimable below).
nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &our_payment_hash,
RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
// Now send a keysend using the same preimage. Its payment hash collides with the
// pending MPP payment's hash, so the recipient should fail the HTLC back.
nodes[0].node.send_spontaneous_payment(
Some(payment_preimage), RecipientOnionFields::spontaneous_empty(),
PaymentId(payment_preimage.0), route.route_params.clone().unwrap(), Retry::Attempts(0)
).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let ev = events.drain(..).next().unwrap();
let payment_event = SendEvent::from_event(ev);
nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
expect_pending_htlcs_forwardable!(nodes[1]);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
check_added_monitors!(nodes[1], 1);
// The recipient's response must be exactly one fail-HTLC, nothing else.
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
assert!(updates.update_fulfill_htlcs.is_empty());
assert_eq!(updates.update_fail_htlcs.len(), 1);
assert!(updates.update_fail_malformed_htlcs.is_empty());
assert!(updates.update_fee.is_none());
nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
expect_payment_failed!(nodes[0], our_payment_hash, true);
// Complete the MPP payment by sending the second shard, then claim both parts.
nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &our_payment_hash,
RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), true, None);
nodes[1].node.claim_funds(payment_preimage);
expect_payment_claimed!(nodes[1], our_payment_hash, 200_000);
// Two monitor updates: one per claimed HTLC shard.
check_added_monitors!(nodes[1], 2);
// Manually walk both fulfill/commitment/RAA exchanges (one per shard), since
// the second fulfill is only released after the first round's revoke-and-ack.
let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]);
expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed);
check_added_monitors!(nodes[0], 1);
let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa);
check_added_monitors!(nodes[1], 1);
let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_first_cs);
check_added_monitors!(nodes[1], 1);
let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
check_added_monitors!(nodes[0], 1);
let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa);
let as_second_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
check_added_monitors!(nodes[0], 1);
nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa);
check_added_monitors!(nodes[1], 1);
nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_second_updates.commitment_signed);
check_added_monitors!(nodes[1], 1);
let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_third_raa);
check_added_monitors!(nodes[0], 1);
// One PaymentPathSuccessful event per MPP shard; both paths are identical copies,
// so both events are compared against `route.paths[0]`.
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
match events[0] {
Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
assert_eq!(payment_id, *actual_payment_id);
assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
assert_eq!(route.paths[0], *path);
},
_ => panic!("Unexpected event"),
}
match events[1] {
Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
assert_eq!(payment_id, *actual_payment_id);
assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
assert_eq!(route.paths[0], *path);
},
_ => panic!("Unexpected event"),
}
}
// Test recipient behavior when a payment's hash duplicates one already known to it,
// across several combinations of keysend and non-keysend payments:
// (1) keysend colliding with a pending non-keysend payment -> failed back,
// (2) non-keysend colliding with a pending keysend -> failed back,
// (3) keysend colliding with a pending keysend -> failed back.
#[test]
fn test_keysend_dup_payment_hash() {
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes(&nodes, 0, 1);
let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let expected_route = [&nodes[1]];
// Scenario (1): a regular payment is pending at nodes[1]; a keysend using the same
// preimage (and thus the same payment hash) must be failed back.
let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &expected_route, 100_000);
let route_params = RouteParameters::from_payment_params_and_value(
PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(),
TEST_FINAL_CLTV, false), 100_000);
nodes[0].node.send_spontaneous_payment(
Some(payment_preimage), RecipientOnionFields::spontaneous_empty(),
PaymentId(payment_preimage.0), route_params.clone(), Retry::Attempts(0)
).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let ev = events.drain(..).next().unwrap();
let payment_event = SendEvent::from_event(ev);
nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
expect_pending_htlcs_forwardable!(nodes[1]);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
check_added_monitors!(nodes[1], 1);
// The duplicate-hash keysend is failed with exactly one fail-HTLC message.
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
assert!(updates.update_fulfill_htlcs.is_empty());
assert_eq!(updates.update_fail_htlcs.len(), 1);
assert!(updates.update_fail_malformed_htlcs.is_empty());
assert!(updates.update_fee.is_none());
nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
expect_payment_failed!(nodes[0], payment_hash, true);
// The original (non-keysend) payment is still claimable.
claim_payment(&nodes[0], &expected_route, payment_preimage);
// Scenario (2): land a fresh keysend at nodes[1] (shadowing preimage/hash), then
// send a non-keysend payment with the same hash; it must be failed back.
let payment_preimage = PaymentPreimage([42; 32]);
let route = find_route(
&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
).unwrap();
let payment_hash = nodes[0].node.send_spontaneous_payment(
Some(payment_preimage), RecipientOnionFields::spontaneous_empty(),
PaymentId(payment_preimage.0), route.route_params.clone().unwrap(), Retry::Attempts(0)
).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let event = events.pop().unwrap();
let path = vec![&nodes[1]];
pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
// Non-keysend payment with the same hash (arbitrary secret) collides with the
// pending keysend above.
let payment_secret = PaymentSecret([43; 32]);
nodes[0].node.send_payment_with_route(route.clone(), payment_hash,
RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let ev = events.drain(..).next().unwrap();
let payment_event = SendEvent::from_event(ev);
nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
expect_pending_htlcs_forwardable!(nodes[1]);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
check_added_monitors!(nodes[1], 1);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
assert!(updates.update_fulfill_htlcs.is_empty());
assert_eq!(updates.update_fail_htlcs.len(), 1);
assert!(updates.update_fail_malformed_htlcs.is_empty());
assert!(updates.update_fee.is_none());
nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
expect_payment_failed!(nodes[0], payment_hash, true);
// The pending keysend remains claimable.
claim_payment(&nodes[0], &expected_route, payment_preimage);
// Scenario (3): keysend colliding with a pending keysend. First land one keysend...
let payment_id_1 = PaymentId([44; 32]);
let payment_hash = nodes[0].node.send_spontaneous_payment(
Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_1,
route.route_params.clone().unwrap(), Retry::Attempts(0)
).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let event = events.pop().unwrap();
let path = vec![&nodes[1]];
pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
// ...then a second keysend with the same preimage but a different payment id.
let route_params = RouteParameters::from_payment_params_and_value(
PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV, false),
100_000
);
let payment_id_2 = PaymentId([45; 32]);
nodes[0].node.send_spontaneous_payment(
Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_2, route_params,
Retry::Attempts(0)
).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let ev = events.drain(..).next().unwrap();
let payment_event = SendEvent::from_event(ev);
nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
expect_pending_htlcs_forwardable!(nodes[1]);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
check_added_monitors!(nodes[1], 1);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
assert!(updates.update_fulfill_htlcs.is_empty());
assert_eq!(updates.update_fail_htlcs.len(), 1);
assert!(updates.update_fail_malformed_htlcs.is_empty());
assert!(updates.update_fee.is_none());
nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
expect_payment_failed!(nodes[0], payment_hash, true);
// The first keysend is still claimable after the duplicate was failed.
claim_payment(&nodes[0], &expected_route, payment_preimage);
}
// Test that a recipient rejects a keysend HTLC whose onion-carried preimage does not
// hash to the HTLC's payment hash, logging the mismatch rather than accepting funds.
#[test]
fn test_keysend_hash_mismatch() {
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let payer_pubkey = nodes[0].node.get_our_node_id();
let payee_pubkey = nodes[1].node.get_our_node_id();
let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
let route_params = RouteParameters::from_payment_params_and_value(
PaymentParameters::for_keysend(payee_pubkey, 40, false), 10_000);
let network_graph = nodes[0].network_graph;
let first_hops = nodes[0].node.list_usable_channels();
let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
).unwrap();
// Deliberately pair a preimage with a payment hash it does not hash to; the test-only
// send path below lets us inject this invalid combination.
let test_preimage = PaymentPreimage([42; 32]);
let mismatch_payment_hash = PaymentHash([43; 32]);
let session_privs = nodes[0].node.test_add_new_pending_payment(mismatch_payment_hash,
RecipientOnionFields::spontaneous_empty(), PaymentId(mismatch_payment_hash.0), &route).unwrap();
nodes[0].node.test_send_payment_internal(&route, mismatch_payment_hash,
RecipientOnionFields::spontaneous_empty(), Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap();
check_added_monitors!(nodes[0], 1);
// The sender emits a single add-HTLC update carrying the bogus keysend.
let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
assert_eq!(updates.update_add_htlcs.len(), 1);
assert!(updates.update_fulfill_htlcs.is_empty());
assert!(updates.update_fail_htlcs.is_empty());
assert!(updates.update_fail_malformed_htlcs.is_empty());
assert!(updates.update_fee.is_none());
nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
// The recipient detects the preimage/hash mismatch and logs it.
nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Payment preimage didn't match payment hash", 1);
}
// Sending a multi-path payment without a payment secret must be rejected up front
// with an `APIMisuseError` rather than being sent.
#[test]
fn test_multi_hop_missing_secret() {
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);

	// Diamond topology: 0 -> 1 -> 3 and 0 -> 2 -> 3.
	let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
	let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
	let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
	let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;

	// Duplicate the single path, then aim each copy down a different side of the diamond.
	let (mut route, payment_hash, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
	let duplicated_path = route.paths[0].clone();
	route.paths.push(duplicated_path);
	route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
	route.paths[0].hops[0].short_channel_id = chan_1_id;
	route.paths[0].hops[1].short_channel_id = chan_3_id;
	route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
	route.paths[1].hops[0].short_channel_id = chan_2_id;
	route.paths[1].hops[1].short_channel_id = chan_4_id;

	// `spontaneous_empty` carries no payment secret, so this MPP send must be refused.
	let send_err = nodes[0].node.send_payment_with_route(route, payment_hash,
		RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)).unwrap_err();
	match send_err {
		PaymentSendFailure::ParameterError(APIError::APIMisuseError { ref err }) => {
			assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err))
		},
		_ => panic!("unexpected error")
	}
}
// Test that a ChannelUpdate broadcast generated while we have no connected peers is
// cached in `pending_broadcast_messages` and only released (as a
// BroadcastChannelUpdate event) once some peer connects.
#[test]
fn test_channel_update_cached() {
let chanmon_cfgs = create_chanmon_cfgs(3);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
// Force-close the 0<->1 channel; this should produce a disabling ChannelUpdate.
nodes[0].node.force_close_channel_with_peer(&chan.2, &nodes[1].node.get_our_node_id(), None, true).unwrap();
check_added_monitors!(nodes[0], 1);
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
// The counterparty received no message events from the close.
let node_1_events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(node_1_events.len(), 0);
{
// The update is held in the broadcast cache rather than delivered.
let pending_broadcast_messages= nodes[0].node.pending_broadcast_messages.lock().unwrap();
assert_eq!(pending_broadcast_messages.len(), 1);
}
// Disconnect all of node 0's peers so nothing can be broadcast.
nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
nodes[0].node.peer_disconnected(nodes[2].node.get_our_node_id());
nodes[2].node.peer_disconnected(nodes[0].node.get_our_node_id());
// Still no message events while disconnected.
let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(node_0_events.len(), 0);
// Reconnect node 0 to node 2 (an unrelated peer) in both directions.
nodes[0].node.peer_connected(nodes[2].node.get_our_node_id(), &msgs::Init {
features: nodes[2].node.init_features(), networks: None, remote_network_address: None
}, true).unwrap();
nodes[2].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
features: nodes[0].node.init_features(), networks: None, remote_network_address: None
}, false).unwrap();
// With a peer connected again, the cached update is surfaced for broadcast...
let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(node_0_events.len(), 1);
match &node_0_events[0] {
MessageSendEvent::BroadcastChannelUpdate { .. } => (),
_ => panic!("Unexpected event"),
}
{
// ...and removed from the cache.
let pending_broadcast_messages= nodes[0].node.pending_broadcast_messages.lock().unwrap();
assert_eq!(pending_broadcast_messages.len(), 0);
}
}
// A disconnected peer's state entry should be garbage-collected by the timer tick
// once its last channel is gone — but not before the tick runs.
#[test]
fn test_drop_disconnected_peers_when_removing_channels() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);

	// Disconnect the peers before force-closing the channel.
	nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());

	let channel_id = nodes[0].node.list_channels()[0].channel_id;
	nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id(), "Channel force-closed".to_string()).unwrap();
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 1_000_000);

	// The peer entry survives the close itself...
	{
		let peer_state = nodes[0].node.per_peer_state.read().unwrap();
		assert_eq!(peer_state.len(), 1);
		assert!(peer_state.get(&nodes[1].node.get_our_node_id()).is_some());
	}

	// ...but is dropped on the next timer tick, as the peer is disconnected and has
	// no channels remaining.
	nodes[0].node.timer_tick_occurred();
	assert_eq!(nodes[0].node.per_peer_state.read().unwrap().len(), 0);
}
// Disconnecting while a channel is still unfunded should close it immediately with
// `ClosureReason::DisconnectedPeer` and drop the now-empty peer state on both sides.
#[test]
fn test_drop_peers_when_removing_unfunded_channels() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);

	// The funder is prompted for a funding transaction but never provides one.
	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1, "Unexpected events {:?}", events);
	assert!(matches!(events[0], Event::FundingGenerationReady { .. }), "Unexpected event {:?}", events);

	nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());

	// Both sides close the unfunded channel and forget the peer right away.
	check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer, [nodes[1].node.get_our_node_id()], 1_000_000);
	check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer, [nodes[0].node.get_our_node_id()], 1_000_000);
	assert_eq!(nodes[0].node.per_peer_state.read().unwrap().len(), 0);
	assert_eq!(nodes[1].node.per_peer_state.read().unwrap().len(), 0);
}
// `inbound_payment::verify` must reject a tampered payment hash (logging the failure)
// while still accepting the genuine one.
#[test]
fn bad_inbound_payment_hash() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[0]);
	let payment_data = msgs::FinalOnionHopData {
		payment_secret,
		total_msat: 100_000,
	};

	// Flip one byte of the hash so it no longer corresponds to the payment data.
	let mut corrupted_hash = payment_hash.clone();
	corrupted_hash.0[0] += 1;

	// Verification of the corrupted hash must fail and be logged.
	let verify_res = inbound_payment::verify(corrupted_hash, &payment_data,
		nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64,
		&nodes[0].node.inbound_payment_key, &nodes[0].logger);
	assert!(verify_res.is_err(), "Unexpected ok");
	nodes[0].logger.assert_log_contains("lightning::ln::inbound_payment", "Failing HTLC with user-generated payment_hash", 1);

	// The untampered hash still verifies.
	assert!(inbound_payment::verify(payment_hash, &payment_data,
		nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64,
		&nodes[0].node.inbound_payment_key, &nodes[0].logger).is_ok());
}
// Test that the `outpoint_to_peer` map is populated at the right points of the
// funding flow on each side and emptied again once a cooperative close completes.
#[test]
fn test_outpoint_to_peer_coverage() {
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
// Negotiate open_channel/accept_channel by hand so we can inspect state mid-funding.
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel);
let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel);
let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
let channel_id = ChannelId::from_bytes(tx.compute_txid().to_byte_array());
{
// Before a funding outpoint exists, neither map has an entry.
assert_eq!(nodes[0].node.outpoint_to_peer.lock().unwrap().len(), 0);
assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
}
nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
{
// The funder records the outpoint as soon as the funding tx is provided.
let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
assert_eq!(nodes_0_lock.len(), 1);
assert!(nodes_0_lock.contains_key(&funding_output));
}
// The fundee has not seen funding_created yet and so has no entry.
assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg);
{
let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
assert_eq!(nodes_0_lock.len(), 1);
assert!(nodes_0_lock.contains_key(&funding_output));
}
expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
{
// After handling funding_created the fundee has the entry too.
let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
assert_eq!(nodes_1_lock.len(), 1);
assert!(nodes_1_lock.contains_key(&funding_output));
}
check_added_monitors!(nodes[1], 1);
let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed);
check_added_monitors!(nodes[0], 1);
expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
// Confirm the funding and announce the channel, then cooperatively close it.
let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
let (announcement, nodes_0_update, nodes_1_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &nodes_0_update, &nodes_1_update);
nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
nodes[1].node.handle_shutdown(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
let nodes_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
nodes[0].node.handle_shutdown(nodes[1].node.get_our_node_id(), &nodes_1_shutdown);
let closing_signed_node_0 = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
nodes[1].node.handle_closing_signed(nodes[0].node.get_our_node_id(), &closing_signed_node_0);
{
// Mid-close, both sides still track the outpoint.
let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
assert_eq!(nodes_0_lock.len(), 1);
assert!(nodes_0_lock.contains_key(&funding_output));
}
{
let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
assert_eq!(nodes_1_lock.len(), 1);
assert!(nodes_1_lock.contains_key(&funding_output));
}
nodes[0].node.handle_closing_signed(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
{
// Node 0's close negotiation is done, so its entry is removed; node 1 still waits
// for the final closing_signed.
assert_eq!(nodes[0].node.outpoint_to_peer.lock().unwrap().len(), 0);
let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
assert_eq!(nodes_1_lock.len(), 1);
assert!(nodes_1_lock.contains_key(&funding_output));
}
let (_nodes_0_update, closing_signed_node_0) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
nodes[1].node.handle_closing_signed(nodes[0].node.get_our_node_id(), &closing_signed_node_0.unwrap());
{
// After the final closing_signed, node 1's entry is removed as well.
assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
}
let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
}
/// Asserts that `res_err` carries the "Not connected to node" API error message for
/// `expected_public_key`.
fn check_not_connected_to_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
	check_api_error_message(format!("Not connected to node: {}", expected_public_key), res_err)
}
/// Asserts that `res_err` carries the "can't find a peer" API error message for
/// `expected_public_key`.
fn check_unkown_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
	check_api_error_message(format!("Can't find a peer matching the passed counterparty node_id {}", expected_public_key), res_err)
}
/// Asserts that `res_err` carries the "channel not found" API error message for the
/// given channel id and peer.
fn check_channel_unavailable_error<T>(res_err: Result<T, APIError>, expected_channel_id: ChannelId, peer_node_id: PublicKey) {
	check_api_error_message(format!("Channel with id {} not found for the passed counterparty node_id {}", expected_channel_id, peer_node_id), res_err)
}
/// Asserts that `res_err` carries the fixed "no such channel awaiting acceptance"
/// API misuse message.
fn check_api_misuse_error<T>(res_err: Result<T, APIError>) {
	check_api_error_message("No such channel awaiting to be accepted.".to_string(), res_err)
}
/// Asserts that `res_err` is an `APIMisuseError` or `ChannelUnavailable` whose message
/// equals `expected_err_message`; panics on `Ok` or any other error variant.
fn check_api_error_message<T>(expected_err_message: String, res_err: Result<T, APIError>) {
	let actual_err_message = match res_err {
		Err(APIError::APIMisuseError { err }) | Err(APIError::ChannelUnavailable { err }) => err,
		Ok(_) => panic!("Unexpected Ok"),
		Err(_) => panic!("Unexpected Error"),
	};
	assert_eq!(actual_err_message, expected_err_message);
}
// Every channel-management API call aimed at a peer we've never seen should fail
// with a peer-not-found style error rather than succeed or panic.
#[test]
fn test_api_calls_with_unkown_counterparty_node() {
	let chanmon_cfg = create_chanmon_cfgs(2);
	let node_cfg = create_node_cfgs(2, &chanmon_cfg);
	let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
	let nodes = create_network(2, &node_cfg, &node_chanmgr);

	// A deterministic pubkey guaranteed not to match either test node.
	let unkown_public_key = PublicKey::from_secret_key(&Secp256k1::signing_only(), &SecretKey::from_slice(&[42; 32]).unwrap());
	let channel_id = ChannelId::from_bytes([4; 32]);
	let intercept_id = InterceptId([0; 32]);

	// `create_channel` requires a connected peer; the rest require a known one.
	check_not_connected_to_peer_error(nodes[0].node.create_channel(unkown_public_key, 1_000_000, 500_000_000, 42, None, None), unkown_public_key);
	check_unkown_peer_error(nodes[0].node.accept_inbound_channel(&channel_id, &unkown_public_key, 42), unkown_public_key);
	check_unkown_peer_error(nodes[0].node.close_channel(&channel_id, &unkown_public_key), unkown_public_key);
	check_unkown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unkown_public_key, "Channel force-closed".to_string()), unkown_public_key);
	check_unkown_peer_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &unkown_public_key, "Channel force-closed".to_string()), unkown_public_key);
	check_unkown_peer_error(nodes[0].node.forward_intercepted_htlc(intercept_id, &channel_id, unkown_public_key, 1_000_000), unkown_public_key);
	check_unkown_peer_error(nodes[0].node.update_channel_config(&unkown_public_key, &[channel_id], &ChannelConfig::default()), unkown_public_key);
}
// API calls naming a channel id that doesn't exist with a known peer should fail with
// a channel-unavailable error (or API misuse, for inbound-channel acceptance).
#[test]
fn test_api_calls_with_unavailable_channel() {
	let chanmon_cfg = create_chanmon_cfgs(2);
	let node_cfg = create_node_cfgs(2, &chanmon_cfg);
	let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
	let nodes = create_network(2, &node_cfg, &node_chanmgr);

	let counterparty_node_id = nodes[1].node.get_our_node_id();
	// A channel id that was never assigned to any channel.
	let channel_id = ChannelId::from_bytes([4; 32]);

	check_api_misuse_error(nodes[0].node.accept_inbound_channel(&channel_id, &counterparty_node_id, 42));
	check_channel_unavailable_error(nodes[0].node.close_channel(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
	check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id, "Channel force-closed".to_string()), channel_id, counterparty_node_id);
	check_channel_unavailable_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id, "Channel force-closed".to_string()), channel_id, counterparty_node_id);
	check_channel_unavailable_error(nodes[0].node.forward_intercepted_htlc(InterceptId([0; 32]), &channel_id, counterparty_node_id, 1_000_000), channel_id, counterparty_node_id);
	check_channel_unavailable_error(nodes[0].node.update_channel_config(&counterparty_node_id, &[channel_id], &ChannelConfig::default()), channel_id, counterparty_node_id);
}
#[test]
fn test_connection_limiting() {
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
let mut funding_tx = None;
for idx in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
if idx == 0 {
nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel);
let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
funding_tx = Some(tx.clone());
nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx).unwrap();
let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg);
check_added_monitors!(nodes[1], 1);
expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed);
check_added_monitors!(nodes[0], 1);
expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
}
open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
}
open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(
&nodes[0].keys_manager);
nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
open_channel_msg.common_fields.temporary_channel_id);
let mut peer_pks = Vec::with_capacity(super::MAX_NO_CHANNEL_PEERS);
for _ in 1..super::MAX_NO_CHANNEL_PEERS {
let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
peer_pks.push(random_pk);
nodes[1].node.peer_connected(random_pk, &msgs::Init {
features: nodes[0].node.init_features(), networks: None, remote_network_address: None
}, true).unwrap();
}
let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
nodes[1].node.peer_connected(last_random_pk, &msgs::Init {
features: nodes[0].node.init_features(), networks: None, remote_network_address: None
}, true).unwrap_err();
nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
let chan_closed_events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(chan_closed_events.len(), super::MAX_UNFUNDED_CHANS_PER_PEER - 1);
for ev in chan_closed_events {
if let Event::ChannelClosed { .. } = ev { } else { panic!(); }
}
nodes[1].node.peer_connected(last_random_pk, &msgs::Init {
features: nodes[0].node.init_features(), networks: None, remote_network_address: None
}, true).unwrap();
nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
features: nodes[0].node.init_features(), networks: None, remote_network_address: None
}, true).unwrap_err();
nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
features: nodes[0].node.init_features(), networks: None, remote_network_address: None
}, false).unwrap();
nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
assert!(peer_pks.len() > super::MAX_UNFUNDED_CHANNEL_PEERS - 1);
for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
nodes[1].node.handle_open_channel(peer_pks[i], &open_channel_msg);
get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]);
open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
}
nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
open_channel_msg.common_fields.temporary_channel_id);
nodes[1].node.create_channel(last_random_pk, 100_000, 0, 42, None, None).unwrap();
get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, last_random_pk);
mine_transaction(&nodes[1], funding_tx.as_ref().unwrap());
nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
features: nodes[0].node.init_features(), networks: None, remote_network_address: None
}, true).unwrap();
get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
}
#[test]
fn test_outbound_chans_unlimited() {
// The MAX_UNFUNDED_CHANS_PER_PEER limit applies to *inbound* unfunded channels only: once the
// limit is hit further inbound opens are rejected, but we can still open outbound channels to
// the same peer -- and having an outbound channel does not relax the inbound limit.
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
// Fill nodes[1]'s per-peer unfunded inbound channel quota; each open is accepted.
for _ in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
}
// One past the limit: nodes[1] rejects, erroring on the offending temporary channel id.
nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
open_channel_msg.common_fields.temporary_channel_id);
// nodes[1] can still initiate an outbound channel to nodes[0] despite being at the inbound
// limit...
nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
// ...but that outbound channel does not grant nodes[0] any additional inbound slots.
nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
open_channel_msg.common_fields.temporary_channel_id);
}
#[test]
fn test_0conf_limiting() {
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let mut settings = test_default_channel_config();
settings.manually_accept_inbound_channels = true;
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(settings)]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
for _ in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
nodes[1].node.peer_connected(random_pk, &msgs::Init {
features: nodes[0].node.init_features(), networks: None, remote_network_address: None
}, true).unwrap();
nodes[1].node.handle_open_channel(random_pk, &open_channel_msg);
let events = nodes[1].node.get_and_clear_pending_events();
match events[0] {
Event::OpenChannelRequest { temporary_channel_id, .. } => {
nodes[1].node.accept_inbound_channel(&temporary_channel_id, &random_pk, 23).unwrap();
}
_ => panic!("Unexpected event"),
}
get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, random_pk);
open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
}
let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
nodes[1].node.peer_connected(last_random_pk, &msgs::Init {
features: nodes[0].node.init_features(), networks: None, remote_network_address: None
}, true).unwrap();
nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
let events = nodes[1].node.get_and_clear_pending_events();
match events[0] {
Event::OpenChannelRequest { temporary_channel_id, .. } => {
match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &last_random_pk, 23) {
Err(APIError::APIMisuseError { err }) =>
assert_eq!(err, "Too many peers with unfunded channels, refusing to accept new ones"),
_ => panic!(),
}
}
_ => panic!("Unexpected event"),
}
assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
open_channel_msg.common_fields.temporary_channel_id);
nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
let events = nodes[1].node.get_and_clear_pending_events();
match events[0] {
Event::OpenChannelRequest { temporary_channel_id, .. } => {
nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &last_random_pk, 23).unwrap();
}
_ => panic!("Unexpected event"),
}
get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
}
#[test]
fn reject_excessively_underpaying_htlcs() {
	// An inbound HTLC paying even one msat less than the sender intended (after accounting for
	// an acceptable skimmed fee) must be rejected; paying exactly the intended amount minus the
	// skimmed fee is accepted.
	let chanmon_cfg = create_chanmon_cfgs(1);
	let node_cfg = create_node_cfgs(1, &chanmon_cfg);
	let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
	let node = create_network(1, &node_cfg, &node_chanmgr);

	let sender_intended_amt_msat = 100;
	let extra_fee_msat = 10;

	// The payload type isn't cloned in the original either, so build a fresh one per call.
	let build_hop_data = || msgs::InboundOnionPayload::Receive {
		sender_intended_htlc_amt_msat: 100,
		cltv_expiry_height: 42,
		payment_metadata: None,
		keysend_preimage: None,
		payment_data: Some(msgs::FinalOnionHopData {
			payment_secret: PaymentSecret([0; 32]),
			total_msat: sender_intended_amt_msat,
		}),
		custom_tlvs: Vec::new(),
	};

	let current_height: u32 = node[0].node.best_block.read().unwrap().height;
	// One msat short: rejected with error code 19 (BOLT 4 final_incorrect_htlc_amount).
	match create_recv_pending_htlc_info(build_hop_data(), [0; 32], PaymentHash([0; 32]),
		sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat),
		current_height)
	{
		Err(crate::ln::channelmanager::InboundHTLCErr { err_code, .. }) => assert_eq!(err_code, 19),
		_ => panic!(),
	}

	let current_height: u32 = node[0].node.best_block.read().unwrap().height;
	// Exactly intended-amount-minus-skimmed-fee: accepted.
	assert!(create_recv_pending_htlc_info(build_hop_data(), [0; 32], PaymentHash([0; 32]),
		sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat),
		current_height).is_ok());
}
#[test]
fn test_final_incorrect_cltv(){
	// A final hop whose onion-specified cltv_expiry_height (22) is below the HTLC's actual
	// expiry (23) is still accepted here -- create_recv_pending_htlc_info returns Ok for this
	// input (the CLTV mismatch is presumably enforced elsewhere; confirm against callers).
	let chanmon_cfg = create_chanmon_cfgs(1);
	let node_cfg = create_node_cfgs(1, &chanmon_cfg);
	let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
	let node = create_network(1, &node_cfg, &node_chanmgr);

	let best_height: u32 = node[0].node.best_block.read().unwrap().height;
	let payload = msgs::InboundOnionPayload::Receive {
		sender_intended_htlc_amt_msat: 100,
		cltv_expiry_height: 22,
		payment_metadata: None,
		keysend_preimage: None,
		payment_data: Some(msgs::FinalOnionHopData {
			payment_secret: PaymentSecret([0; 32]),
			total_msat: 100,
		}),
		custom_tlvs: Vec::new(),
	};
	let info = create_recv_pending_htlc_info(
		payload, [0; 32], PaymentHash([0; 32]), 100, 23, None, true, None, best_height);
	assert!(info.is_ok());
}
#[test]
fn test_inbound_anchors_manual_acceptance() {
// An inbound open_channel negotiating anchors_zero_fee_htlc_tx is rejected outright when
// manual acceptance is off (nodes[1]), but surfaces an OpenChannelRequest event -- which can
// then be accepted -- when manual acceptance is on (nodes[2]).
let mut anchors_cfg = test_default_channel_config();
anchors_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
let mut anchors_manual_accept_cfg = anchors_cfg.clone();
anchors_manual_accept_cfg.manually_accept_inbound_channels = true;
let chanmon_cfgs = create_chanmon_cfgs(3);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs,
&[Some(anchors_cfg.clone()), Some(anchors_cfg.clone()), Some(anchors_manual_accept_cfg.clone())]);
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
// nodes[1]: anchors negotiated but no manual acceptance -- immediate error, no events.
nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
match &msg_events[0] {
MessageSendEvent::HandleError { node_id, action } => {
assert_eq!(*node_id, nodes[0].node.get_our_node_id());
match action {
ErrorAction::SendErrorMessage { msg } =>
assert_eq!(msg.data, "No channels with anchor outputs accepted".to_owned()),
_ => panic!("Unexpected error action"),
}
}
_ => panic!("Unexpected event"),
}
// nodes[2]: manual acceptance on -- the open surfaces as OpenChannelRequest and can be
// accepted, producing an accept_channel message.
nodes[2].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
let events = nodes[2].node.get_and_clear_pending_events();
match events[0] {
Event::OpenChannelRequest { temporary_channel_id, .. } =>
nodes[2].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23).unwrap(),
_ => panic!("Unexpected event"),
}
get_event_msg!(nodes[2], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
}
#[test]
fn test_anchors_zero_fee_htlc_tx_fallback() {
// If the counterparty responds to our anchors open_channel with an error, we retry the
// channel open without the anchors_zero_fee_htlc_tx channel type.
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let mut anchors_config = test_default_channel_config();
anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
anchors_config.manually_accept_inbound_channels = true;
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config.clone())]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let error_message = "Channel force-closed";
// First attempt negotiates anchors...
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 0, None, None).unwrap();
let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
assert!(open_channel_msg.common_fields.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx());
// ...which nodes[1] answers with an error (force-closing the just-requested channel).
nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
let events = nodes[1].node.get_and_clear_pending_events();
match events[0] {
Event::OpenChannelRequest { temporary_channel_id, .. } => {
nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
}
_ => panic!("Unexpected event"),
}
let error_msg = get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id());
nodes[0].node.handle_error(nodes[1].node.get_our_node_id(), &error_msg);
// On receiving the error, nodes[0] re-sends open_channel without the anchors type.
let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
assert!(!open_channel_msg.common_fields.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());
// The rejected (never-funded) channel generates no events on nodes[1].
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
}
#[test]
fn test_update_channel_config() {
// Exercises update_channel_config / update_partial_channel_config: unchanged configs emit no
// channel_update broadcast, changed fields do, partial updates only touch the given fields,
// and an unknown channel id in the batch fails the call without applying any changes.
let chanmon_cfg = create_chanmon_cfgs(2);
let node_cfg = create_node_cfgs(2, &chanmon_cfg);
let mut user_config = test_default_channel_config();
let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
let nodes = create_network(2, &node_cfg, &node_chanmgr);
let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
let channel = &nodes[0].node.list_channels()[0];
// Re-applying the identical config: no broadcast.
nodes[0].node.update_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &user_config.channel_config).unwrap();
let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 0);
// Changing forwarding_fee_base_msat: applied and broadcast.
user_config.channel_config.forwarding_fee_base_msat += 10;
nodes[0].node.update_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &user_config.channel_config).unwrap();
assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_base_msat, user_config.channel_config.forwarding_fee_base_msat);
let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
match &events[0] {
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
_ => panic!("expected BroadcastChannelUpdate event"),
}
// An all-default (empty) partial update: no broadcast.
nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate::default()).unwrap();
let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 0);
// Partial update of a single field (cltv_expiry_delta): applied and broadcast.
let new_cltv_expiry_delta = user_config.channel_config.cltv_expiry_delta + 6;
nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate {
cltv_expiry_delta: Some(new_cltv_expiry_delta),
..Default::default()
}).unwrap();
assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().cltv_expiry_delta, new_cltv_expiry_delta);
let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
match &events[0] {
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
_ => panic!("expected BroadcastChannelUpdate event"),
}
// A second partial update must leave the previously-set field (cltv_expiry_delta) intact.
let new_fee = user_config.channel_config.forwarding_fee_proportional_millionths + 100;
nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate {
forwarding_fee_proportional_millionths: Some(new_fee),
..Default::default()
}).unwrap();
assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().cltv_expiry_delta, new_cltv_expiry_delta);
assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, new_fee);
let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
match &events[0] {
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
_ => panic!("expected BroadcastChannelUpdate event"),
}
// Batch containing an unknown channel id: the call errors with ChannelUnavailable and no
// config change or broadcast happens, even for the valid id in the same batch.
let bad_channel_id = ChannelId::v1_from_funding_txid(&[10; 32], 10);
let current_fee = nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths;
let new_fee = current_fee + 100;
assert!(
matches!(
nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id, bad_channel_id], &ChannelConfigUpdate {
forwarding_fee_proportional_millionths: Some(new_fee),
..Default::default()
}),
Err(APIError::ChannelUnavailable { err: _ }),
)
);
assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, current_fee);
let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 0);
}
#[test]
fn test_payment_display() {
	// PaymentId, PaymentHash, and PaymentPreimage all Display as the lowercase hex encoding of
	// their 32 inner bytes; 0x2a repeated gives "2a" * 32.
	let expected_hex = "2a".repeat(32);
	let payment_id = PaymentId([42; 32]);
	assert_eq!(payment_id.to_string(), expected_hex);
	let payment_hash = PaymentHash([42; 32]);
	assert_eq!(payment_hash.to_string(), expected_hex);
	let payment_preimage = PaymentPreimage([42; 32]);
	assert_eq!(payment_preimage.to_string(), expected_hex);
}
#[test]
fn test_trigger_lnd_force_close() {
// After we force-close while disconnected, a reconnecting peer's channel_reestablish is
// answered with a reestablish carrying both commitment numbers set to 0 -- which (per the
// test name, matching lnd's behavior on such a message) prompts the peer to force-close and
// broadcast its latest commitment transaction.
let chanmon_cfg = create_chanmon_cfgs(2);
let node_cfg = create_node_cfgs(2, &chanmon_cfg);
let user_config = test_default_channel_config();
let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
let nodes = create_network(2, &node_cfg, &node_chanmgr);
let error_message = "Channel force-closed";
// Open a channel, disconnect, then have nodes[0] force-close and broadcast while offline.
let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast(&nodes[0], 1, true);
check_added_monitors(&nodes[0], 1);
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
{
let txn = nodes[0].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 1);
check_spends!(txn[0], funding_tx);
}
// Reconnect; nodes[1] (unaware of the close) sends channel_reestablish first.
nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init {
features: nodes[1].node.init_features(), networks: None, remote_network_address: None
}, true).unwrap();
nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
features: nodes[0].node.init_features(), networks: None, remote_network_address: None
}, false).unwrap();
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
let channel_reestablish = get_event_msg!(
nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()
);
nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &channel_reestablish);
// nodes[0] replies with a reestablish whose commitment numbers are both 0.
let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 2);
if let MessageSendEvent::SendChannelReestablish { node_id, msg } = &msg_events[0] {
assert_eq!(*node_id, nodes[1].node.get_our_node_id());
assert_eq!(msg.next_local_commitment_number, 0);
assert_eq!(msg.next_remote_commitment_number, 0);
nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &msg);
} else { panic!() };
// Handling that message makes nodes[1] force-close and broadcast its own latest state.
check_closed_broadcast(&nodes[1], 1, true);
check_added_monitors(&nodes[1], 1);
let expected_close_reason = ClosureReason::ProcessingError {
err: "Peer sent an invalid channel_reestablish to force close in a non-standard way".to_string()
};
check_closed_event!(nodes[1], 1, expected_close_reason, [nodes[0].node.get_our_node_id()], 100000);
{
let txn = nodes[1].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 1);
check_spends!(txn[0], funding_tx);
}
}
#[test]
fn test_malformed_forward_htlcs_ser() {
// Ensure pending `HTLCForwardInfo::FailHTLC` and `FailMalformedHTLC` entries in the
// forward_htlcs map survive ChannelManager serialization and a node reload intact.
let chanmon_cfg = create_chanmon_cfgs(1);
let node_cfg = create_node_cfgs(1, &chanmon_cfg);
let persister;
let chain_monitor;
let chanmgrs = create_node_chanmgrs(1, &node_cfg, &[None]);
let deserialized_chanmgr;
let mut nodes = create_network(1, &node_cfg, &chanmgrs);
let dummy_failed_htlc = |htlc_id| {
HTLCForwardInfo::FailHTLC { htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }, }
};
let dummy_malformed_htlc = |htlc_id| {
HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code: 0x4000, sha256_of_onion: [0; 32] }
};
// Two SCIDs with interleaved fail / fail-malformed entries (opposite parity per list) so
// both variants appear in both positions of the serialized map.
let dummy_htlcs_1: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| {
if htlc_id % 2 == 0 {
dummy_failed_htlc(htlc_id)
} else {
dummy_malformed_htlc(htlc_id)
}
}).collect();
let dummy_htlcs_2: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| {
if htlc_id % 2 == 1 {
dummy_failed_htlc(htlc_id)
} else {
dummy_malformed_htlc(htlc_id)
}
}).collect();
let (scid_1, scid_2) = (42, 43);
let mut forward_htlcs = new_hash_map();
forward_htlcs.insert(scid_1, dummy_htlcs_1.clone());
forward_htlcs.insert(scid_2, dummy_htlcs_2.clone());
// Inject the map directly, then serialize and reload the node.
let mut chanmgr_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
*chanmgr_fwd_htlcs = forward_htlcs.clone();
core::mem::drop(chanmgr_fwd_htlcs);
reload_node!(nodes[0], nodes[0].node.encode(), &[], persister, chain_monitor, deserialized_chanmgr);
// After reload the map must round-trip exactly, with nothing extra.
let mut deserialized_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
for scid in [scid_1, scid_2].iter() {
let deserialized_htlcs = deserialized_fwd_htlcs.remove(scid).unwrap();
assert_eq!(forward_htlcs.remove(scid).unwrap(), deserialized_htlcs);
}
assert!(deserialized_fwd_htlcs.is_empty());
core::mem::drop(deserialized_fwd_htlcs);
expect_pending_htlcs_forwardable!(nodes[0]);
}
}
#[cfg(ldk_bench)]
pub mod bench {
	//! Benchmarks of the full payment send/claim path between two in-memory nodes, including
	//! commitment signing and monitor persistence via caller-supplied `Persist` impls.
	use crate::chain::Listen;
	use crate::chain::chainmonitor::{ChainMonitor, Persist};
	use crate::sign::{KeysManager, InMemorySigner};
	use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider};
	use crate::ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId, RecipientOnionFields, Retry};
	use crate::ln::functional_test_utils::*;
	use crate::ln::msgs::{ChannelMessageHandler, Init};
	use crate::routing::gossip::NetworkGraph;
	use crate::routing::router::{PaymentParameters, RouteParameters};
	use crate::util::test_utils;
	use crate::util::config::{UserConfig, MaxDustHTLCExposure};

	use bitcoin::amount::Amount;
	use bitcoin::locktime::absolute::LockTime;
	use bitcoin::hashes::Hash;
	use bitcoin::hashes::sha256::Hash as Sha256;
	use bitcoin::{Transaction, TxOut};
	use bitcoin::transaction::Version;

	use crate::sync::{Arc, Mutex, RwLock};

	use criterion::Criterion;

	// The concrete `ChannelManager` type used in benches, generic only over the monitor
	// persister so different persistence backends can be measured.
	type Manager<'a, P> = ChannelManager<
		&'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource,
			&'a test_utils::TestBroadcaster, &'a test_utils::TestFeeEstimator,
			&'a test_utils::TestLogger, &'a P>,
		&'a test_utils::TestBroadcaster, &'a KeysManager, &'a KeysManager, &'a KeysManager,
		&'a test_utils::TestFeeEstimator, &'a test_utils::TestRouter<'a>,
		&'a test_utils::TestMessageRouter<'a>, &'a test_utils::TestLogger>;

	/// Minimal `NodeHolder` wrapper so the `get_event_msg!`/`expect_*!` test macros can be used
	/// with a bare `ChannelManager` reference (benches have no `TestChainMonitor`).
	struct ANodeHolder<'node_cfg, 'chan_mon_cfg: 'node_cfg, P: Persist<InMemorySigner>> {
		node: &'node_cfg Manager<'chan_mon_cfg, P>,
	}
	impl<'node_cfg, 'chan_mon_cfg: 'node_cfg, P: Persist<InMemorySigner>> NodeHolder for ANodeHolder<'node_cfg, 'chan_mon_cfg, P> {
		type CM = Manager<'chan_mon_cfg, P>;
		#[inline]
		fn node(&self) -> &Manager<'chan_mon_cfg, P> { self.node }
		#[inline]
		fn chain_monitor(&self) -> Option<&test_utils::TestChainMonitor> { None }
	}

	/// Runs [`bench_two_sends`] with no-op test persisters.
	pub fn bench_sends(bench: &mut Criterion) {
		bench_two_sends(bench, "bench_sends", test_utils::TestPersister::new(), test_utils::TestPersister::new());
	}

	/// Spins up two nodes, opens and confirms a channel between them, then benchmarks one
	/// payment in each direction per iteration (HTLC add, commitment dance, claim, fulfill).
	pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Criterion, bench_name: &str, persister_a: P, persister_b: P) {
		// Node A setup: chain plumbing, keys, and a ChannelManager with minimum_depth = 1 so
		// the channel becomes usable after a single block.
		let network = bitcoin::Network::Testnet;
		let genesis_block = bitcoin::constants::genesis_block(network);
		let tx_broadcaster = test_utils::TestBroadcaster::new(network);
		let fee_estimator = test_utils::TestFeeEstimator::new(253);
		let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
		let scorer = RwLock::new(test_utils::TestScorer::new());
		let entropy = test_utils::TestKeysInterface::new(&[0u8; 32], network);
		let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &logger_a, &scorer);
		let message_router = test_utils::TestMessageRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &entropy);

		let mut config: UserConfig = Default::default();
		config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253);
		config.channel_handshake_config.minimum_depth = 1;

		let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a);
		let seed_a = [1u8; 32];
		let keys_manager_a = KeysManager::new(&seed_a, 42, 42);
		let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &router, &message_router, &logger_a, &keys_manager_a, &keys_manager_a, &keys_manager_a, config.clone(), ChainParameters {
			network,
			best_block: BestBlock::from_network(network),
		}, genesis_block.header.time);
		let node_a_holder = ANodeHolder { node: &node_a };

		// Node B mirrors node A with its own logger, monitor, keys, and persister. (Previously
		// node B's logger was mislabeled "node a" and its ChainMonitor was wired to logger_a.)
		let logger_b = test_utils::TestLogger::with_id("node b".to_owned());
		let chain_monitor_b = ChainMonitor::new(None, &tx_broadcaster, &logger_b, &fee_estimator, &persister_b);
		let seed_b = [2u8; 32];
		let keys_manager_b = KeysManager::new(&seed_b, 42, 42);
		let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &router, &message_router, &logger_b, &keys_manager_b, &keys_manager_b, &keys_manager_b, config.clone(), ChainParameters {
			network,
			best_block: BestBlock::from_network(network),
		}, genesis_block.header.time);
		let node_b_holder = ANodeHolder { node: &node_b };

		// Connect the peers and run the open_channel/accept_channel handshake.
		node_a.peer_connected(node_b.get_our_node_id(), &Init {
			features: node_b.init_features(), networks: None, remote_network_address: None
		}, true).unwrap();
		node_b.peer_connected(node_a.get_our_node_id(), &Init {
			features: node_a.init_features(), networks: None, remote_network_address: None
		}, false).unwrap();
		node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None, None).unwrap();
		node_b.handle_open_channel(node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
		node_a.handle_accept_channel(node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));

		// Provide the funding transaction and complete funding_created / funding_signed,
		// checking both sides emit ChannelPending.
		let tx;
		if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) {
			tx = Transaction { version: Version::TWO, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
				value: Amount::from_sat(8_000_000), script_pubkey: output_script,
			}]};
			node_a.funding_transaction_generated(temporary_channel_id, node_b.get_our_node_id(), tx.clone()).unwrap();
		} else { panic!(); }

		node_b.handle_funding_created(node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingCreated, node_b.get_our_node_id()));
		let events_b = node_b.get_and_clear_pending_events();
		assert_eq!(events_b.len(), 1);
		match events_b[0] {
			Event::ChannelPending{ ref counterparty_node_id, .. } => {
				assert_eq!(*counterparty_node_id, node_a.get_our_node_id());
			},
			_ => panic!("Unexpected event"),
		}

		node_a.handle_funding_signed(node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingSigned, node_a.get_our_node_id()));
		let events_a = node_a.get_and_clear_pending_events();
		assert_eq!(events_a.len(), 1);
		match events_a[0] {
			Event::ChannelPending{ ref counterparty_node_id, .. } => {
				assert_eq!(*counterparty_node_id, node_b.get_our_node_id());
			},
			_ => panic!("Unexpected event"),
		}

		assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);

		// Confirm the funding transaction (minimum_depth is 1) and exchange channel_ready.
		let block = create_dummy_block(BestBlock::from_network(network).block_hash, 42, vec![tx]);
		Listen::block_connected(&node_a, &block, 1);
		Listen::block_connected(&node_b, &block, 1);

		node_a.handle_channel_ready(node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendChannelReady, node_a.get_our_node_id()));
		let msg_events = node_a.get_and_clear_pending_msg_events();
		assert_eq!(msg_events.len(), 2);
		match msg_events[0] {
			MessageSendEvent::SendChannelReady { ref msg, .. } => {
				node_b.handle_channel_ready(node_a.get_our_node_id(), msg);
				get_event_msg!(node_b_holder, MessageSendEvent::SendChannelUpdate, node_a.get_our_node_id());
			},
			_ => panic!(),
		}
		match msg_events[1] {
			MessageSendEvent::SendChannelUpdate { .. } => {},
			_ => panic!(),
		}

		let events_a = node_a.get_and_clear_pending_events();
		assert_eq!(events_a.len(), 1);
		match events_a[0] {
			Event::ChannelReady{ ref counterparty_node_id, .. } => {
				assert_eq!(*counterparty_node_id, node_b.get_our_node_id());
			},
			_ => panic!("Unexpected event"),
		}

		let events_b = node_b.get_and_clear_pending_events();
		assert_eq!(events_b.len(), 1);
		match events_b[0] {
			Event::ChannelReady{ ref counterparty_node_id, .. } => {
				assert_eq!(*counterparty_node_id, node_a.get_our_node_id());
			},
			_ => panic!("Unexpected event"),
		}

		// One full payment round trip: route, add HTLC, commitment/revocation dance, claim,
		// then the fulfill dance back. `payment_count` seeds a unique preimage per payment.
		let mut payment_count: u64 = 0;
		macro_rules! send_payment {
			($node_a: expr, $node_b: expr) => {
				let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id(), TEST_FINAL_CLTV)
					.with_bolt11_features($node_b.bolt11_invoice_features()).unwrap();
				let mut payment_preimage = PaymentPreimage([0; 32]);
				payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes());
				payment_count += 1;
				let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
				let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();

				$node_a.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
					PaymentId(payment_hash.0),
					RouteParameters::from_payment_params_and_value(payment_params, 10_000),
					Retry::Attempts(0)).unwrap();
				let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
				$node_b.handle_update_add_htlc($node_a.get_our_node_id(), &payment_event.msgs[0]);
				$node_b.handle_commitment_signed($node_a.get_our_node_id(), &payment_event.commitment_msg);
				let (raa, cs) = get_revoke_commit_msgs(&ANodeHolder { node: &$node_b }, &$node_a.get_our_node_id());
				$node_a.handle_revoke_and_ack($node_b.get_our_node_id(), &raa);
				$node_a.handle_commitment_signed($node_b.get_our_node_id(), &cs);
				$node_b.handle_revoke_and_ack($node_a.get_our_node_id(), &get_event_msg!(ANodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));

				expect_pending_htlcs_forwardable!(ANodeHolder { node: &$node_b });
				expect_payment_claimable!(ANodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
				$node_b.claim_funds(payment_preimage);
				expect_payment_claimed!(ANodeHolder { node: &$node_b }, payment_hash, 10_000);

				match $node_b.get_and_clear_pending_msg_events().pop().unwrap() {
					MessageSendEvent::UpdateHTLCs { node_id, updates } => {
						assert_eq!(node_id, $node_a.get_our_node_id());
						$node_a.handle_update_fulfill_htlc($node_b.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
						$node_a.handle_commitment_signed($node_b.get_our_node_id(), &updates.commitment_signed);
					},
					_ => panic!("Failed to generate claim event"),
				}

				let (raa, cs) = get_revoke_commit_msgs(&ANodeHolder { node: &$node_a }, &$node_b.get_our_node_id());
				$node_b.handle_revoke_and_ack($node_a.get_our_node_id(), &raa);
				$node_b.handle_commitment_signed($node_a.get_our_node_id(), &cs);
				$node_a.handle_revoke_and_ack($node_b.get_our_node_id(), &get_event_msg!(ANodeHolder { node: &$node_b }, MessageSendEvent::SendRevokeAndACK, $node_a.get_our_node_id()));

				expect_payment_sent!(ANodeHolder { node: &$node_a }, payment_preimage);
			}
		}

		bench.bench_function(bench_name, |b| b.iter(|| {
			send_payment!(node_a, node_b);
			send_payment!(node_b, node_a);
		}));
	}
}