// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
//! The logic to monitor for on-chain transactions and create the relevant claim responses lives
//! here.
//!
//! ChannelMonitor objects are generated by ChannelManager in response to relevant
//! messages/actions, and MUST be persisted to disk (and, preferably, remotely) before progress can
//! be made in responding to certain messages, see [`chain::Watch`] for more.
//!
//! Note that ChannelMonitors are an important part of the lightning trust model and a copy of the
//! latest ChannelMonitor must always be actively monitoring for chain updates (and no out-of-date
//! ChannelMonitors should do so). Thus, if you're building rust-lightning into an HSM or other
//! security-domain-separated system design, you should consider having multiple paths for
//! ChannelMonitors to get out of the HSM and onto monitoring devices.
use bitcoin::amount::Amount;
use bitcoin::block::Header;
use bitcoin::script::{Script, ScriptBuf};
use bitcoin::transaction::{OutPoint as BitcoinOutPoint, Transaction, TxOut};
use bitcoin::hash_types::{BlockHash, Txid};
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::Hash;
use bitcoin::ecdsa::Signature as BitcoinSignature;
use bitcoin::secp256k1::{self, ecdsa::Signature, PublicKey, Secp256k1, SecretKey};
use crate::chain;
use crate::chain::chaininterface::{
BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator,
};
use crate::chain::onchaintx::{ClaimEvent, FeerateStrategy, OnchainTxHandler};
use crate::chain::package::{
CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, HolderFundingOutput,
HolderHTLCOutput, PackageSolvingData, PackageTemplate, RevokedHTLCOutput, RevokedOutput,
};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::chain::Filter;
use crate::chain::{BestBlock, WatchedOutput};
use crate::events::bump_transaction::{AnchorDescriptor, BumpTransactionEvent};
use crate::events::{ClosureReason, Event, EventHandler, PaidBolt12Invoice, ReplayEvent};
use crate::ln::chan_utils::{
self, ChannelTransactionParameters, CommitmentTransaction, CounterpartyCommitmentSecrets,
HTLCClaim, HTLCOutputInCommitment, HolderCommitmentTransaction,
};
use crate::ln::channel::INITIAL_COMMITMENT_NUMBER;
use crate::ln::channel_keys::{
DelayedPaymentBasepoint, DelayedPaymentKey, HtlcBasepoint, HtlcKey, RevocationBasepoint,
RevocationKey,
};
use crate::ln::channelmanager::{HTLCSource, PaymentClaimDetails, SentHTLCId};
use crate::ln::msgs::DecodeError;
use crate::ln::types::ChannelId;
use crate::sign::{
ecdsa::EcdsaChannelSigner, ChannelDerivationParameters, DelayedPaymentOutputDescriptor,
EntropySource, HTLCDescriptor, SignerProvider, SpendableOutputDescriptor,
StaticPaymentOutputDescriptor,
};
use crate::types::features::ChannelTypeFeatures;
use crate::types::payment::{PaymentHash, PaymentPreimage};
use crate::util::byte_utils;
use crate::util::logger::{Logger, WithContext};
use crate::util::persist::MonitorName;
use crate::util::ser::{
MaybeReadable, Readable, ReadableArgs, RequiredWrapper, UpgradableRequired, Writeable, Writer,
U48,
};
#[allow(unused_imports)]
use crate::prelude::*;
use crate::io::{self, Error};
use crate::sync::Mutex;
use core::ops::Deref;
use core::{cmp, mem};
/// An update generated by the underlying channel itself which contains some new information the
/// [`ChannelMonitor`] should be made aware of.
///
/// Because this represents only a small number of updates to the underlying state, it is generally
/// much smaller than a full [`ChannelMonitor`]. However, for large single commitment transaction
/// updates (e.g. ones during which there are hundreds of HTLCs pending on the commitment
/// transaction), a single update may reach upwards of 1 MiB in serialized size.
#[derive(Clone, Debug, PartialEq, Eq)]
#[must_use]
pub struct ChannelMonitorUpdate {
    /// The individual state-change steps to apply to the [`ChannelMonitor`], in order.
    pub(crate) updates: Vec<ChannelMonitorUpdateStep>,
    /// The sequence number of this update. Updates *must* be replayed in-order according to this
    /// sequence number (and updates may panic if they are not). The update_id values are strictly
    /// increasing and increase by one for each new update, with two exceptions specified below.
    ///
    /// This sequence number is also used to track up to which points updates which returned
    /// [`ChannelMonitorUpdateStatus::InProgress`] have been applied to all copies of a given
    /// ChannelMonitor when ChannelManager::channel_monitor_updated is called.
    ///
    /// Note that for [`ChannelMonitorUpdate`]s generated on LDK versions prior to 0.1 after the
    /// channel was closed, this value may be [`u64::MAX`]. In that case, multiple updates may
    /// appear with the same ID, and all should be replayed.
    ///
    /// [`ChannelMonitorUpdateStatus::InProgress`]: super::ChannelMonitorUpdateStatus::InProgress
    pub update_id: u64,
    /// The channel ID associated with these updates.
    ///
    /// Will be `None` for `ChannelMonitorUpdate`s constructed on LDK versions prior to 0.0.121 and
    /// always `Some` otherwise.
    pub channel_id: Option<ChannelId>,
}
impl ChannelMonitorUpdate {
    /// Yields, for every `RenegotiatedFunding` step contained in this update, the new funding
    /// outpoint together with the P2WSH script of the new funding output.
    pub(crate) fn internal_renegotiated_funding_data(
        &self,
    ) -> impl Iterator<Item = (OutPoint, ScriptBuf)> + '_ {
        self.updates.iter().filter_map(|step| {
            if let ChannelMonitorUpdateStep::RenegotiatedFunding { channel_parameters, .. } = step {
                let outpoint = channel_parameters
                    .funding_outpoint
                    .expect("Renegotiated funding must always have known outpoint");
                let script = channel_parameters.make_funding_redeemscript().to_p2wsh();
                Some((outpoint, script))
            } else {
                None
            }
        })
    }
    /// Returns a `Vec` of new (funding outpoint, funding script) to monitor the chain for as a
    /// result of a renegotiated funding transaction.
    #[cfg(c_bindings)]
    pub fn renegotiated_funding_data(&self) -> Vec<(OutPoint, ScriptBuf)> {
        self.internal_renegotiated_funding_data().collect()
    }
    /// Returns an iterator of new (funding outpoint, funding script) to monitor the chain for as a
    /// result of a renegotiated funding transaction.
    #[cfg(not(c_bindings))]
    pub fn renegotiated_funding_data(&self) -> impl Iterator<Item = (OutPoint, ScriptBuf)> + '_ {
        self.internal_renegotiated_funding_data()
    }
}
/// LDK prior to 0.1 used this constant as the [`ChannelMonitorUpdate::update_id`] for any
/// [`ChannelMonitorUpdate`]s which were generated after the channel was closed.
// NOTE(review): presumably still referenced when replaying updates written by those versions —
// see the `update_id` docs above, which note such updates may share the value `u64::MAX`.
const LEGACY_CLOSED_CHANNEL_UPDATE_ID: u64 = u64::MAX;
impl Writeable for ChannelMonitorUpdate {
    /// Serializes the update: version prefix, `update_id`, a length-prefixed list of steps, then
    /// the trailing TLV stream.
    fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
        write_ver_prefix!(w, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
        self.update_id.write(w)?;
        let step_count = self.updates.len() as u64;
        step_count.write(w)?;
        for step in &self.updates {
            step.write(w)?;
        }
        write_tlv_fields!(w, {
            // 1 was previously used to store `counterparty_node_id`
            (3, self.channel_id, option),
        });
        Ok(())
    }
}
impl Readable for ChannelMonitorUpdate {
    /// Deserializes an update written by [`Writeable::write`] above, tolerating update steps from
    /// newer versions (which read as `None` and are skipped).
    #[rustfmt::skip]
    fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
        let _ver = read_ver_prefix!(r, SERIALIZATION_VERSION);
        let update_id: u64 = Readable::read(r)?;
        let len: u64 = Readable::read(r)?;
        // Cap the initial allocation so a bogus length prefix cannot force a huge allocation.
        let capacity =
            cmp::min(len as usize, MAX_ALLOC_SIZE / ::core::mem::size_of::<ChannelMonitorUpdateStep>());
        let mut updates = Vec::with_capacity(capacity);
        for _ in 0..len {
            match MaybeReadable::read(r)? {
                Some(step) => updates.push(step),
                None => {},
            }
        }
        let mut channel_id = None;
        read_tlv_fields!(r, {
            // 1 was previously used to store `counterparty_node_id`
            (3, channel_id, option),
        });
        Ok(Self { update_id, updates, channel_id })
    }
}
/// An event to be processed by the ChannelManager.
#[derive(Clone, PartialEq, Eq)]
pub enum MonitorEvent {
    /// A monitor event containing an HTLCUpdate.
    HTLCEvent(HTLCUpdate),
    /// Indicates we broadcasted the channel's latest commitment transaction and thus closed the
    /// channel. Holds information about the channel and why it was closed.
    HolderForceClosedWithInfo {
        /// The reason the channel was closed.
        reason: ClosureReason,
        /// The funding outpoint of the channel.
        outpoint: OutPoint,
        /// The channel ID of the channel.
        channel_id: ChannelId,
    },
    /// Indicates we broadcasted the channel's latest commitment transaction and thus closed the
    /// channel.
    HolderForceClosed(OutPoint),
    /// Indicates that we've detected a commitment transaction (either holder's or counterparty's)
    /// being included in a block and should consider the channel closed.
    // NOTE(review): the unit payload appears to exist only to preserve the serialized shape of an
    // older form of this variant — confirm before changing it.
    CommitmentTxConfirmed(()),
    /// Indicates a [`ChannelMonitor`] update has completed. See
    /// [`ChannelMonitorUpdateStatus::InProgress`] for more information on how this is used.
    ///
    /// [`ChannelMonitorUpdateStatus::InProgress`]: super::ChannelMonitorUpdateStatus::InProgress
    Completed {
        /// The funding outpoint of the [`ChannelMonitor`] that was updated
        funding_txo: OutPoint,
        /// The channel ID of the channel associated with the [`ChannelMonitor`]
        channel_id: ChannelId,
        /// The Update ID from [`ChannelMonitorUpdate::update_id`] which was applied or
        /// [`ChannelMonitor::get_latest_update_id`].
        ///
        /// Note that this should only be set to a given update's ID if all previous updates for the
        /// same [`ChannelMonitor`] have been applied and persisted.
        monitor_update_id: u64,
    },
}
impl_writeable_tlv_based_enum_upgradable_legacy!(MonitorEvent,
    // Note that Completed is currently never serialized to disk as it is generated only in
    // ChainMonitor.
    (0, Completed) => {
        (0, funding_txo, required),
        (2, monitor_update_id, required),
        (4, channel_id, required),
    },
    (5, HolderForceClosedWithInfo) => {
        (0, reason, upgradable_required),
        (2, outpoint, required),
        (4, channel_id, required),
    },
    ;
    (1, CommitmentTxConfirmed),
    (2, HTLCEvent),
    (4, HolderForceClosed),
    // 6 was `UpdateFailed` until LDK 0.0.117
);
/// Simple structure sent back by `chain::Watch` when an HTLC from a forward channel is detected on
/// chain. Used to update the corresponding HTLC in the backward channel. Failing to pass the
/// preimage claim backward will lead to loss of funds.
#[derive(Clone, PartialEq, Eq)]
pub struct HTLCUpdate {
    pub(crate) payment_hash: PaymentHash,
    // `Some` when a preimage for the HTLC was learned on-chain, `None` otherwise (presumably an
    // on-chain failure/timeout — confirm against the event producers).
    pub(crate) payment_preimage: Option<PaymentPreimage>,
    pub(crate) source: HTLCSource,
    // Added later as an `Option` — odd TLV type below keeps it optional for old data.
    pub(crate) htlc_value_satoshis: Option<u64>,
}
impl_writeable_tlv_based!(HTLCUpdate, {
    (0, payment_hash, required),
    (1, htlc_value_satoshis, option),
    (2, source, required),
    (4, payment_preimage, option),
});
/// If an output goes from claimable only by us to claimable by us or our counterparty within this
/// many blocks, we consider it pinnable for the purposes of aggregating claims in a single
/// transaction.
pub(crate) const COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE: u32 = 12;
/// When we go to force-close a channel because an HTLC is expiring, by the time we've gotten the
/// commitment transaction confirmed, we should ensure that the HTLC(s) expiring are not considered
/// pinnable, allowing us to aggregate them with other HTLC(s) expiring at the same time.
const _: () = assert!(MAX_BLOCKS_FOR_CONF > COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE);
/// The upper bound on how many blocks we think it can take for us to get a transaction confirmed.
pub(crate) const MAX_BLOCKS_FOR_CONF: u32 = 18;
/// If an HTLC expires within this many blocks, force-close the channel to broadcast the
/// HTLC-Success transaction.
///
/// This is two times [`MAX_BLOCKS_FOR_CONF`] as we need to first get the commitment transaction
/// confirmed, then get an HTLC transaction confirmed.
pub(crate) const CLTV_CLAIM_BUFFER: u32 = MAX_BLOCKS_FOR_CONF * 2;
/// Number of blocks by which point we expect our counterparty to have seen new blocks on the
/// network and done a full update_fail_htlc/commitment_signed dance (+ we've updated all our
/// copies of ChannelMonitors, including watchtowers). We could enforce the contract by failing
/// at CLTV expiration height but giving a grace period to our peer may be profitable for us if he
/// can provide an over-late preimage. Nevertheless, the grace period has to be accounted for in our
/// CLTV_EXPIRY_DELTA to be secure. Following this policy we may decrease the rate of channel failures
/// due to expiration but increase the cost of funds being locked longer in case of failure.
/// This delay also covers a low-power peer being slow to process blocks and so being behind us on
/// accurate block height.
/// In case of an onchain failure to be passed backward, we may see the last block of ANTI_REORG_DELAY
/// at worst with this delay, so we are not only using this value as a mercy for them but also
/// as a safeguard for ourselves, to delay with enough time.
pub(crate) const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
/// Number of blocks we wait on seeing a HTLC output being solved before we fail corresponding
/// inbound HTLCs. This prevents us from failing backwards and then getting a reorg resulting in us
/// losing money.
///
/// Note that this is a library-wide security assumption. If a reorg deeper than this number of
/// blocks occurs, counterparties may be able to steal funds or claims made by and balances exposed
/// by a [`ChannelMonitor`] may be incorrect.
// We also use this delay to be sure we can remove our in-flight claim txn from bump candidates buffer.
// It may cause spurious generation of bumped claim txn but that's alright given the outpoint is already
// solved by a previous claim tx. What we want to avoid is a reorg evicting our claim tx and us not
// continuing to bump another claim tx to solve the outpoint.
pub const ANTI_REORG_DELAY: u32 = 6;
/// Number of blocks we wait before assuming a [`ChannelMonitor`] to be fully resolved and
/// considering it safe to be archived.
// 4032 blocks are roughly four weeks
pub const ARCHIVAL_DELAY_BLOCKS: u32 = 4032;
/// Number of blocks before confirmation at which we fail back an un-relayed HTLC or at which we
/// refuse to accept a new HTLC.
///
/// This is used for a few separate purposes:
/// 1) if we've received an MPP HTLC to us and it expires within this many blocks and we are
///    waiting on additional parts (or waiting on the preimage for any HTLC from the user), we will
///    fail this HTLC,
/// 2) if we receive an HTLC within this many blocks of its expiry (plus one to avoid a race
///    condition with the above), we will fail this HTLC without telling the user we received it,
///
/// (1) is all about protecting us - we need enough time to update the channel state before we hit
/// CLTV_CLAIM_BUFFER, at which point we'd go on chain to claim the HTLC with the preimage.
///
/// (2) is the same, but with an additional buffer to avoid accepting an HTLC which is immediately
/// in a race condition between the user connecting a block (which would fail it) and the user
/// providing us the preimage (which would claim it).
pub const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS;
// Deprecated, use [`HolderCommitment`] or [`HolderCommitmentTransaction`].
//
// NOTE(review): appears to be retained for backwards-compatible (de)serialization — its TLV
// stream is mirrored by `write_legacy_holder_commitment_data` below; confirm before removing.
#[derive(Clone, PartialEq, Eq)]
struct HolderSignedTx {
    /// txid of the transaction in tx, just used to make comparison faster
    txid: Txid,
    revocation_key: RevocationKey,
    a_htlc_key: HtlcKey,
    b_htlc_key: HtlcKey,
    delayed_payment_key: DelayedPaymentKey,
    per_commitment_point: PublicKey,
    // Per-HTLC data: the HTLC itself, the counterparty's signature for non-dust HTLCs, and the
    // source for HTLCs we offered.
    htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>,
    to_self_value_sat: u64,
    feerate_per_kw: u32,
}
// Any changes made here must also be reflected in `write_legacy_holder_commitment_data`.
impl_writeable_tlv_based!(HolderSignedTx, {
    (0, txid, required),
    (1, to_self_value_sat, required), // Added in 0.0.100, required in 0.2.
    (2, revocation_key, required),
    (4, a_htlc_key, required),
    (6, b_htlc_key, required),
    (8, delayed_payment_key, required),
    (10, per_commitment_point, required),
    (12, feerate_per_kw, required),
    (14, htlc_outputs, required_vec)
});
// Matches the serialization of `HolderSignedTx` for backwards compatibility reasons.
//
// Writes the same TLV stream as `impl_writeable_tlv_based!(HolderSignedTx, ...)` above, but
// derives each field from the live `HolderCommitmentTransaction` and `CommitmentHTLCData` instead
// of a stored `HolderSignedTx`. Any change here must remain byte-compatible with that impl.
#[rustfmt::skip]
fn write_legacy_holder_commitment_data<W: Writer>(
    writer: &mut W, commitment_tx: &HolderCommitmentTransaction, htlc_data: &CommitmentHTLCData,
) -> Result<(), io::Error> {
    let trusted_tx = commitment_tx.trust();
    let tx_keys = trusted_tx.keys();
    let txid = trusted_tx.txid();
    let to_self_value_sat = commitment_tx.to_broadcaster_value_sat();
    let feerate_per_kw = trusted_tx.negotiated_feerate_per_kw();
    let revocation_key = &tx_keys.revocation_key;
    let a_htlc_key = &tx_keys.broadcaster_htlc_key;
    let b_htlc_key = &tx_keys.countersignatory_htlc_key;
    let delayed_payment_key = &tx_keys.broadcaster_delayed_payment_key;
    let per_commitment_point = &tx_keys.per_commitment_point;
    // Pair each non-dust HTLC with its counterparty signature; offered HTLCs additionally take
    // the next entry from `nondust_htlc_sources` (both lists are consumed in lockstep).
    let mut nondust_htlcs = commitment_tx.nondust_htlcs().iter()
        .zip(commitment_tx.counterparty_htlc_sigs.iter());
    let mut sources = htlc_data.nondust_htlc_sources.iter();
    // Use an iterator to write `htlc_outputs` to avoid allocations.
    let nondust_htlcs = core::iter::from_fn(move || {
        let (htlc, counterparty_htlc_sig) = if let Some(nondust_htlc) = nondust_htlcs.next() {
            nondust_htlc
        } else {
            // All sources must have been consumed by the time the non-dust HTLCs run out.
            assert!(sources.next().is_none());
            return None;
        };
        let mut source = None;
        if htlc.offered {
            source = sources.next();
            if source.is_none() {
                panic!("Every offered non-dust HTLC should have a corresponding source");
            }
        }
        Some((htlc, Some(counterparty_htlc_sig), source))
    });
    // Dust HTLCs go last.
    let dust_htlcs = htlc_data.dust_htlcs.iter()
        .map(|(htlc, source)| (htlc, None::<&Signature>, source.as_ref()));
    let htlc_outputs = crate::util::ser::IterableOwned(nondust_htlcs.chain(dust_htlcs));
    // Type numbers must mirror the `HolderSignedTx` impl above exactly.
    write_tlv_fields!(writer, {
        (0, txid, required),
        (1, to_self_value_sat, required),
        (2, revocation_key, required),
        (4, a_htlc_key, required),
        (6, b_htlc_key, required),
        (8, delayed_payment_key, required),
        (10, per_commitment_point, required),
        (12, feerate_per_kw, required),
        (14, htlc_outputs, required),
    });
    Ok(())
}
/// We use this to track static counterparty commitment transaction data and to generate any
/// justice or 2nd-stage preimage/timeout transactions.
#[derive(Clone, PartialEq, Eq)]
struct CounterpartyCommitmentParameters {
    counterparty_delayed_payment_base_key: DelayedPaymentBasepoint,
    counterparty_htlc_base_key: HtlcBasepoint,
    on_counterparty_tx_csv: u16,
}
impl Writeable for CounterpartyCommitmentParameters {
    fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
        // Write a zero-length legacy per-HTLC map: versions prior to 0.0.100 stored per-HTLC
        // state here, which the `Readable` impl below still skips over on read.
        w.write_all(&0u64.to_be_bytes())?;
        write_tlv_fields!(w, {
            (0, self.counterparty_delayed_payment_base_key, required),
            (2, self.counterparty_htlc_base_key, required),
            (4, self.on_counterparty_tx_csv, required),
        });
        Ok(())
    }
}
impl Readable for CounterpartyCommitmentParameters {
    /// Deserializes the parameters, skipping over the legacy per-HTLC state written by versions
    /// prior to 0.0.100 before reading the TLV fields.
    #[rustfmt::skip]
    fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
        // Versions prior to 0.0.100 had some per-HTLC state stored here, which is no longer
        // used. Read it for compatibility and discard it.
        let per_htlc_len: u64 = Readable::read(r)?;
        for _ in 0..per_htlc_len {
            let _txid: Txid = Readable::read(r)?;
            let htlcs_count: u64 = Readable::read(r)?;
            for _ in 0..htlcs_count {
                let _htlc: HTLCOutputInCommitment = Readable::read(r)?;
            }
        }
        let mut delayed_payment_base = RequiredWrapper(None);
        let mut htlc_base = RequiredWrapper(None);
        let mut csv: u16 = 0;
        read_tlv_fields!(r, {
            (0, delayed_payment_base, required),
            (2, htlc_base, required),
            (4, csv, required),
        });
        Ok(CounterpartyCommitmentParameters {
            counterparty_delayed_payment_base_key: delayed_payment_base.0.unwrap(),
            counterparty_htlc_base_key: htlc_base.0.unwrap(),
            on_counterparty_tx_csv: csv,
        })
    }
}
/// An entry for an [`OnchainEvent`], stating the block height and hash when the event was
/// observed, as well as the transaction causing it.
///
/// Used to determine when the on-chain event can be considered safe from a chain reorganization.
#[derive(Clone, PartialEq, Eq)]
struct OnchainEventEntry {
    /// The txid of the transaction which caused this event.
    txid: Txid,
    /// The block height at which the event was observed.
    height: u32,
    block_hash: Option<BlockHash>, // Added as optional, will be filled in for any entry generated on 0.0.113 or after
    event: OnchainEvent,
    transaction: Option<Transaction>, // Added as optional, but always filled in, in LDK 0.0.110
}
impl OnchainEventEntry {
    /// Returns the block height at which this event is safe from a reorg: [`ANTI_REORG_DELAY`]
    /// confirmations past observation, extended further when a CSV delay gates spendability.
    fn confirmation_threshold(&self) -> u32 {
        let reorg_safe = self.height + ANTI_REORG_DELAY - 1;
        // A CSV'd transaction is confirmable in block (input height) + CSV delay, which means
        // it's broadcastable when we see the previous block.
        let csv_ready = match self.event {
            OnchainEvent::MaturingOutput {
                descriptor: SpendableOutputDescriptor::DelayedPaymentOutput(ref descriptor),
            } => Some(self.height + descriptor.to_self_delay as u32 - 1),
            OnchainEvent::FundingSpendConfirmation { on_local_output_csv: Some(csv), .. }
            | OnchainEvent::HTLCSpendConfirmation { on_to_local_output_csv: Some(csv), .. } => {
                Some(self.height + csv as u32 - 1)
            },
            _ => None,
        };
        csv_ready.map_or(reorg_safe, |ready| cmp::max(reorg_safe, ready))
    }
    /// Whether the best known chain tip has reached this event's confirmation threshold.
    fn has_reached_confirmation_threshold(&self, best_block: &BestBlock) -> bool {
        self.confirmation_threshold() <= best_block.height
    }
}
/// The (output index, sats value) for the counterparty's output in a commitment transaction.
///
/// This was added as an `Option` in 0.0.110.
type CommitmentTxCounterpartyOutputInfo = Option<(u32, Amount)>;
/// Upon discovery of some classes of onchain tx by ChannelMonitor, we may have to take actions on them
/// once they mature to enough confirmations (ANTI_REORG_DELAY)
#[derive(Clone, PartialEq, Eq)]
enum OnchainEvent {
    /// An outbound HTLC failing after a transaction is confirmed. Used
    /// * when an outbound HTLC output is spent by us after the HTLC timed out
    /// * an outbound HTLC which was not present in the commitment transaction which appeared
    ///   on-chain (either because it was not fully committed to or it was dust).
    /// Note that this is *not* used for preimage claims, as those are passed upstream immediately,
    /// appearing only as an `HTLCSpendConfirmation`, below.
    HTLCUpdate {
        source: HTLCSource,
        payment_hash: PaymentHash,
        htlc_value_satoshis: Option<u64>,
        /// None in the second case, above, ie when there is no relevant output in the commitment
        /// transaction which appeared on chain.
        commitment_tx_output_idx: Option<u32>,
    },
    /// An output waiting on [`ANTI_REORG_DELAY`] confirmations before we hand the user the
    /// [`SpendableOutputDescriptor`].
    MaturingOutput { descriptor: SpendableOutputDescriptor },
    /// A spend of the funding output, either a commitment transaction or a cooperative closing
    /// transaction.
    FundingSpendConfirmation {
        /// The CSV delay for the output of the funding spend transaction (implying it is a local
        /// commitment transaction, and this is the delay on the to_self output).
        on_local_output_csv: Option<u16>,
        /// If the funding spend transaction was a known remote commitment transaction, we track
        /// the output index and amount of the counterparty's `to_self` output here.
        ///
        /// This allows us to generate a [`Balance::CounterpartyRevokedOutputClaimable`] for the
        /// counterparty output.
        commitment_tx_to_counterparty_output: CommitmentTxCounterpartyOutputInfo,
    },
    /// A spend of a commitment transaction HTLC output, set in the cases where *no* `HTLCUpdate`
    /// is constructed. This is used when
    /// * an outbound HTLC is claimed by our counterparty with a preimage, causing us to
    ///   immediately claim the HTLC on the inbound edge and track the resolution here,
    /// * an inbound HTLC is claimed by our counterparty (with a timeout),
    /// * an inbound HTLC is claimed by us (with a preimage).
    /// * a revoked-state HTLC transaction was broadcasted, which was claimed by the revocation
    ///   signature.
    /// * a revoked-state HTLC transaction was broadcasted, which was claimed by an
    ///   HTLC-Success/HTLC-Failure transaction (and is still claimable with a revocation
    ///   signature).
    HTLCSpendConfirmation {
        commitment_tx_output_idx: u32,
        /// If the claim was made by either party with a preimage, this is filled in
        preimage: Option<PaymentPreimage>,
        /// If the claim was made by us on an inbound HTLC against a local commitment transaction,
        /// we set this to the output CSV value which we will have to wait until to spend the
        /// output (and generate a SpendableOutput event).
        on_to_local_output_csv: Option<u16>,
    },
    /// An alternative funding transaction (due to a splice/RBF) has confirmed but can no longer be
    /// locked now as the monitor is no longer allowing updates. Note that we wait to promote the
    /// corresponding `FundingScope` until we see a
    /// [`ChannelMonitorUpdateStep::RenegotiatedFundingLocked`], but this event is only applicable
    /// once [`ChannelMonitor::no_further_updates_allowed`] returns true. We promote the
    /// `FundingScope` once the funding transaction is irrevocably confirmed.
    AlternativeFundingConfirmation {},
}
impl Writeable for OnchainEventEntry {
    fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
        write_tlv_fields!(writer, {
            (0, self.txid, required),
            (1, self.transaction, option),
            (2, self.height, required),
            (3, self.block_hash, option),
            (4, self.event, required),
        });
        Ok(())
    }
}
impl MaybeReadable for OnchainEventEntry {
    #[rustfmt::skip]
    fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
        let mut txid = Txid::all_zeros();
        let mut transaction = None;
        let mut block_hash = None;
        let mut height = 0;
        // `event` is read as `upgradable_required` so entries written by newer versions with
        // unknown event variants can be tolerated rather than failing the whole read.
        let mut event = UpgradableRequired(None);
        read_tlv_fields!(reader, {
            (0, txid, required),
            (1, transaction, option),
            (2, height, required),
            (3, block_hash, option),
            (4, event, upgradable_required),
        });
        Ok(Some(Self { txid, transaction, height, block_hash, event: _init_tlv_based_struct_field!(event, upgradable_required) }))
    }
}
impl_writeable_tlv_based_enum_upgradable!(OnchainEvent,
    (0, HTLCUpdate) => {
        (0, source, required),
        (1, htlc_value_satoshis, option),
        (2, payment_hash, required),
        (3, commitment_tx_output_idx, option),
    },
    (1, MaturingOutput) => {
        (0, descriptor, required),
    },
    (2, AlternativeFundingConfirmation) => {},
    (3, FundingSpendConfirmation) => {
        (0, on_local_output_csv, option),
        (1, commitment_tx_to_counterparty_output, option),
    },
    (5, HTLCSpendConfirmation) => {
        (0, commitment_tx_output_idx, required),
        (2, preimage, option),
        (4, on_to_local_output_csv, option),
    },
);
/// A single change to a [`ChannelMonitor`]'s state, carried in [`ChannelMonitorUpdate::updates`].
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum ChannelMonitorUpdateStep {
    LatestHolderCommitmentTXInfo {
        commitment_tx: HolderCommitmentTransaction,
        /// Note that LDK after 0.0.115 supports this only containing dust HTLCs (implying the
        /// `Signature` field is never filled in). At that point, non-dust HTLCs are implied by the
        /// HTLC fields in `commitment_tx` and the sources passed via `nondust_htlc_sources`.
        /// Starting with 0.2, the non-dust HTLC sources will always be provided separately, and
        /// `htlc_outputs` will only include dust HTLCs. We still have to track the
        /// `Option<Signature>` for backwards compatibility.
        htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>,
        claimed_htlcs: Vec<(SentHTLCId, PaymentPreimage)>,
        nondust_htlc_sources: Vec<HTLCSource>,
    },
    LatestHolderCommitment {
        commitment_txs: Vec<HolderCommitmentTransaction>,
        htlc_data: CommitmentHTLCData,
        claimed_htlcs: Vec<(SentHTLCId, PaymentPreimage)>,
    },
    LatestCounterpartyCommitmentTXInfo {
        commitment_txid: Txid,
        htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>,
        commitment_number: u64,
        their_per_commitment_point: PublicKey,
        feerate_per_kw: Option<u32>,
        to_broadcaster_value_sat: Option<u64>,
        to_countersignatory_value_sat: Option<u64>,
    },
    LatestCounterpartyCommitment {
        commitment_txs: Vec<CommitmentTransaction>,
        htlc_data: CommitmentHTLCData,
    },
    PaymentPreimage {
        payment_preimage: PaymentPreimage,
        /// If this preimage was from an inbound payment claim, information about the claim should
        /// be included here to enable claim replay on startup.
        payment_info: Option<PaymentClaimDetails>,
    },
    CommitmentSecret {
        idx: u64,
        secret: [u8; 32],
    },
    /// Used to indicate that no future updates will occur, and likely that the latest holder
    /// commitment transaction(s) should be broadcast, as the channel has been force-closed.
    ChannelForceClosed {
        /// If set to false, we shouldn't broadcast the latest holder commitment transaction as we
        /// think we've fallen behind!
        should_broadcast: bool,
    },
    ShutdownScript {
        scriptpubkey: ScriptBuf,
    },
    RenegotiatedFunding {
        channel_parameters: ChannelTransactionParameters,
        holder_commitment_tx: HolderCommitmentTransaction,
        counterparty_commitment_tx: CommitmentTransaction,
    },
    RenegotiatedFundingLocked {
        funding_txid: Txid,
    },
    /// When a payment is finally resolved by the user handling an [`Event::PaymentSent`] or
    /// [`Event::PaymentFailed`] event, the `ChannelManager` no longer needs to hear about it on
    /// startup (which would cause it to re-hydrate the payment information even though the user
    /// already learned about the payment's result).
    ///
    /// This will remove the HTLC from [`ChannelMonitor::get_all_current_outbound_htlcs`] and
    /// [`ChannelMonitor::get_onchain_failed_outbound_htlcs`].
    ///
    /// Note that this is only generated for closed channels as this is implicit in the
    /// [`Self::CommitmentSecret`] update which clears the payment information from all un-revoked
    /// counterparty commitment transactions.
    ReleasePaymentComplete {
        htlc: SentHTLCId,
    },
}
impl ChannelMonitorUpdateStep {
#[rustfmt::skip]
fn variant_name(&self) -> &'static str {
match self {
ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { .. } => "LatestHolderCommitmentTXInfo",
ChannelMonitorUpdateStep::LatestHolderCommitment { .. } => "LatestHolderCommitment",
ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { .. } => "LatestCounterpartyCommitmentTXInfo",
ChannelMonitorUpdateStep::LatestCounterpartyCommitment { .. } => "LatestCounterpartyCommitment",
ChannelMonitorUpdateStep::PaymentPreimage { .. } => "PaymentPreimage",
ChannelMonitorUpdateStep::CommitmentSecret { .. } => "CommitmentSecret",
ChannelMonitorUpdateStep::ChannelForceClosed { .. } => "ChannelForceClosed",
ChannelMonitorUpdateStep::ShutdownScript { .. } => "ShutdownScript",
ChannelMonitorUpdateStep::RenegotiatedFunding { .. } => "RenegotiatedFunding",
ChannelMonitorUpdateStep::RenegotiatedFundingLocked { .. } => "RenegotiatedFundingLocked",
ChannelMonitorUpdateStep::ReleasePaymentComplete { .. } => "ReleasePaymentComplete",
}
}
}
// The variant tags and per-field TLV type numbers below form the on-disk encoding of
// `ChannelMonitorUpdateStep`; they must never be renumbered or reused.
impl_writeable_tlv_based_enum_upgradable!(ChannelMonitorUpdateStep,
    (0, LatestHolderCommitmentTXInfo) => {
        (0, commitment_tx, required),
        (1, claimed_htlcs, optional_vec),
        (2, htlc_outputs, required_vec),
        (4, nondust_htlc_sources, optional_vec),
    },
    (1, LatestCounterpartyCommitmentTXInfo) => {
        (0, commitment_txid, required),
        (1, feerate_per_kw, option),
        (2, commitment_number, required),
        (3, to_broadcaster_value_sat, option),
        (4, their_per_commitment_point, required),
        (5, to_countersignatory_value_sat, option),
        (6, htlc_outputs, required_vec),
    },
    (2, PaymentPreimage) => {
        (0, payment_preimage, required),
        (1, payment_info, option),
    },
    (3, CommitmentSecret) => {
        (0, idx, required),
        (2, secret, required),
    },
    (4, ChannelForceClosed) => {
        (0, should_broadcast, required),
    },
    (5, ShutdownScript) => {
        (0, scriptpubkey, required),
    },
    (6, LatestCounterpartyCommitment) => {
        (1, commitment_txs, required_vec),
        (3, htlc_data, required),
    },
    (7, ReleasePaymentComplete) => {
        (1, htlc, required),
    },
    (8, LatestHolderCommitment) => {
        (1, commitment_txs, required_vec),
        (3, htlc_data, required),
        (5, claimed_htlcs, required_vec),
    },
    (10, RenegotiatedFunding) => {
        (1, channel_parameters, (required: ReadableArgs, None)),
        (3, holder_commitment_tx, required),
        (5, counterparty_commitment_tx, required),
    },
    (12, RenegotiatedFundingLocked) => {
        (1, funding_txid, required),
    },
);
/// Indicates whether the balance is derived from a cooperative close, a force-close
/// (for holder or counterparty), or whether it is for an HTLC.
#[derive(Clone, Debug, PartialEq, Eq)]
// `PartialOrd`/`Ord` are derived in test builds only, e.g. so tests can sort balances before
// asserting on them.
#[cfg_attr(test, derive(PartialOrd, Ord))]
pub enum BalanceSource {
	/// The channel was force closed by the holder.
	HolderForceClosed,
	/// The channel was force closed by the counterparty.
	CounterpartyForceClosed,
	/// The channel was cooperatively closed.
	CoopClose,
	/// This balance is the result of an HTLC.
	Htlc,
}
/// The claimable balance of a holder commitment transaction that has yet to be broadcast.
#[derive(Clone, Debug, PartialEq, Eq)]
// `PartialOrd`/`Ord` are derived in test builds only, e.g. so tests can sort balances before
// asserting on them.
#[cfg_attr(test, derive(PartialOrd, Ord))]
pub struct HolderCommitmentTransactionBalance {
	/// The amount available to claim, in satoshis, excluding the on-chain fees which will be
	/// required to do so.
	pub amount_satoshis: u64,
	/// The transaction fee we pay for the closing commitment transaction. This amount is not
	/// included in the [`HolderCommitmentTransactionBalance::amount_satoshis`] value.
	/// This amount includes the sum of dust HTLCs on the commitment transaction, any elided anchors,
	/// as well as the sum of msat amounts rounded down from non-dust HTLCs.
	///
	/// Note that if this channel is inbound (and thus our counterparty pays the commitment
	/// transaction fee) this value will be zero. For [`ChannelMonitor`]s created prior to LDK
	/// 0.0.124, the channel is always treated as outbound (and thus this value is never zero).
	pub transaction_fee_satoshis: u64,
}
/// Details about the balance(s) available for spending once the channel appears on chain.
///
/// See [`ChannelMonitor::get_claimable_balances`] for more details on when these will or will not
/// be provided.
#[derive(Clone, Debug, PartialEq, Eq)]
// `PartialOrd`/`Ord` are derived in test builds only, e.g. so tests can sort balances before
// asserting on them.
#[cfg_attr(test, derive(PartialOrd, Ord))]
pub enum Balance {
	/// The channel is not yet closed (or the commitment or closing transaction has not yet
	/// appeared in a block).
	ClaimableOnChannelClose {
		/// A list of balance candidates based on the latest set of valid holder commitment
		/// transactions that can hit the chain. Typically, a channel only has one valid holder
		/// commitment transaction that spends the current funding output. As soon as a channel is
		/// spliced, an alternative holder commitment transaction exists spending the new funding
		/// output. More alternative holder commitment transactions can exist as the splice remains
		/// pending and RBF attempts are made.
		///
		/// The candidates are sorted by the order in which the holder commitment transactions were
		/// negotiated. When only one candidate exists, the channel does not have a splice pending.
		/// When multiple candidates exist, the last one reflects the balance of the
		/// latest splice/RBF attempt, while the first reflects the balance prior to the splice
		/// occurring.
		///
		/// Entries remain in this vec until the pending splice has reached [`ANTI_REORG_DELAY`]
		/// confirmations, at which point any conflicts will be removed. Once a splice confirms
		/// [`Self::ClaimableOnChannelClose::confirmed_balance_candidate_index`] will point to the
		/// confirmed entry, even if it has fewer than [`ANTI_REORG_DELAY`] confirmations.
		balance_candidates: Vec<HolderCommitmentTransactionBalance>,
		/// The index within [`Balance::ClaimableOnChannelClose::balance_candidates`] for the
		/// balance according to the current onchain state of the channel. This can be helpful when
		/// wanting to determine the claimable amount when the holder commitment transaction for the
		/// current funding transaction is broadcast and/or confirms.
		confirmed_balance_candidate_index: usize,
		/// The amount of millisatoshis which has been burned to fees from HTLCs which are outbound
		/// from us and are related to a payment which was sent by us. This is the sum of the
		/// millisatoshis part of all HTLCs which are otherwise represented by
		/// [`Balance::MaybeTimeoutClaimableHTLC`] with their
		/// [`Balance::MaybeTimeoutClaimableHTLC::outbound_payment`] flag set, as well as any dust
		/// HTLCs which would otherwise be represented the same.
		///
		/// This amount (rounded up to a whole satoshi value) will not be included in `amount_satoshis`.
		outbound_payment_htlc_rounded_msat: u64,
		/// The amount of millisatoshis which has been burned to fees from HTLCs which are outbound
		/// from us and are related to a forwarded HTLC. This is the sum of the millisatoshis part
		/// of all HTLCs which are otherwise represented by [`Balance::MaybeTimeoutClaimableHTLC`]
		/// with their [`Balance::MaybeTimeoutClaimableHTLC::outbound_payment`] flag *not* set, as
		/// well as any dust HTLCs which would otherwise be represented the same.
		///
		/// This amount (rounded up to a whole satoshi value) will not be included in `amount_satoshis`.
		outbound_forwarded_htlc_rounded_msat: u64,
		/// The amount of millisatoshis which has been burned to fees from HTLCs which are inbound
		/// to us and for which we know the preimage. This is the sum of the millisatoshis part of
		/// all HTLCs which would be represented by [`Balance::ContentiousClaimable`] on channel
		/// close, but whose current value is included in
		/// [`HolderCommitmentTransactionBalance::amount_satoshis`], as well as any dust HTLCs which
		/// would otherwise be represented the same.
		///
		/// This amount (rounded up to a whole satoshi value) will not be included in the counterparty's
		/// `amount_satoshis`.
		inbound_claiming_htlc_rounded_msat: u64,
		/// The amount of millisatoshis which has been burned to fees from HTLCs which are inbound
		/// to us and for which we do not know the preimage. This is the sum of the millisatoshis
		/// part of all HTLCs which would be represented by [`Balance::MaybePreimageClaimableHTLC`]
		/// on channel close, as well as any dust HTLCs which would otherwise be represented the
		/// same.
		///
		/// This amount (rounded up to a whole satoshi value) will not be included in the counterparty's
		/// `amount_satoshis`.
		inbound_htlc_rounded_msat: u64,
	},
	/// The channel has been closed, and the given balance is ours but awaiting confirmations until
	/// we consider it spendable.
	ClaimableAwaitingConfirmations {
		/// The amount available to claim, in satoshis, possibly excluding the on-chain fees which
		/// were spent in broadcasting the transaction.
		amount_satoshis: u64,
		/// The height at which an [`Event::SpendableOutputs`] event will be generated for this
		/// amount.
		confirmation_height: u32,
		/// Whether this balance is a result of cooperative close, a force-close, or an HTLC.
		source: BalanceSource,
	},
	/// The channel has been closed, and the given balance should be ours but awaiting spending
	/// transaction confirmation. If the spending transaction does not confirm in time, it is
	/// possible our counterparty can take the funds by broadcasting an HTLC timeout on-chain.
	///
	/// Once the spending transaction confirms, before it has reached enough confirmations to be
	/// considered safe from chain reorganizations, the balance will instead be provided via
	/// [`Balance::ClaimableAwaitingConfirmations`].
	ContentiousClaimable {
		/// The amount available to claim, in satoshis, excluding the on-chain fees which will be
		/// required to do so.
		amount_satoshis: u64,
		/// The height at which the counterparty may be able to claim the balance if we have not
		/// done so.
		timeout_height: u32,
		/// The payment hash that locks this HTLC.
		payment_hash: PaymentHash,
		/// The preimage that can be used to claim this HTLC.
		payment_preimage: PaymentPreimage,
	},
	/// HTLCs which we sent to our counterparty which are claimable after a timeout (less on-chain
	/// fees) if the counterparty does not know the preimage for the HTLCs. These are somewhat
	/// likely to be claimed by our counterparty before we do.
	MaybeTimeoutClaimableHTLC {
		/// The amount potentially available to claim, in satoshis, excluding the on-chain fees
		/// which will be required to do so.
		amount_satoshis: u64,
		/// The height at which we will be able to claim the balance if our counterparty has not
		/// done so.
		claimable_height: u32,
		/// The payment hash whose preimage our counterparty needs to claim this HTLC.
		payment_hash: PaymentHash,
		/// Whether this HTLC represents a payment which was sent outbound from us. Otherwise it
		/// represents an HTLC which was forwarded (and should, thus, have a corresponding inbound
		/// edge on another channel).
		outbound_payment: bool,
	},
	/// HTLCs which we received from our counterparty which are claimable with a preimage which we
	/// do not currently have. This will only be claimable if we receive the preimage from the node
	/// to which we forwarded this HTLC before the timeout.
	MaybePreimageClaimableHTLC {
		/// The amount potentially available to claim, in satoshis, excluding the on-chain fees
		/// which will be required to do so.
		amount_satoshis: u64,
		/// The height at which our counterparty will be able to claim the balance if we have not
		/// yet received the preimage and claimed it ourselves.
		expiry_height: u32,
		/// The payment hash whose preimage we need to claim this HTLC.
		payment_hash: PaymentHash,
	},
	/// The channel has been closed, and our counterparty broadcasted a revoked commitment
	/// transaction.
	///
	/// Thus, we're able to claim all outputs in the commitment transaction, one of which has the
	/// following amount.
	CounterpartyRevokedOutputClaimable {
		/// The amount, in satoshis, of the output which we can claim.
		///
		/// Note that for outputs from HTLC balances this may be excluding some on-chain fees that
		/// were already spent.
		amount_satoshis: u64,
	},
}
impl Balance {
	/// The amount claimable, in satoshis.
	///
	/// When the channel has yet to close, this returns the balance we expect to claim from the
	/// channel. This may change throughout the lifetime of the channel due to payments, but also
	/// due to splicing. If there's a pending splice, this will return the balance we expect to
	/// have assuming the latest negotiated splice confirms. However, if one of the negotiated
	/// splice transactions has already confirmed but is not yet locked, this reports the
	/// corresponding balance for said splice transaction instead.
	///
	/// For outbound payments, this excludes the balance from the possible HTLC timeout.
	///
	/// For forwarded payments, this includes the balance from the possible HTLC timeout as
	/// (to be conservative) that balance does not include routing fees we'd earn if we'd claim
	/// the balance from a preimage in a successful forward.
	///
	/// For more information on these balances see [`Balance::MaybeTimeoutClaimableHTLC`] and
	/// [`Balance::MaybePreimageClaimableHTLC`].
	///
	/// On-chain fees required to claim the balance are not included in this amount.
	pub fn claimable_amount_satoshis(&self) -> u64 {
		match self {
			Balance::ClaimableOnChannelClose {
				balance_candidates, confirmed_balance_candidate_index, ..
			} => match *confirmed_balance_candidate_index {
				// Index 0 (the first-negotiated funding): report the balance assuming the
				// latest negotiated splice/RBF attempt (the last candidate) confirms.
				0 => balance_candidates.last().map_or(0, |balance| balance.amount_satoshis),
				// An alternative funding confirmed on chain: report its balance directly.
				idx => balance_candidates[idx].amount_satoshis,
			},
			Balance::MaybeTimeoutClaimableHTLC { amount_satoshis, outbound_payment, .. } => {
				// Outbound-payment HTLC timeouts are excluded to be conservative.
				if *outbound_payment {
					0
				} else {
					*amount_satoshis
				}
			},
			Balance::MaybePreimageClaimableHTLC { .. } => 0,
			Balance::ClaimableAwaitingConfirmations { amount_satoshis, .. }
			| Balance::ContentiousClaimable { amount_satoshis, .. }
			| Balance::CounterpartyRevokedOutputClaimable { amount_satoshis, .. } => {
				*amount_satoshis
			},
		}
	}
}
/// An HTLC which has been irrevocably resolved on-chain, and has reached ANTI_REORG_DELAY.
#[derive(Clone, PartialEq, Eq)]
struct IrrevocablyResolvedHTLC {
	// `None` if the HTLC was dust and thus had no output in the commitment transaction (see the
	// `u32::MAX` sentinel handling in this type's `Writeable`/`Readable` implementations).
	commitment_tx_output_idx: Option<u32>,
	/// The txid of the transaction which resolved the HTLC, this may be a commitment (if the HTLC
	/// was not present in the confirmed commitment transaction), HTLC-Success, or HTLC-Timeout
	/// transaction.
	resolving_txid: Option<Txid>, // Added as optional, but always filled in, in 0.0.110
	resolving_tx: Option<Transaction>,
	/// Only set if the HTLC claim was ours using a payment preimage
	payment_preimage: Option<PaymentPreimage>,
}
/// In LDK versions prior to 0.0.111 commitment_tx_output_idx was not Option-al and
/// IrrevocablyResolvedHTLC objects only existed for non-dust HTLCs. This was a bug, but to maintain
/// backwards compatibility we must ensure we always write out a commitment_tx_output_idx field,
/// using [`u32::MAX`] as a sentinel to indicate the HTLC was dust.
impl Writeable for IrrevocablyResolvedHTLC {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		// Map `None` (dust HTLC) to the `u32::MAX` sentinel so the TLV field is always present,
		// as required by pre-0.0.111 readers.
		let mapped_commitment_tx_output_idx = self.commitment_tx_output_idx.unwrap_or(u32::MAX);
		write_tlv_fields!(writer, {
			(0, mapped_commitment_tx_output_idx, required),
			(1, self.resolving_txid, option),
			(2, self.payment_preimage, option),
			(3, self.resolving_tx, option),
		});
		Ok(())
	}
}
impl Readable for IrrevocablyResolvedHTLC {
	#[rustfmt::skip]
	fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		// Initial value only exists to satisfy `required`; the field is always present on the
		// wire (see the corresponding `Writeable` implementation).
		let mut mapped_commitment_tx_output_idx = 0;
		let mut resolving_txid = None;
		let mut payment_preimage = None;
		let mut resolving_tx = None;
		read_tlv_fields!(reader, {
			(0, mapped_commitment_tx_output_idx, required),
			(1, resolving_txid, option),
			(2, payment_preimage, option),
			(3, resolving_tx, option),
		});
		Ok(Self {
			// `u32::MAX` is the legacy sentinel for a dust HTLC with no commitment tx output.
			commitment_tx_output_idx: if mapped_commitment_tx_output_idx == u32::MAX { None } else { Some(mapped_commitment_tx_output_idx) },
			resolving_txid,
			payment_preimage,
			resolving_tx,
		})
	}
}
/// A ChannelMonitor handles chain events (blocks connected and disconnected) and generates
/// on-chain transactions to ensure no loss of funds occurs.
///
/// You MUST ensure that no ChannelMonitors for a given channel anywhere contain out-of-date
/// information and are actively monitoring the chain.
///
/// Like the [`ChannelManager`], deserialization is implemented for `(BlockHash, ChannelMonitor)`,
/// providing you with the last block hash which was connected before shutting down. You must begin
/// syncing the chain from that point, disconnecting and connecting blocks as required to get to
/// the best chain on startup. Note that all [`ChannelMonitor`]s passed to a [`ChainMonitor`] must
/// be synced as of the same block, so syncing must happen prior to [`ChainMonitor`]
/// initialization.
///
/// For those loading potentially-ancient [`ChannelMonitor`]s, deserialization is also implemented
/// for `Option<(BlockHash, ChannelMonitor)>`. LDK can no longer deserialize a [`ChannelMonitor`]
/// that was first created in LDK prior to 0.0.110 and last updated prior to LDK 0.0.119. In such
/// cases, the `Option<(..)>` deserialization option may return `Ok(None)` rather than failing to
/// deserialize, allowing you to differentiate between the two cases.
///
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
pub struct ChannelMonitor<Signer: EcdsaChannelSigner> {
	// All monitor state lives in `ChannelMonitorImpl`; the `Mutex` serializes access to it.
	pub(crate) inner: Mutex<ChannelMonitorImpl<Signer>>,
}
impl<Signer: EcdsaChannelSigner> Clone for ChannelMonitor<Signer>
where
	Signer: Clone,
{
	fn clone(&self) -> Self {
		// Lock the inner state, clone it, and wrap the copy in a fresh monitor.
		let cloned_impl = self.inner.lock().unwrap().clone();
		Self::from_impl(cloned_impl)
	}
}
// The auxiliary HTLC data associated with a commitment transaction: sources for offered non-dust
// HTLCs, plus dust HTLCs (those without a commitment transaction output) and their sources.
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct CommitmentHTLCData {
	// Sources for the offered, non-dust HTLCs in the commitment transaction.
	//
	// These must be sorted in increasing output index order to match the expected order of the
	// HTLCs in the `CommitmentTransaction`.
	pub nondust_htlc_sources: Vec<HTLCSource>,
	// Dust HTLCs (no `transaction_output_index`), along with their sources, if known.
	pub dust_htlcs: Vec<(HTLCOutputInCommitment, Option<HTLCSource>)>,
}
impl CommitmentHTLCData {
	/// Constructs an empty set of commitment HTLC data.
	fn new() -> Self {
		Self { nondust_htlc_sources: vec![], dust_htlcs: vec![] }
	}
}
// TLV serialization for `CommitmentHTLCData`; type numbers are part of the persisted format and
// must not change.
impl_writeable_tlv_based!(CommitmentHTLCData, {
	(1, nondust_htlc_sources, required_vec),
	(3, dust_htlcs, required_vec),
});
impl TryFrom<HolderSignedTx> for CommitmentHTLCData {
	type Error = ();

	fn try_from(value: HolderSignedTx) -> Result<Self, Self::Error> {
		// `HolderSignedTx` tracks all HTLCs included in the commitment (dust included). For
		// `CommitmentHTLCData` we need the dust HTLCs (with their sources) and the non-dust HTLC
		// sources tracked separately. Every offered, non-dust HTLC must have a source available,
		// otherwise the conversion fails.
		let mut nondust_htlc_sources = Vec::with_capacity(value.htlc_outputs.len());
		let mut dust_htlcs = Vec::new();
		for (htlc, _, source) in value.htlc_outputs.into_iter() {
			if htlc.transaction_output_index.is_none() {
				// Dust HTLC: keep it alongside its (optional) source.
				dust_htlcs.push((htlc, source));
			} else if htlc.offered {
				// Offered non-dust HTLC: its source is mandatory.
				match source {
					Some(source) => nondust_htlc_sources.push(source),
					None => return Err(()),
				}
			}
		}
		Ok(Self { nondust_htlc_sources, dust_htlcs })
	}
}
// The per-funding-transaction state of a channel: with splicing, a channel may have multiple
// valid funding transactions (and thus commitment transactions) pending at once.
#[derive(Clone, PartialEq)]
struct FundingScope {
	// The channel parameters for this scope, including the funding outpoint (which must be set
	// for an active monitor, see `FundingScope::funding_outpoint`).
	channel_parameters: ChannelTransactionParameters,
	// Txids of the counterparty's current and previous commitment transactions for this scope.
	// NOTE(review): naming-based inference; confirm against the commitment-update handling.
	current_counterparty_commitment_txid: Option<Txid>,
	prev_counterparty_commitment_txid: Option<Txid>,
	/// The set of outpoints in each counterparty commitment transaction. We always need at least
	/// the payment hash from `HTLCOutputInCommitment` to claim even a revoked commitment
	/// transaction broadcast as we need to be able to construct the witness script in all cases.
	//
	// TODO(splicing): We shouldn't have to track these duplicatively per `FundingScope`. Ideally,
	// we have a global map to track the HTLCs, along with their source, as they should be
	// consistent across all commitments. Unfortunately, doing so requires that our HTLCs are not
	// tied to their respective commitment transaction via `transaction_output_index`, as those may
	// not be consistent across all commitments.
	counterparty_claimable_outpoints:
		HashMap<Txid, Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>>,
	// We store two holder commitment transactions to avoid any race conditions where we may update
	// some monitors (potentially on watchtowers) but then fail to update others, resulting in the
	// various monitors for one channel being out of sync, and us broadcasting a holder
	// transaction for which we have deleted claim information on some watchtowers.
	current_holder_commitment_tx: HolderCommitmentTransaction,
	prev_holder_commitment_tx: Option<HolderCommitmentTransaction>,
}
impl FundingScope {
	/// Returns the funding outpoint for this scope, panicking if it is unset.
	fn funding_outpoint(&self) -> OutPoint {
		match self.channel_parameters.funding_outpoint.as_ref() {
			Some(outpoint) => *outpoint,
			None => panic!("Funding outpoint must be set for active monitor"),
		}
	}

	/// Returns the txid of this scope's funding transaction.
	fn funding_txid(&self) -> Txid {
		let outpoint = self.funding_outpoint();
		outpoint.txid
	}

	/// Whether this scope was created by a splice (i.e., it has a parent funding txid).
	fn is_splice(&self) -> bool {
		self.channel_parameters.splice_parent_funding_txid.is_some()
	}

	/// The channel type features negotiated for this funding scope.
	fn channel_type_features(&self) -> &ChannelTypeFeatures {
		&self.channel_parameters.channel_type_features
	}
}
// TLV serialization for `FundingScope`; type numbers are part of the persisted format and must
// not change.
impl_writeable_tlv_based!(FundingScope, {
	(1, channel_parameters, (required: ReadableArgs, None)),
	(3, current_counterparty_commitment_txid, required),
	(5, prev_counterparty_commitment_txid, option),
	(7, current_holder_commitment_tx, required),
	(9, prev_holder_commitment_tx, option),
	(11, counterparty_claimable_outpoints, required),
});
// The full monitor state for a single channel, held behind the `Mutex` in `ChannelMonitor`.
#[derive(Clone, PartialEq)]
pub(crate) struct ChannelMonitorImpl<Signer: EcdsaChannelSigner> {
	// The currently-locked funding scope (see the "Alternative" note on
	// `alternative_funding_confirmed` below).
	funding: FundingScope,
	// Funding scopes for renegotiated (e.g. splice) funding transactions which have not yet
	// locked; searched by `get_confirmed_funding_scope!`.
	pending_funding: Vec<FundingScope>,
	/// True if this channel was configured for manual funding broadcasts. Monitors written by
	/// versions prior to LDK 0.2 load with `false` until a new update persists it.
	is_manual_broadcast: bool,
	/// True once we've observed either funding transaction on-chain. Older monitors prior to LDK 0.2
	/// assume this is `true` when absent during upgrade so holder broadcasts aren't gated unexpectedly.
	/// In manual-broadcast channels we also use this to trigger deferred holder
	/// broadcasts once the funding transaction finally appears on-chain.
	///
	/// Note: This tracks whether the funding transaction was ever broadcast, not whether it is
	/// currently confirmed. It is never reset, even if the funding transaction is unconfirmed due
	/// to a reorg.
	funding_seen_onchain: bool,
	// The id of the most recently-applied `ChannelMonitorUpdate`.
	// NOTE(review): inferred from the field name; confirm against the update-application logic.
	latest_update_id: u64,
	// The obscure factor applied to commitment numbers encoded in commitment transactions.
	// NOTE(review): inferred from the field name (cf. BOLT 3); confirm at the usage sites.
	commitment_transaction_number_obscure_factor: u64,
	destination_script: ScriptBuf,
	broadcasted_holder_revokable_script: Option<(ScriptBuf, PublicKey, RevocationKey)>,
	counterparty_payment_script: ScriptBuf,
	shutdown_script: Option<ScriptBuf>,
	channel_keys_id: [u8; 32],
	holder_revocation_basepoint: RevocationBasepoint,
	channel_id: ChannelId,
	// The outpoint of the very first funding transaction negotiated for this channel, which stays
	// fixed even as splices replace the active funding.
	// NOTE(review): inferred from the field name; confirm against monitor construction.
	first_negotiated_funding_txo: OutPoint,
	counterparty_commitment_params: CounterpartyCommitmentParameters,
	// first is the idx of the first of the two per-commitment points
	their_cur_per_commitment_points: Option<(u64, PublicKey, Option<PublicKey>)>,
	on_holder_tx_csv: u16,
	commitment_secrets: CounterpartyCommitmentSecrets,
	/// We cannot identify HTLC-Success or HTLC-Timeout transactions by themselves on the chain.
	/// Nor can we figure out their commitment numbers without the commitment transaction they are
	/// spending. Thus, in order to claim them via revocation key, we track all the counterparty
	/// commitment transactions which we find on-chain, mapping them to the commitment number which
	/// can be used to derive the revocation key and claim the transactions.
	counterparty_commitment_txn_on_chain: HashMap<Txid, u64>,
	/// Cache used to make pruning of payment_preimages faster.
	/// Maps payment_hash values to commitment numbers for counterparty transactions for non-revoked
	/// counterparty transactions (ie should remain pretty small).
	/// Serialized to disk but should generally not be sent to Watchtowers.
	counterparty_hash_commitment_number: HashMap<PaymentHash, u64>,
	counterparty_fulfilled_htlcs: HashMap<SentHTLCId, PaymentPreimage>,
	// Used just for ChannelManager to make sure it has the latest channel data during
	// deserialization
	current_counterparty_commitment_number: u64,
	// Used just for ChannelManager to make sure it has the latest channel data during
	// deserialization
	current_holder_commitment_number: u64,
	/// The set of payment hashes from inbound payments for which we know the preimage. Payment
	/// preimages that are not included in any unrevoked local commitment transaction or unrevoked
	/// remote commitment transactions are automatically removed when commitment transactions are
	/// revoked. Note that this happens one revocation after it theoretically could, leaving
	/// preimages present here for the previous state even when the channel is "at rest". This is a
	/// good safety buffer, but also is important as it ensures we retain payment preimages for the
	/// previous local commitment transaction, which may have been broadcast already when we see
	/// the revocation (in setups with redundant monitors).
	///
	/// We also store [`PaymentClaimDetails`] here, tracking the payment information(s) for this
	/// preimage for inbound payments. This allows us to rebuild the inbound payment information on
	/// startup even if we lost our `ChannelManager`.
	payment_preimages: HashMap<PaymentHash, (PaymentPreimage, Vec<PaymentClaimDetails>)>,
	// Note that `MonitorEvent`s MUST NOT be generated during update processing, only generated
	// during chain data processing. This prevents a race in `ChainMonitor::update_channel` (and
	// presumably user implementations thereof as well) where we update the in-memory channel
	// object, then before the persistence finishes (as it's all under a read-lock), we return
	// pending events to the user or to the relevant `ChannelManager`. Then, on reload, we'll have
	// the pre-event state here, but have processed the event in the `ChannelManager`.
	// Note that because the `event_lock` in `ChainMonitor` is only taken in
	// block/transaction-connected events and *not* during block/transaction-disconnected events,
	// we further MUST NOT generate events during block/transaction-disconnection.
	pending_monitor_events: Vec<MonitorEvent>,
	// `Event`s awaiting delivery to the user; `is_processing_pending_events` guards against
	// re-entrant processing.
	// NOTE(review): inferred from the field names; confirm against the event-processing code.
	pub(super) pending_events: Vec<Event>,
	pub(super) is_processing_pending_events: bool,
	// Used to track on-chain events (i.e., transactions part of channels confirmed on chain) on
	// which to take actions once they reach enough confirmations. Each entry includes the
	// transaction's id and the height when the transaction was confirmed on chain.
	onchain_events_awaiting_threshold_conf: Vec<OnchainEventEntry>,
	// If we get serialized out and re-read, we need to make sure that the chain monitoring
	// interface knows about the TXOs that we want to be notified of spends of. We could probably
	// be smart and derive them from the above storage fields, but its much simpler and more
	// Obviously Correct (tm) if we just keep track of them explicitly.
	outputs_to_watch: HashMap<Txid, Vec<(u32, ScriptBuf)>>,
	#[cfg(any(test, feature = "_test_utils"))]
	pub onchain_tx_handler: OnchainTxHandler<Signer>,
	#[cfg(not(any(test, feature = "_test_utils")))]
	onchain_tx_handler: OnchainTxHandler<Signer>,
	// This is set when the Channel[Manager] generated a ChannelMonitorUpdate which indicated the
	// channel has been force-closed. After this is set, no further holder commitment transaction
	// updates may occur, and we panic!() if one is provided.
	lockdown_from_offchain: bool,
	// Set once we've signed a holder commitment transaction and handed it over to our
	// OnchainTxHandler. After this is set, no future updates to our holder commitment transactions
	// may occur, and we fail any such monitor updates.
	//
	// In case of update rejection due to a locally already signed commitment transaction, we
	// nevertheless store update content to track in case of concurrent broadcast by another
	// remote monitor out-of-order with regards to the block view.
	holder_tx_signed: bool,
	// If a spend of the funding output is seen, we set this to true and reject any further
	// updates. This prevents any further changes in the offchain state no matter the order
	// of block connection between ChannelMonitors and the ChannelManager.
	funding_spend_seen: bool,
	/// True if the commitment transaction fee is paid by us.
	/// Added in 0.0.124.
	holder_pays_commitment_tx_fee: Option<bool>,
	/// Set to `Some` of the confirmed transaction spending the funding input of the channel after
	/// reaching `ANTI_REORG_DELAY` confirmations.
	funding_spend_confirmed: Option<Txid>,
	confirmed_commitment_tx_counterparty_output: CommitmentTxCounterpartyOutputInfo,
	/// The set of HTLCs which have been either claimed or failed on chain and have reached
	/// the requisite confirmations on the claim/fail transaction (either ANTI_REORG_DELAY or the
	/// spending CSV for revocable outputs).
	htlcs_resolved_on_chain: Vec<IrrevocablyResolvedHTLC>,
	/// When a payment is resolved through an on-chain transaction, we tell the `ChannelManager`
	/// about this via [`ChannelMonitor::get_onchain_failed_outbound_htlcs`] and
	/// [`ChannelMonitor::get_all_current_outbound_htlcs`] at startup. We'll keep repeating the
	/// same payments until they're eventually fully resolved by the user processing a
	/// `PaymentSent` or `PaymentFailed` event, at which point the `ChannelManager` will inform of
	/// this and we'll store the set of fully resolved payments here.
	htlcs_resolved_to_user: HashSet<SentHTLCId>,
	/// The set of `SpendableOutput` events which we have already passed upstream to be claimed.
	/// These are tracked explicitly to ensure that we don't generate the same events redundantly
	/// if users duplicatively confirm old transactions. Specifically for transactions claiming a
	/// revoked remote outpoint we otherwise have no tracking at all once they've reached
	/// [`ANTI_REORG_DELAY`], so we have to track them here.
	spendable_txids_confirmed: Vec<Txid>,
	// We simply modify best_block in Channel's block_connected so that serialization is
	// consistent but hopefully the users' copy handles block_connected in a consistent way.
	// (we do *not*, however, update them in update_monitor to ensure any local user copies keep
	// their best_block from its state and not based on updated copies that didn't run through
	// the full block_connected).
	best_block: BestBlock,
	/// The node_id of our counterparty
	counterparty_node_id: PublicKey,
	/// Initial counterparty commitment data needed to recreate the commitment tx
	/// in the persistence pipeline for third-party watchtowers. This will only be present on
	/// monitors created after 0.0.117.
	///
	/// Ordering of tuple data: (their_per_commitment_point, feerate_per_kw, to_broadcaster_sats,
	/// to_countersignatory_sats)
	initial_counterparty_commitment_info: Option<(PublicKey, u32, u64, u64)>,
	/// Initial counterparty commitment transaction
	///
	/// We previously used the field above to re-build the counterparty commitment transaction,
	/// we now provide the transaction outright.
	initial_counterparty_commitment_tx: Option<CommitmentTransaction>,
	/// The first block height at which we had no remaining claimable balances.
	balances_empty_height: Option<u32>,
	/// In-memory only HTLC ids used to track upstream HTLCs that have been failed backwards due to
	/// a downstream channel force-close remaining unconfirmed by the time the upstream timeout
	/// expires. This is used to tell us we already generated an event to fail this HTLC back
	/// during a previous block scan.
	failed_back_htlc_ids: HashSet<SentHTLCId>,
	// The auxiliary HTLC data associated with a holder commitment transaction. This includes
	// non-dust HTLC sources, along with dust HTLCs and their sources. Note that this assumes any
	// alternative holder commitment transactions, like in the case of splicing, must maintain the
	// same set of non-dust and dust HTLCs. Also, while non-dust HTLC indices might change across
	// commitment transactions, their ordering with respect to each other must remain the same.
	current_holder_htlc_data: CommitmentHTLCData,
	prev_holder_htlc_data: Option<CommitmentHTLCData>,
	// Upon confirmation, tracks the txid and confirmation height of a renegotiated funding
	// transaction found in `Self::pending_funding`. Used to determine which commitment we should
	// broadcast when necessary.
	//
	// "Alternative" in this context means a `FundingScope` other than the currently locked one
	// found at `Self::funding`. We don't use the term "renegotiated", as the currently locked
	// `FundingScope` could be one that was renegotiated.
	alternative_funding_confirmed: Option<(Txid, u32)>,
	/// [`ChannelMonitor`]s written by LDK prior to 0.1 need to be re-persisted after startup. To
	/// make deciding whether to do so simple, here we track whether this monitor was last written
	/// prior to 0.1.
	written_by_0_1_or_later: bool,
}
// Returns a `&FundingScope` for the one we are currently observing/handling commitment transactions
// for on the chain.
//
// If an alternative (e.g. splice) funding transaction has confirmed, this resolves to the matching
// entry in `pending_funding` (panicking if it is somehow absent); otherwise it falls back to the
// primary `Self::funding` scope.
macro_rules! get_confirmed_funding_scope {
	($self: expr) => {
		$self
			.alternative_funding_confirmed
			.map(|(alternative_funding_txid, _)| {
				$self
					.pending_funding
					.iter()
					.find(|funding| funding.funding_txid() == alternative_funding_txid)
					.expect("FundingScope for confirmed alternative funding must exist")
			})
			.unwrap_or(&$self.funding)
	};
}
// Macro helper to access holder commitment HTLC data (including both non-dust and dust) while
// holding mutable references to `self`. Unfortunately, if these were turned into helper functions,
// we'd be unable to mutate `self` while holding an immutable iterator (specifically, returned from
// a function) over `self`.
//
// The `CURRENT`/`PREV` arms yield iterators over the HTLCs alone; the `*_WITH_SOURCES` arms yield
// `(htlc, source)` pairs via the final two-expression arm. The `PREV*` arms return `Option`s,
// `None` when there is no previous holder commitment transaction.
#[rustfmt::skip]
macro_rules! holder_commitment_htlcs {
	($self: expr, CURRENT) => {{
		// Non-dust HTLCs come from the confirmed funding scope's commitment transaction; dust
		// HTLCs are tracked separately in `current_holder_htlc_data`.
		let funding = get_confirmed_funding_scope!($self);
		funding.current_holder_commitment_tx.nondust_htlcs().iter()
			.chain($self.current_holder_htlc_data.dust_htlcs.iter().map(|(htlc, _)| htlc))
	}};
	($self: expr, CURRENT_WITH_SOURCES) => {{
		let funding = get_confirmed_funding_scope!($self);
		holder_commitment_htlcs!(
			&funding.current_holder_commitment_tx, &$self.current_holder_htlc_data
		)
	}};
	($self: expr, PREV) => {{
		let funding = get_confirmed_funding_scope!($self);
		funding.prev_holder_commitment_tx.as_ref().map(|tx| {
			let dust_htlcs = $self.prev_holder_htlc_data.as_ref().unwrap().dust_htlcs.iter()
				.map(|(htlc, _)| htlc);
			tx.nondust_htlcs().iter().chain(dust_htlcs)
		})
	}};
	($self: expr, PREV_WITH_SOURCES) => {{
		let funding = get_confirmed_funding_scope!($self);
		funding.prev_holder_commitment_tx.as_ref().map(|tx| {
			holder_commitment_htlcs!(tx, $self.prev_holder_htlc_data.as_ref().unwrap())
		})
	}};
	($commitment_tx: expr, $htlc_data: expr) => {{
		// Pair each non-dust HTLC with its source. Sources are only tracked for offered HTLCs and
		// are stored in the same relative order as the HTLCs themselves, so we can advance a
		// single iterator over them as we walk the HTLCs.
		let mut sources = $htlc_data.nondust_htlc_sources.iter();
		let nondust_htlcs = $commitment_tx.nondust_htlcs().iter().map(move |htlc| {
			let mut source = None;
			if htlc.offered {
				debug_assert!(htlc.transaction_output_index.is_some());
				source = sources.next();
				if source.is_none() {
					panic!("Every offered non-dust HTLC should have a corresponding source");
				}
			}
			(htlc, source)
		});
		let dust_htlcs = $htlc_data.dust_htlcs.iter().map(|(htlc, source)| (htlc, source.as_ref()));
		nondust_htlcs.chain(dust_htlcs)
	}};
}
/// Transaction outputs to watch for on-chain spends.
///
/// The [`Txid`] identifies the transaction whose outputs should be monitored; each `(u32, TxOut)`
/// pair gives the index of a watched output within that transaction along with the output itself.
pub type TransactionOutputs = (Txid, Vec<(u32, TxOut)>);
// Because we have weird workarounds for `ChannelMonitor` equality checks in `OnchainTxHandler` and
// `PackageTemplate` the equality implementation isn't really fit for public consumption. Instead,
// we only expose it during tests.
#[cfg(any(feature = "_test_utils", test))]
impl<Signer: EcdsaChannelSigner> PartialEq for ChannelMonitor<Signer>
where
    Signer: PartialEq,
{
    fn eq(&self, other: &Self) -> bool {
        use crate::sync::LockTestExt;
        // We need some kind of total lockorder. Absent a better idea, we sort by position in
        // memory and take locks in that order (assuming that we can't move within memory while a
        // lock is held).
        let ord = ((self as *const _) as usize) < ((other as *const _) as usize);
        // Always lock the lower-addressed monitor first so two concurrent comparisons of the
        // same pair take the locks in the same order.
        let a = if ord {
            self.inner.unsafe_well_ordered_double_lock_self()
        } else {
            other.inner.unsafe_well_ordered_double_lock_self()
        };
        let b = if ord {
            other.inner.unsafe_well_ordered_double_lock_self()
        } else {
            self.inner.unsafe_well_ordered_double_lock_self()
        };
        a.eq(&b)
    }
}
impl<Signer: EcdsaChannelSigner> Writeable for ChannelMonitor<Signer> {
    /// Serializes the monitor by taking the inner lock and delegating to the
    /// [`ChannelMonitorImpl`] `Writeable` implementation.
    fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
        let inner = self.inner.lock().unwrap();
        inner.write(writer)
    }
}
// These are also used for ChannelMonitorUpdate, above.
// Version prefix written ahead of every serialized monitor; readers refuse data whose minimum
// required version is newer than they understand.
const SERIALIZATION_VERSION: u8 = 1;
const MIN_SERIALIZATION_VERSION: u8 = 1;
/// Utility function for writing [`ChannelMonitor`] to prevent code duplication in [`ChainMonitor`] while sending Peer Storage.
///
/// Writes the legacy fixed-layout fields first, followed by a TLV stream for fields added after
/// the original format was frozen. The byte layout here is consumed by old readers, so field
/// order and encodings must not change.
///
/// NOTE: `is_stub` is true only when we are using this to serialise for Peer Storage.
///
/// TODO: Determine which fields of each `ChannelMonitor` should be included in Peer Storage, and which should be omitted.
///
/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
pub(crate) fn write_chanmon_internal<Signer: EcdsaChannelSigner, W: Writer>(
    channel_monitor: &ChannelMonitorImpl<Signer>, _is_stub: bool, writer: &mut W,
) -> Result<(), Error> {
    write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);

    channel_monitor.latest_update_id.write(writer)?;

    // Set in initial Channel-object creation, so should always be set by now:
    U48(channel_monitor.commitment_transaction_number_obscure_factor).write(writer)?;

    channel_monitor.destination_script.write(writer)?;
    // Legacy option encoding: a 0 flag byte marks `Some`, a 1 byte marks `None`.
    if let Some(ref broadcasted_holder_revokable_script) =
        channel_monitor.broadcasted_holder_revokable_script
    {
        writer.write_all(&[0; 1])?;
        broadcasted_holder_revokable_script.0.write(writer)?;
        broadcasted_holder_revokable_script.1.write(writer)?;
        broadcasted_holder_revokable_script.2.write(writer)?;
    } else {
        writer.write_all(&[1; 1])?;
    }

    channel_monitor.counterparty_payment_script.write(writer)?;
    // A missing shutdown script is encoded as an empty script.
    match &channel_monitor.shutdown_script {
        Some(script) => script.write(writer)?,
        None => ScriptBuf::new().write(writer)?,
    }

    channel_monitor.channel_keys_id.write(writer)?;
    channel_monitor.holder_revocation_basepoint.write(writer)?;
    let funding_outpoint = channel_monitor.get_funding_txo();
    writer.write_all(&funding_outpoint.txid[..])?;
    writer.write_all(&funding_outpoint.index.to_be_bytes())?;
    let redeem_script = channel_monitor.funding.channel_parameters.make_funding_redeemscript();
    let script_pubkey = redeem_script.to_p2wsh();
    script_pubkey.write(writer)?;
    channel_monitor.funding.current_counterparty_commitment_txid.write(writer)?;
    channel_monitor.funding.prev_counterparty_commitment_txid.write(writer)?;
    channel_monitor.counterparty_commitment_params.write(writer)?;
    redeem_script.write(writer)?;
    channel_monitor.funding.channel_parameters.channel_value_satoshis.write(writer)?;

    match channel_monitor.their_cur_per_commitment_points {
        Some((idx, pubkey, second_option)) => {
            writer.write_all(&byte_utils::be48_to_array(idx))?;
            writer.write_all(&pubkey.serialize())?;
            match second_option {
                Some(second_pubkey) => {
                    writer.write_all(&second_pubkey.serialize())?;
                },
                None => {
                    // 33 zero bytes mark the absence of a second per-commitment point.
                    writer.write_all(&[0; 33])?;
                },
            }
        },
        None => {
            writer.write_all(&byte_utils::be48_to_array(0))?;
        },
    }

    writer.write_all(&channel_monitor.on_holder_tx_csv.to_be_bytes())?;
    channel_monitor.commitment_secrets.write(writer)?;

    // Writes one `HTLCOutputInCommitment` in the legacy field-by-field format.
    #[rustfmt::skip]
    macro_rules! serialize_htlc_in_commitment {
        ($htlc_output: expr) => {
            writer.write_all(&[$htlc_output.offered as u8; 1])?;
            writer.write_all(&$htlc_output.amount_msat.to_be_bytes())?;
            writer.write_all(&$htlc_output.cltv_expiry.to_be_bytes())?;
            writer.write_all(&$htlc_output.payment_hash.0[..])?;
            $htlc_output.transaction_output_index.write(writer)?;
        }
    }

    // Per-counterparty-commitment HTLC data, keyed by commitment txid, with a length prefix.
    writer.write_all(
        &(channel_monitor.funding.counterparty_claimable_outpoints.len() as u64).to_be_bytes(),
    )?;
    for (ref txid, ref htlc_infos) in
        channel_monitor.funding.counterparty_claimable_outpoints.iter()
    {
        writer.write_all(&txid[..])?;
        writer.write_all(&(htlc_infos.len() as u64).to_be_bytes())?;
        for &(ref htlc_output, ref htlc_source) in htlc_infos.iter() {
            debug_assert!(
                htlc_source.is_none()
                    || Some(**txid) == channel_monitor.funding.current_counterparty_commitment_txid
                    || Some(**txid) == channel_monitor.funding.prev_counterparty_commitment_txid,
                "HTLC Sources for all revoked commitment transactions should be none!"
            );
            serialize_htlc_in_commitment!(htlc_output);
            htlc_source.as_ref().map(|b| b.as_ref()).write(writer)?;
        }
    }

    writer.write_all(
        &(channel_monitor.counterparty_commitment_txn_on_chain.len() as u64).to_be_bytes(),
    )?;
    for (ref txid, commitment_number) in channel_monitor.counterparty_commitment_txn_on_chain.iter()
    {
        writer.write_all(&txid[..])?;
        writer.write_all(&byte_utils::be48_to_array(*commitment_number))?;
    }

    writer.write_all(
        &(channel_monitor.counterparty_hash_commitment_number.len() as u64).to_be_bytes(),
    )?;
    for (ref payment_hash, commitment_number) in
        channel_monitor.counterparty_hash_commitment_number.iter()
    {
        writer.write_all(&payment_hash.0[..])?;
        writer.write_all(&byte_utils::be48_to_array(*commitment_number))?;
    }

    // Previous holder commitment (flag byte 1 if present), then the current one.
    if let Some(holder_commitment_tx) = &channel_monitor.funding.prev_holder_commitment_tx {
        writer.write_all(&[1; 1])?;
        write_legacy_holder_commitment_data(
            writer,
            holder_commitment_tx,
            &channel_monitor.prev_holder_htlc_data.as_ref().unwrap(),
        )?;
    } else {
        writer.write_all(&[0; 1])?;
    }

    write_legacy_holder_commitment_data(
        writer,
        &channel_monitor.funding.current_holder_commitment_tx,
        &channel_monitor.current_holder_htlc_data,
    )?;

    writer.write_all(&byte_utils::be48_to_array(
        channel_monitor.current_counterparty_commitment_number,
    ))?;
    writer
        .write_all(&byte_utils::be48_to_array(channel_monitor.current_holder_commitment_number))?;

    writer.write_all(&(channel_monitor.payment_preimages.len() as u64).to_be_bytes())?;
    for (payment_preimage, _) in channel_monitor.payment_preimages.values() {
        writer.write_all(&payment_preimage.0[..])?;
    }

    // Only the event variants listed here are written in the legacy section; the rest are
    // covered by the TLV fields below, so the count must match the loop's filter.
    writer.write_all(
        &(channel_monitor
            .pending_monitor_events
            .iter()
            .filter(|ev| match ev {
                MonitorEvent::HTLCEvent(_) => true,
                MonitorEvent::HolderForceClosed(_) => true,
                MonitorEvent::HolderForceClosedWithInfo { .. } => true,
                _ => false,
            })
            .count() as u64)
            .to_be_bytes(),
    )?;
    for event in channel_monitor.pending_monitor_events.iter() {
        match event {
            MonitorEvent::HTLCEvent(upd) => {
                0u8.write(writer)?;
                upd.write(writer)?;
            },
            MonitorEvent::HolderForceClosed(_) => 1u8.write(writer)?,
            // `HolderForceClosedWithInfo` replaced `HolderForceClosed` in v0.0.122. To keep
            // backwards compatibility, we write a `HolderForceClosed` event along with the
            // `HolderForceClosedWithInfo` event. This is deduplicated in the reader.
            MonitorEvent::HolderForceClosedWithInfo { .. } => 1u8.write(writer)?,
            _ => {}, // Covered in the TLV writes below
        }
    }

    writer.write_all(&(channel_monitor.pending_events.len() as u64).to_be_bytes())?;
    for event in channel_monitor.pending_events.iter() {
        event.write(writer)?;
    }

    channel_monitor.best_block.block_hash.write(writer)?;
    writer.write_all(&channel_monitor.best_block.height.to_be_bytes())?;

    writer.write_all(
        &(channel_monitor.onchain_events_awaiting_threshold_conf.len() as u64).to_be_bytes(),
    )?;
    for ref entry in channel_monitor.onchain_events_awaiting_threshold_conf.iter() {
        entry.write(writer)?;
    }

    (channel_monitor.outputs_to_watch.len() as u64).write(writer)?;
    for (txid, idx_scripts) in channel_monitor.outputs_to_watch.iter() {
        txid.write(writer)?;
        (idx_scripts.len() as u64).write(writer)?;
        for (idx, script) in idx_scripts.iter() {
            idx.write(writer)?;
            script.write(writer)?;
        }
    }
    channel_monitor.onchain_tx_handler.write(writer)?;

    channel_monitor.lockdown_from_offchain.write(writer)?;
    channel_monitor.holder_tx_signed.write(writer)?;

    // If we have a `HolderForceClosedWithInfo` event, we need to write the `HolderForceClosed` for backwards compatibility.
    let pending_monitor_events =
        match channel_monitor.pending_monitor_events.iter().find(|ev| match ev {
            MonitorEvent::HolderForceClosedWithInfo { .. } => true,
            _ => false,
        }) {
            Some(MonitorEvent::HolderForceClosedWithInfo { outpoint, .. }) => {
                let mut pending_monitor_events = channel_monitor.pending_monitor_events.clone();
                pending_monitor_events.push(MonitorEvent::HolderForceClosed(*outpoint));
                pending_monitor_events
            },
            _ => channel_monitor.pending_monitor_events.clone(),
        };

    write_tlv_fields!(writer, {
        (1, channel_monitor.funding_spend_confirmed, option),
        (3, channel_monitor.htlcs_resolved_on_chain, required_vec),
        (5, pending_monitor_events, required_vec),
        (7, channel_monitor.funding_spend_seen, required),
        (9, channel_monitor.counterparty_node_id, required),
        (11, channel_monitor.confirmed_commitment_tx_counterparty_output, option),
        (13, channel_monitor.spendable_txids_confirmed, required_vec),
        (15, channel_monitor.counterparty_fulfilled_htlcs, required),
        (17, channel_monitor.initial_counterparty_commitment_info, option),
        (19, channel_monitor.channel_id, required),
        (21, channel_monitor.balances_empty_height, option),
        (23, channel_monitor.holder_pays_commitment_tx_fee, option),
        (25, channel_monitor.payment_preimages, required),
        (27, channel_monitor.first_negotiated_funding_txo, required),
        (29, channel_monitor.initial_counterparty_commitment_tx, option),
        (31, channel_monitor.funding.channel_parameters, required),
        (32, channel_monitor.pending_funding, optional_vec),
        (33, channel_monitor.htlcs_resolved_to_user, required),
        (34, channel_monitor.alternative_funding_confirmed, option),
        (35, channel_monitor.is_manual_broadcast, required),
        (37, channel_monitor.funding_seen_onchain, required),
    });

    Ok(())
}
impl<Signer: EcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signer> {
    fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
        // Full (non-stub) serialization; Peer Storage serialization passes `true` instead.
        write_chanmon_internal(self, /* is_stub */ false, writer)
    }
}
// Shared body for `ChannelMonitor::process_pending_events` and its async variant.
//
// `$self_opt` is an `Option<&ChannelMonitor>`; for each pending event, `$event_to_handle` is
// assigned the event and then `$handle_event` is evaluated. Events are only removed from the
// pending queue once handled successfully, and the `is_processing_pending_events` flag prevents
// two callers from processing the same queue concurrently. The inner lock is *not* held while
// events are being handled.
#[rustfmt::skip]
macro_rules! _process_events_body {
    ($self_opt: expr, $logger: expr, $event_to_handle: expr, $handle_event: expr) => {
        loop {
            let mut handling_res = Ok(());
            let (pending_events, repeated_events);
            if let Some(us) = $self_opt {
                let mut inner = us.inner.lock().unwrap();
                // Another caller is already draining the queue; let it finish.
                if inner.is_processing_pending_events {
                    break handling_res;
                }
                inner.is_processing_pending_events = true;

                pending_events = inner.pending_events.clone();
                repeated_events = inner.get_repeated_events();
            } else { break handling_res; }

            let mut num_handled_events = 0;
            for event in pending_events {
                log_trace!($logger, "Handling event {:?}...", event);
                $event_to_handle = event;
                let event_handling_result = $handle_event;
                log_trace!($logger, "Done handling event, result: {:?}", event_handling_result);
                match event_handling_result {
                    Ok(()) => num_handled_events += 1,
                    Err(e) => {
                        // If we encounter an error we stop handling events and make sure to replay
                        // any unhandled events on the next invocation.
                        handling_res = Err(e);
                        break;
                    }
                }
            }

            if handling_res.is_ok() {
                for event in repeated_events {
                    // For repeated events we ignore any errors as they will be replayed eventually
                    // anyways.
                    $event_to_handle = event;
                    let _ = $handle_event;
                }
            }

            if let Some(us) = $self_opt {
                let mut inner = us.inner.lock().unwrap();
                // Only drop the events that were actually handled; failures stay queued.
                inner.pending_events.drain(..num_handled_events);
                inner.is_processing_pending_events = false;
                if handling_res.is_ok() && !inner.pending_events.is_empty() {
                    // If there's more events to process and we didn't fail so far, go ahead and do
                    // so.
                    continue;
                }
            }
            break handling_res;
        }
    }
}
pub(super) use _process_events_body as process_events_body;
/// Helper for building a [`WithContext`] logger scoped to a specific monitor's peer and channel
/// (and, optionally, a payment).
pub(crate) struct WithChannelMonitor;
impl WithChannelMonitor {
    pub(crate) fn from<'a, L: Deref, S: EcdsaChannelSigner>(
        logger: &'a L, monitor: &ChannelMonitor<S>, payment_hash: Option<PaymentHash>,
    ) -> WithContext<'a, L>
    where
        L::Target: Logger,
    {
        let inner = monitor.inner.lock().unwrap();
        Self::from_impl(logger, &*inner, payment_hash)
    }

    pub(crate) fn from_impl<'a, L: Deref, S: EcdsaChannelSigner>(
        logger: &'a L, monitor_impl: &ChannelMonitorImpl<S>, payment_hash: Option<PaymentHash>,
    ) -> WithContext<'a, L>
    where
        L::Target: Logger,
    {
        // Tag log lines with the peer and channel this monitor belongs to.
        WithContext::from(
            logger,
            Some(monitor_impl.counterparty_node_id),
            Some(monitor_impl.channel_id()),
            payment_hash,
        )
    }
}
impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
/// For lockorder enforcement purposes, we need to have a single site which constructs the
/// `inner` mutex, otherwise cases where we lock two monitors at the same time (eg in our
/// PartialEq implementation) we may decide a lockorder violation has occurred.
fn from_impl(imp: ChannelMonitorImpl<Signer>) -> Self {
ChannelMonitor { inner: Mutex::new(imp) }
}
/// Builds a fresh [`ChannelMonitor`] for a newly-created channel.
///
/// Derives the counterparty commitment parameters and the initial `OnchainTxHandler` from the
/// given `channel_parameters` and signer, and seeds `outputs_to_watch` with the funding output
/// so any spend of it is noticed. There are never any HTLCs in the initial commitment
/// transaction, so all HTLC-tracking state starts empty.
#[rustfmt::skip]
pub(crate) fn new(
    secp_ctx: Secp256k1<secp256k1::All>, keys: Signer, shutdown_script: Option<ScriptBuf>,
    on_counterparty_tx_csv: u16, destination_script: &Script,
    channel_parameters: &ChannelTransactionParameters, holder_pays_commitment_tx_fee: bool,
    commitment_transaction_number_obscure_factor: u64,
    initial_holder_commitment_tx: HolderCommitmentTransaction, best_block: BestBlock,
    counterparty_node_id: PublicKey, channel_id: ChannelId,
    is_manual_broadcast: bool,
) -> ChannelMonitor<Signer> {
    // Obscure factors are 48-bit per BOLT 3.
    assert!(commitment_transaction_number_obscure_factor <= (1 << 48));

    let holder_pubkeys = &channel_parameters.holder_pubkeys;
    let counterparty_payment_script = chan_utils::get_countersigner_payment_script(
        &channel_parameters.channel_type_features, &holder_pubkeys.payment_point
    );

    let counterparty_channel_parameters = channel_parameters.counterparty_parameters.as_ref().unwrap();
    let counterparty_delayed_payment_base_key = counterparty_channel_parameters.pubkeys.delayed_payment_basepoint;
    let counterparty_htlc_base_key = counterparty_channel_parameters.pubkeys.htlc_basepoint;
    let counterparty_commitment_params = CounterpartyCommitmentParameters { counterparty_delayed_payment_base_key, counterparty_htlc_base_key, on_counterparty_tx_csv };

    let channel_keys_id = keys.channel_keys_id();
    let holder_revocation_basepoint = holder_pubkeys.revocation_basepoint;

    let current_holder_commitment_number =
        initial_holder_commitment_tx.trust().commitment_number();

    let onchain_tx_handler = OnchainTxHandler::new(
        channel_parameters.channel_value_satoshis, channel_keys_id, destination_script.into(),
        keys, channel_parameters.clone(), initial_holder_commitment_tx.clone(), secp_ctx
    );

    let funding_outpoint = channel_parameters.funding_outpoint
        .expect("Funding outpoint must be known during initialization");
    let funding_redeem_script = channel_parameters.make_funding_redeemscript();
    let funding_script = funding_redeem_script.to_p2wsh();
    let mut outputs_to_watch = new_hash_map();
    // Always watch the funding output itself so we notice any spend of it.
    outputs_to_watch.insert(
        funding_outpoint.txid, vec![(funding_outpoint.index as u32, funding_script.clone())],
    );

    Self::from_impl(ChannelMonitorImpl {
        funding: FundingScope {
            channel_parameters: channel_parameters.clone(),
            current_counterparty_commitment_txid: None,
            prev_counterparty_commitment_txid: None,
            counterparty_claimable_outpoints: new_hash_map(),
            current_holder_commitment_tx: initial_holder_commitment_tx,
            prev_holder_commitment_tx: None,
        },
        pending_funding: vec![],

        is_manual_broadcast,
        funding_seen_onchain: false,

        latest_update_id: 0,
        commitment_transaction_number_obscure_factor,

        destination_script: destination_script.into(),
        broadcasted_holder_revokable_script: None,
        counterparty_payment_script,
        shutdown_script,

        channel_keys_id,
        holder_revocation_basepoint,
        channel_id,
        first_negotiated_funding_txo: funding_outpoint,

        counterparty_commitment_params,
        their_cur_per_commitment_points: None,

        on_holder_tx_csv: counterparty_channel_parameters.selected_contest_delay,

        commitment_secrets: CounterpartyCommitmentSecrets::new(),
        counterparty_commitment_txn_on_chain: new_hash_map(),
        counterparty_hash_commitment_number: new_hash_map(),
        counterparty_fulfilled_htlcs: new_hash_map(),

        current_counterparty_commitment_number: 1 << 48,
        current_holder_commitment_number,

        payment_preimages: new_hash_map(),
        pending_monitor_events: Vec::new(),
        pending_events: Vec::new(),
        is_processing_pending_events: false,

        onchain_events_awaiting_threshold_conf: Vec::new(),
        outputs_to_watch,

        onchain_tx_handler,

        holder_pays_commitment_tx_fee: Some(holder_pays_commitment_tx_fee),
        lockdown_from_offchain: false,
        holder_tx_signed: false,
        funding_spend_seen: false,
        funding_spend_confirmed: None,
        confirmed_commitment_tx_counterparty_output: None,
        htlcs_resolved_on_chain: Vec::new(),
        htlcs_resolved_to_user: new_hash_set(),
        spendable_txids_confirmed: Vec::new(),

        best_block,
        counterparty_node_id,
        initial_counterparty_commitment_info: None,
        initial_counterparty_commitment_tx: None,
        balances_empty_height: None,

        failed_back_htlc_ids: new_hash_set(),

        // There are never any HTLCs in the initial commitment transaction
        current_holder_htlc_data: CommitmentHTLCData::new(),
        prev_holder_htlc_data: None,

        alternative_funding_confirmed: None,
        written_by_0_1_or_later: true,
    })
}
/// Returns a unique id for persisting the [`ChannelMonitor`], which is used as a key in a
/// key-value store.
///
/// Note: Previously, the funding outpoint was used in the [`Persist`] trait. However, since the
/// outpoint may change during splicing, this method is used to obtain a unique key instead. For
/// v1 channels, the funding outpoint is still used for backwards compatibility, whereas v2
/// channels use the channel id since it is fixed.
///
/// [`Persist`]: crate::chain::chainmonitor::Persist
pub fn persistence_key(&self) -> MonitorName {
    let inner = self.inner.lock().unwrap();
    let funding_outpoint = inner.first_negotiated_funding_txo;
    let channel_id = inner.channel_id;
    // A channel id derived from the first funding outpoint identifies a v1 channel.
    if ChannelId::v1_from_funding_outpoint(funding_outpoint) != channel_id {
        MonitorName::V2Channel(channel_id)
    } else {
        MonitorName::V1Channel(funding_outpoint)
    }
}
#[cfg(test)]
fn provide_secret(&self, idx: u64, secret: [u8; 32]) -> Result<(), &'static str> {
    let mut inner = self.inner.lock().unwrap();
    inner.provide_secret(idx, secret)
}
/// A variant of `Self::provide_latest_counterparty_commitment_tx` used to provide
/// the counterparty commitment transaction to the monitor so that the transaction
/// can be retrieved during the initial persistence of the monitor (mainly for use in
/// third-party watchtowers).
///
/// This is used to provide the counterparty commitment transaction directly to the monitor
/// before the initial persistence of a new channel.
pub(crate) fn provide_initial_counterparty_commitment_tx(
    &self, commitment_tx: CommitmentTransaction,
) {
    self.inner.lock().unwrap().provide_initial_counterparty_commitment_tx(commitment_tx);
}
/// Informs this monitor of the latest counterparty (ie non-broadcastable) commitment transaction.
/// The monitor watches for it to be broadcasted and then uses the HTLC information (and
/// possibly future revocation/preimage information) to claim outputs where possible.
/// We cache also the mapping hash:commitment number to lighten pruning of old preimages by watchtowers.
#[cfg(test)]
fn provide_latest_counterparty_commitment_tx(
    &self, txid: Txid, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>,
    commitment_number: u64, their_per_commitment_point: PublicKey,
) {
    self.inner.lock().unwrap().provide_latest_counterparty_commitment_tx(
        txid, htlc_outputs, commitment_number, their_per_commitment_point,
    )
}
#[cfg(test)]
fn provide_latest_holder_commitment_tx(
    &self, holder_commitment_tx: HolderCommitmentTransaction,
    htlc_outputs: &[(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)],
) {
    let mut inner = self.inner.lock().unwrap();
    // Test-only path: no claimed HTLCs or non-dust sources are passed through.
    inner
        .provide_latest_holder_commitment_tx(holder_commitment_tx, htlc_outputs, &Vec::new(), Vec::new())
        .unwrap()
}
/// This is used to provide payment preimage(s) out-of-band during startup without updating the
/// off-chain state with a new commitment transaction.
///
/// It is used only for legacy (created prior to LDK 0.1) pending payments on upgrade, and the
/// flow that uses it assumes that this [`ChannelMonitor`] is persisted prior to the
/// [`ChannelManager`] being persisted (as the state necessary to call this method again is
/// removed from the [`ChannelManager`] and thus a persistence inversion would imply we do not
/// get the preimage back into this [`ChannelMonitor`] on startup).
///
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
pub(crate) fn provide_payment_preimage_unsafe_legacy<B: Deref, F: Deref, L: Deref>(
    &self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage, broadcaster: &B,
    fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L,
) where
    B::Target: BroadcasterInterface,
    F::Target: FeeEstimator,
    L::Target: Logger,
{
    let mut inner = self.inner.lock().unwrap();
    let logger = WithChannelMonitor::from_impl(logger, &*inner, Some(*payment_hash));
    // Note that we don't pass any MPP claim parts here. This is generally not okay but in this
    // case is acceptable as we only call this method from `ChannelManager` deserialization in
    // cases where we are replaying a claim started on a previous version of LDK.
    inner.provide_payment_preimage(
        payment_hash,
        payment_preimage,
        &None,
        broadcaster,
        fee_estimator,
        &logger,
    )
}
/// Updates a ChannelMonitor on the basis of some new information provided by the Channel
/// itself.
///
/// panics if the given update is not the next update by update_id.
pub fn update_monitor<B: Deref, F: Deref, L: Deref>(
    &self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &L,
) -> Result<(), ()>
where
    B::Target: BroadcasterInterface,
    F::Target: FeeEstimator,
    L::Target: Logger,
{
    let mut inner = self.inner.lock().unwrap();
    // Wrap the logger so all lines are tagged with this monitor's peer/channel.
    let monitor_logger = WithChannelMonitor::from_impl(logger, &*inner, None);
    inner.update_monitor(updates, broadcaster, fee_estimator, &monitor_logger)
}
/// Gets the update_id from the latest ChannelMonitorUpdate which was applied to this
/// ChannelMonitor.
///
/// Note that for channels closed prior to LDK 0.1, this may return [`u64::MAX`].
pub fn get_latest_update_id(&self) -> u64 {
    let inner = self.inner.lock().unwrap();
    inner.get_latest_update_id()
}
/// Gets the funding transaction outpoint of the channel this ChannelMonitor is monitoring for.
pub fn get_funding_txo(&self) -> OutPoint {
    let inner = self.inner.lock().unwrap();
    inner.get_funding_txo()
}
/// Returns whether this monitor was last serialized by LDK 0.1 or later (pre-0.1 monitors need
/// to be re-persisted after startup).
pub(crate) fn written_by_0_1_or_later(&self) -> bool {
    let inner = self.inner.lock().unwrap();
    inner.written_by_0_1_or_later
}
/// Gets the funding script of the channel this ChannelMonitor is monitoring for.
pub fn get_funding_script(&self) -> ScriptBuf {
    let inner = self.inner.lock().unwrap();
    inner.get_funding_script()
}
/// Gets the channel_id of the channel this ChannelMonitor is monitoring for.
pub fn channel_id(&self) -> ChannelId {
    let inner = self.inner.lock().unwrap();
    inner.channel_id()
}
/// Gets the channel type of the corresponding channel.
pub fn channel_type_features(&self) -> ChannelTypeFeatures {
    let inner = self.inner.lock().unwrap();
    inner.channel_type_features().clone()
}
/// Gets a list of txids, with their output scripts (in the order they appear in the
/// transaction), which we must learn about spends of via block_connected().
pub fn get_outputs_to_watch(&self) -> Vec<(Txid, Vec<(u32, ScriptBuf)>)> {
    let inner = self.inner.lock().unwrap();
    let mut watched = Vec::new();
    for (txid, outputs) in inner.get_outputs_to_watch().iter() {
        watched.push((*txid, outputs.clone()));
    }
    watched
}
/// Loads the funding txo and outputs to watch into the given `chain::Filter` by repeatedly
/// calling `chain::Filter::register_output` and `chain::Filter::register_tx` until all outputs
/// have been registered.
pub fn load_outputs_to_watch<F: Deref, L: Deref>(&self, filter: &F, logger: &L)
where
    F::Target: chain::Filter,
    L::Target: Logger,
{
    let inner = self.inner.lock().unwrap();
    let logger = WithChannelMonitor::from_impl(logger, &*inner, None);
    // Register every funding transaction (the original plus any pending alternatives) so the
    // filter reports their confirmations.
    let all_fundings = core::iter::once(&inner.funding).chain(&inner.pending_funding);
    for scope in all_fundings {
        let funding_outpoint = scope.funding_outpoint();
        log_trace!(&logger, "Registering funding outpoint {} with the filter to monitor confirmations", &funding_outpoint);
        let script_pubkey = scope.channel_parameters.make_funding_redeemscript().to_p2wsh();
        filter.register_tx(&funding_outpoint.txid, &script_pubkey);
    }
    // Register each watched output so the filter reports any spend of it.
    for (txid, outputs) in inner.get_outputs_to_watch().iter() {
        for (index, script_pubkey) in outputs.iter() {
            assert!(*index <= u16::MAX as u32);
            let outpoint = OutPoint { txid: *txid, index: *index as u16 };
            log_trace!(logger, "Registering outpoint {} with the filter to monitor spend", outpoint);
            filter.register_output(WatchedOutput {
                block_hash: None,
                outpoint,
                script_pubkey: script_pubkey.clone(),
            });
        }
    }
}
/// Get the list of HTLCs who's status has been updated on chain. This should be called by
/// ChannelManager via [`chain::Watch::release_pending_monitor_events`].
pub fn get_and_clear_pending_monitor_events(&self) -> Vec<MonitorEvent> {
    let mut inner = self.inner.lock().unwrap();
    inner.get_and_clear_pending_monitor_events()
}
/// Processes [`SpendableOutputs`] events produced from each [`ChannelMonitor`] upon maturity.
///
/// For channels featuring anchor outputs, this method will also process [`BumpTransaction`]
/// events produced from each [`ChannelMonitor`] while there is a balance to claim onchain
/// within each channel. As the confirmation of a commitment transaction may be critical to the
/// safety of funds, we recommend invoking this every 30 seconds, or lower if running in an
/// environment with spotty connections, like on mobile.
///
/// An [`EventHandler`] may safely call back to the provider, though this shouldn't be needed in
/// order to handle these events.
///
/// Will return a [`ReplayEvent`] error if event handling failed and should eventually be retried.
///
/// [`SpendableOutputs`]: crate::events::Event::SpendableOutputs
/// [`BumpTransaction`]: crate::events::Event::BumpTransaction
pub fn process_pending_events<H: Deref, L: Deref>(
    &self, handler: &H, logger: &L,
) -> Result<(), ReplayEvent>
where
    H::Target: EventHandler,
    L::Target: Logger,
{
    // `ev` is assigned by the macro for each pending event before `handler.handle_event(ev)`
    // runs; see `_process_events_body` for the replay-on-error semantics.
    let mut ev;
    process_events_body!(Some(self), logger, ev, handler.handle_event(ev))
}
/// Processes any events asynchronously.
///
/// See [`Self::process_pending_events`] for more information.
pub async fn process_pending_events_async<
    Future: core::future::Future<Output = Result<(), ReplayEvent>>,
    H: Fn(Event) -> Future,
    L: Deref,
>(
    &self, handler: &H, logger: &L,
) -> Result<(), ReplayEvent>
where
    L::Target: Logger,
{
    // Same event loop as the sync variant, except each handler invocation is awaited.
    let mut ev;
    process_events_body!(Some(self), logger, ev, { handler(ev).await })
}
#[cfg(test)]
pub fn get_and_clear_pending_events(&self) -> Vec<Event> {
    let mut lck = self.inner.lock().unwrap();
    // Take all queued events, then tack on the repeated (regenerated) ones.
    let mut events: Vec<Event> = lck.pending_events.drain(..).collect();
    events.append(&mut lck.get_repeated_events());
    events
}
/// Gets the counterparty's initial commitment transaction. The returned commitment
/// transaction is unsigned. This is intended to be called during the initial persistence of
/// the monitor (inside an implementation of [`Persist::persist_new_channel`]), to allow for
/// watchtowers in the persistence pipeline to have enough data to form justice transactions.
///
/// This is similar to [`Self::counterparty_commitment_txs_from_update`], except
/// that for the initial commitment transaction, we don't have a corresponding update.
///
/// This will only return `Some` for channel monitors that have been created after upgrading
/// to LDK 0.0.117+.
///
/// [`Persist::persist_new_channel`]: crate::chain::chainmonitor::Persist::persist_new_channel
pub fn initial_counterparty_commitment_tx(&self) -> Option<CommitmentTransaction> {
    let mut inner = self.inner.lock().unwrap();
    inner.initial_counterparty_commitment_tx()
}
/// Gets all of the counterparty commitment transactions provided by the given update. This
/// may be empty if the update doesn't include any new counterparty commitments. Returned
/// commitment transactions are unsigned.
///
/// This is provided so that watchtower clients in the persistence pipeline are able to build
/// justice transactions for each counterparty commitment upon each update. It's intended to be
/// used within an implementation of [`Persist::update_persisted_channel`], which is provided
/// with a monitor and an update. Once revoked, signing a justice transaction can be done using
/// [`Self::sign_to_local_justice_tx`].
///
/// It is expected that a watchtower client may use this method to retrieve the latest counterparty
/// commitment transaction(s), and then hold the necessary data until a later update in which
/// the monitor has been updated with the corresponding revocation data, at which point the
/// monitor can sign the justice transaction.
///
/// This will only return a non-empty list for monitor updates that have been created after
/// upgrading to LDK 0.0.117+. Note that no restriction lies on the monitors themselves, which
/// may have been created prior to upgrading.
///
/// [`Persist::update_persisted_channel`]: crate::chain::chainmonitor::Persist::update_persisted_channel
pub fn counterparty_commitment_txs_from_update(
    &self, update: &ChannelMonitorUpdate,
) -> Vec<CommitmentTransaction> {
    let inner = self.inner.lock().unwrap();
    inner.counterparty_commitment_txs_from_update(update)
}
/// Wrapper around [`EcdsaChannelSigner::sign_justice_revoked_output`] to make
/// signing the justice transaction easier for implementors of
/// [`chain::chainmonitor::Persist`]. On success this method returns the provided transaction
/// signing the input at `input_idx`. This method will only produce a valid signature for
/// a transaction spending the `to_local` output of a commitment transaction, i.e. this cannot
/// be used for revoked HTLC outputs.
///
/// `Value` is the value of the output being spent by the input at `input_idx`, committed
/// in the BIP 143 signature.
///
/// This method will only succeed if this monitor has received the revocation secret for the
/// provided `commitment_number`. If a commitment number is provided that does not correspond
/// to the commitment transaction being revoked, this will return a signed transaction, but
/// the signature will not be valid.
///
/// Note that due to splicing, this can also return an `Err` when the counterparty commitment
/// this transaction is attempting to claim is no longer valid because the corresponding funding
/// transaction was spliced.
///
/// [`EcdsaChannelSigner::sign_justice_revoked_output`]: crate::sign::ecdsa::EcdsaChannelSigner::sign_justice_revoked_output
/// [`Persist`]: crate::chain::chainmonitor::Persist
#[rustfmt::skip]
pub fn sign_to_local_justice_tx(
	&self, justice_tx: Transaction, input_idx: usize, value: u64, commitment_number: u64,
) -> Result<Transaction, ()> {
	// The inner monitor holds the signer state needed to produce the justice signature.
	self.inner.lock().unwrap().sign_to_local_justice_tx(
		justice_tx, input_idx, value, commitment_number)
}
pub(crate) fn get_min_seen_secret(&self) -> u64 {
	// Simple accessor delegating to the inner, locked monitor state.
	let monitor_impl = self.inner.lock().unwrap();
	monitor_impl.get_min_seen_secret()
}
pub(crate) fn get_cur_counterparty_commitment_number(&self) -> u64 {
	// Simple accessor delegating to the inner, locked monitor state.
	let monitor_impl = self.inner.lock().unwrap();
	monitor_impl.get_cur_counterparty_commitment_number()
}
pub(crate) fn get_cur_holder_commitment_number(&self) -> u64 {
	// Simple accessor delegating to the inner, locked monitor state.
	let monitor_impl = self.inner.lock().unwrap();
	monitor_impl.get_cur_holder_commitment_number()
}
/// Fetches whether this monitor has marked the channel as closed and will refuse any further
/// updates to the commitment transactions.
///
/// It can be marked closed in a few different ways, including via a
/// [`ChannelMonitorUpdateStep::ChannelForceClosed`] or if the channel has been closed
/// on-chain.
pub(crate) fn no_further_updates_allowed(&self) -> bool {
	// Simple accessor delegating to the inner, locked monitor state.
	let monitor_impl = self.inner.lock().unwrap();
	monitor_impl.no_further_updates_allowed()
}
/// Gets the `node_id` of the counterparty for this channel.
pub fn get_counterparty_node_id(&self) -> PublicKey {
	// Copy the counterparty's node id out from under the inner lock.
	let monitor_impl = self.inner.lock().unwrap();
	monitor_impl.counterparty_node_id
}
/// You may use this to broadcast the latest local commitment transaction, either because
/// a monitor update failed or because we've fallen behind (i.e. we've received proof that our
/// counterparty side knows a revocation secret we gave them that they shouldn't know).
///
/// Broadcasting these transactions in this manner is UNSAFE, as they allow counterparty
/// side to punish you. Nevertheless you may want to broadcast them if counterparty doesn't
/// close channel with their commitment transaction after a substantial amount of time. Best
/// may be to contact the other node operator out-of-band to coordinate other options available
/// to you.
///
/// Note: For channels using manual funding broadcast (see
/// [`crate::ln::channelmanager::ChannelManager::funding_transaction_generated_manual_broadcast`]),
/// automatic broadcasts are suppressed until the funding transaction has been observed on-chain.
/// Calling this method overrides that suppression and queues the latest holder commitment
/// transaction for broadcast even if the funding has not yet been seen on-chain. This may result
/// in unconfirmable transactions being broadcast or [`Event::BumpTransaction`] notifications for
/// transactions that cannot be confirmed until the funding transaction is visible.
///
/// [`Event::BumpTransaction`]: crate::events::Event::BumpTransaction
pub fn broadcast_latest_holder_commitment_txn<B: Deref, F: Deref, L: Deref>(
	&self, broadcaster: &B, fee_estimator: &F, logger: &L,
) where
	B::Target: BroadcasterInterface,
	F::Target: FeeEstimator,
	L::Target: Logger,
{
	// Bound the caller-provided fee estimator before taking the lock; the two steps are
	// independent.
	let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&**fee_estimator);
	let mut monitor_impl = self.inner.lock().unwrap();
	let logger = WithChannelMonitor::from_impl(logger, &*monitor_impl, None);
	// NOTE(review): the trailing `false` flag's semantics live in the inner method —
	// confirm there before changing it.
	monitor_impl.queue_latest_holder_commitment_txn_for_broadcast(
		broadcaster,
		&bounded_fee_estimator,
		&logger,
		false,
	);
}
/// Unsafe test-only version of `broadcast_latest_holder_commitment_txn` used by our test framework
/// to bypass HolderCommitmentTransaction state update lockdown after signature and generate
/// revoked commitment transaction.
#[cfg(any(test, feature = "_test_utils", feature = "unsafe_revoked_tx_signing"))]
pub fn unsafe_get_latest_holder_commitment_txn<L: Deref>(&self, logger: &L) -> Vec<Transaction>
where
	L::Target: Logger,
{
	// Test-only passthrough: attach channel context to the logger and delegate.
	let mut monitor_impl = self.inner.lock().unwrap();
	let logger = WithChannelMonitor::from_impl(logger, &*monitor_impl, None);
	monitor_impl.unsafe_get_latest_holder_commitment_txn(&logger)
}
/// Processes transactions in a newly connected block, which may result in any of the following:
/// - update the monitor's state against resolved HTLCs
/// - punish the counterparty in the case of seeing a revoked commitment transaction
/// - force close the channel and claim/timeout incoming/outgoing HTLCs if near expiration
/// - detect settled outputs for later spending
/// - schedule and bump any in-flight claims
///
/// Returns any new outputs to watch from `txdata`; after called, these are also included in
/// [`get_outputs_to_watch`].
///
/// [`get_outputs_to_watch`]: #method.get_outputs_to_watch
#[rustfmt::skip]
pub fn block_connected<B: Deref, F: Deref, L: Deref>(
	&self, header: &Header, txdata: &TransactionData, height: u32, broadcaster: B,
	fee_estimator: F, logger: &L,
) -> Vec<TransactionOutputs>
where
	B::Target: BroadcasterInterface,
	F::Target: FeeEstimator,
	L::Target: Logger,
{
	// Attach channel context to the logger, then let the inner monitor process the block.
	let mut monitor_impl = self.inner.lock().unwrap();
	let logger = WithChannelMonitor::from_impl(logger, &*monitor_impl, None);
	monitor_impl.block_connected(header, txdata, height, broadcaster, fee_estimator, &logger)
}
/// Determines if the disconnected block contained any transactions of interest and updates
/// appropriately.
pub fn blocks_disconnected<B: Deref, F: Deref, L: Deref>(
	&self, fork_point: BestBlock, broadcaster: B, fee_estimator: F, logger: &L,
) where
	B::Target: BroadcasterInterface,
	F::Target: FeeEstimator,
	L::Target: Logger,
{
	// Forward the reorg notification to the inner monitor under its lock.
	let mut monitor_impl = self.inner.lock().unwrap();
	let logger = WithChannelMonitor::from_impl(logger, &*monitor_impl, None);
	monitor_impl.blocks_disconnected(fork_point, broadcaster, fee_estimator, &logger)
}
/// Processes transactions confirmed in a block with the given header and height, returning new
/// outputs to watch. See [`block_connected`] for details.
///
/// Used instead of [`block_connected`] by clients that are notified of transactions rather than
/// blocks. See [`chain::Confirm`] for calling expectations.
///
/// [`block_connected`]: Self::block_connected
#[rustfmt::skip]
pub fn transactions_confirmed<B: Deref, F: Deref, L: Deref>(
	&self, header: &Header, txdata: &TransactionData, height: u32, broadcaster: B,
	fee_estimator: F, logger: &L,
) -> Vec<TransactionOutputs>
where
	B::Target: BroadcasterInterface,
	F::Target: FeeEstimator,
	L::Target: Logger,
{
	// Wrap the fee estimator so returned feerates are bounded below, then delegate.
	let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
	let mut monitor_impl = self.inner.lock().unwrap();
	let logger = WithChannelMonitor::from_impl(logger, &*monitor_impl, None);
	monitor_impl.transactions_confirmed(
		header, txdata, height, broadcaster, &bounded_fee_estimator, &logger)
}
/// Processes a transaction that was reorganized out of the chain.
///
/// Used instead of [`blocks_disconnected`] by clients that are notified of transactions rather
/// than blocks. See [`chain::Confirm`] for calling expectations.
///
/// [`blocks_disconnected`]: Self::blocks_disconnected
#[rustfmt::skip]
pub fn transaction_unconfirmed<B: Deref, F: Deref, L: Deref>(
	&self, txid: &Txid, broadcaster: B, fee_estimator: F, logger: &L,
) where
	B::Target: BroadcasterInterface,
	F::Target: FeeEstimator,
	L::Target: Logger,
{
	// Wrap the fee estimator so returned feerates are bounded below, then delegate.
	let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
	let mut monitor_impl = self.inner.lock().unwrap();
	let logger = WithChannelMonitor::from_impl(logger, &*monitor_impl, None);
	monitor_impl.transaction_unconfirmed(txid, broadcaster, &bounded_fee_estimator, &logger);
}
/// Updates the monitor with the current best chain tip, returning new outputs to watch. See
/// [`block_connected`] for details.
///
/// Used instead of [`block_connected`] by clients that are notified of transactions rather than
/// blocks. See [`chain::Confirm`] for calling expectations.
///
/// [`block_connected`]: Self::block_connected
#[rustfmt::skip]
pub fn best_block_updated<B: Deref, F: Deref, L: Deref>(
	&self, header: &Header, height: u32, broadcaster: B, fee_estimator: F, logger: &L,
) -> Vec<TransactionOutputs>
where
	B::Target: BroadcasterInterface,
	F::Target: FeeEstimator,
	L::Target: Logger,
{
	// Wrap the fee estimator so returned feerates are bounded below, then delegate.
	let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
	let mut monitor_impl = self.inner.lock().unwrap();
	let logger = WithChannelMonitor::from_impl(logger, &*monitor_impl, None);
	monitor_impl.best_block_updated(header, height, broadcaster, &bounded_fee_estimator, &logger)
}
/// Returns the set of txids that should be monitored for re-organization out of the chain.
#[rustfmt::skip]
pub fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option<BlockHash>)> {
	let monitor_impl = self.inner.lock().unwrap();
	// Gather txids both from events awaiting their confirmation threshold and from the
	// on-chain claim handler.
	let pending_event_txids = monitor_impl.onchain_events_awaiting_threshold_conf
		.iter()
		.map(|entry| (entry.txid, entry.height, entry.block_hash));
	let mut txids: Vec<(Txid, u32, Option<BlockHash>)> = pending_event_txids
		.chain(monitor_impl.onchain_tx_handler.get_relevant_txids())
		.collect();
	// Sort by txid ascending and, within a txid, by height descending so that the
	// following dedup keeps the highest-height entry for each txid.
	txids.sort_unstable_by(|a, b| a.0.cmp(&b.0).then_with(|| b.1.cmp(&a.1)));
	txids.dedup_by_key(|(txid, _, _)| *txid);
	txids
}
/// Gets the latest best block which was connected either via the [`chain::Listen`] or
/// [`chain::Confirm`] interfaces.
pub fn current_best_block(&self) -> BestBlock {
	// Clone the tracked best block out from under the inner lock.
	let monitor_impl = self.inner.lock().unwrap();
	monitor_impl.best_block.clone()
}
/// Triggers rebroadcasts/fee-bumps of pending claims from a force-closed channel. This is
/// crucial in preventing certain classes of pinning attacks, detecting substantial mempool
/// feerate changes between blocks, and ensuring reliability if broadcasting fails. We recommend
/// invoking this every 30 seconds, or lower if running in an environment with spotty
/// connections, like on mobile.
#[rustfmt::skip]
pub fn rebroadcast_pending_claims<B: Deref, F: Deref, L: Deref>(
	&self, broadcaster: B, fee_estimator: F, logger: &L,
)
where
	B::Target: BroadcasterInterface,
	F::Target: FeeEstimator,
	L::Target: Logger,
{
	let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
	let mut guard = self.inner.lock().unwrap();
	let monitor_impl = &mut *guard;
	let logger = WithChannelMonitor::from_impl(logger, &*monitor_impl, None);
	let best_height = monitor_impl.best_block.height;
	let conf_target = monitor_impl.closure_conf_target();
	// Uses `FeerateStrategy::HighestOfPreviousOrNew` here, in contrast to the
	// `RetryPrevious` strategy used by `signer_unblocked`.
	monitor_impl.onchain_tx_handler.rebroadcast_pending_claims(
		best_height, FeerateStrategy::HighestOfPreviousOrNew, &broadcaster, conf_target,
		&monitor_impl.destination_script, &bounded_fee_estimator, &logger,
	);
}
/// Returns true if the monitor has pending claim requests that are not fully confirmed yet.
pub fn has_pending_claims(&self) -> bool {
	// Query the on-chain claim handler under the inner lock.
	let monitor_impl = self.inner.lock().unwrap();
	monitor_impl.onchain_tx_handler.has_pending_claims()
}
/// Triggers rebroadcasts of pending claims from a force-closed channel after a transaction
/// signature generation failure.
#[rustfmt::skip]
pub fn signer_unblocked<B: Deref, F: Deref, L: Deref>(
	&self, broadcaster: B, fee_estimator: F, logger: &L,
)
where
	B::Target: BroadcasterInterface,
	F::Target: FeeEstimator,
	L::Target: Logger,
{
	let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
	let mut guard = self.inner.lock().unwrap();
	let monitor_impl = &mut *guard;
	let logger = WithChannelMonitor::from_impl(logger, &*monitor_impl, None);
	let best_height = monitor_impl.best_block.height;
	let conf_target = monitor_impl.closure_conf_target();
	// Uses `FeerateStrategy::RetryPrevious` here, in contrast to the fee-bumping
	// `HighestOfPreviousOrNew` strategy used by `rebroadcast_pending_claims`.
	monitor_impl.onchain_tx_handler.rebroadcast_pending_claims(
		best_height, FeerateStrategy::RetryPrevious, &broadcaster, conf_target,
		&monitor_impl.destination_script, &bounded_fee_estimator, &logger,
	);
}
/// Returns the descriptors for relevant outputs (i.e., those that we can spend) within the
/// transaction if they exist and the transaction has at least [`ANTI_REORG_DELAY`]
/// confirmations. For [`SpendableOutputDescriptor::DelayedPaymentOutput`] descriptors to be
/// returned, the transaction must have at least `max(ANTI_REORG_DELAY, to_self_delay)`
/// confirmations.
///
/// Descriptors returned by this method are primarily exposed via [`Event::SpendableOutputs`]
/// once they are no longer under reorg risk. This method serves as a way to retrieve these
/// descriptors at a later time, either for historical purposes, or to replay any
/// missed/unhandled descriptors. For the purpose of gathering historical records, if the
/// channel close has fully resolved (i.e., [`ChannelMonitor::get_claimable_balances`] returns
/// an empty set), you can retrieve all spendable outputs by providing all descendant spending
/// transactions starting from the channel's funding transaction and going down three levels.
///
/// `tx` is a transaction we'll scan the outputs of. Any transaction can be provided. If any
/// outputs which can be spent by us are found, at least one descriptor is returned.
///
/// `confirmation_height` must be the height of the block in which `tx` was included.
#[rustfmt::skip]
pub fn get_spendable_outputs(&self, tx: &Transaction, confirmation_height: u32) -> Vec<SpendableOutputDescriptor> {
	let monitor_impl = self.inner.lock().unwrap();
	let best_height = monitor_impl.best_block.height;
	let funding = get_confirmed_funding_scope!(monitor_impl);
	let descriptors = monitor_impl.get_spendable_outputs(&funding, tx);
	// Only surface descriptors whose confirmation requirement has been met relative to
	// the current chain tip.
	descriptors
		.into_iter()
		.filter(|descriptor| {
			let reorg_safe_height = best_height.saturating_sub(ANTI_REORG_DELAY) + 1;
			let conf_threshold = match descriptor {
				// `to_self_delay` outputs may additionally be gated on their CSV delay.
				SpendableOutputDescriptor::DelayedPaymentOutput(desc) => cmp::min(
					reorg_safe_height,
					best_height.saturating_sub(desc.to_self_delay as u32) + 1,
				),
				_ => reorg_safe_height,
			};
			conf_threshold >= confirmation_height
		})
		.collect()
}
/// Checks if the monitor is fully resolved. Resolved monitor is one that has claimed all of
/// its outputs and balances (i.e. [`Self::get_claimable_balances`] returns an empty set) and
/// which does not have any payment preimages for HTLCs which are still pending on other
/// channels.
///
/// Additionally may update state to track when the balances set became empty.
///
/// This function returns a tuple of two booleans, the first indicating whether the monitor is
/// fully resolved, and the second whether the monitor needs persistence to ensure it is
/// reliably marked as resolved within [`ARCHIVAL_DELAY_BLOCKS`] blocks.
///
/// The first boolean is true only if [`Self::get_claimable_balances`] has been empty for at
/// least [`ARCHIVAL_DELAY_BLOCKS`] blocks as an additional protection against any bugs
/// resulting in spuriously empty balance sets.
#[rustfmt::skip]
pub fn check_and_update_full_resolution_status<L: Logger>(&self, logger: &L) -> (bool, bool) {
// Both of these helpers take the inner lock themselves, so they must run before we
// acquire the lock below.
let mut is_all_funds_claimed = self.get_claimable_balances().is_empty();
let current_height = self.current_best_block().height;
let mut inner = self.inner.lock().unwrap();
if inner.is_closed_without_updates()
&& is_all_funds_claimed
&& !inner.funding_spend_seen
{
// We closed the channel without ever advancing it and didn't have any funds in it.
// We should immediately archive this monitor as there's nothing for us to ever do with
// it.
return (true, false);
}
// An empty balance set without ever seeing the funding spent indicates a bug; flag it
// in debug builds and, in production, conservatively treat funds as not yet claimed.
if is_all_funds_claimed && !inner.funding_spend_seen {
debug_assert!(false, "We should see funding spend by the time a monitor clears out");
is_all_funds_claimed = false;
}
// As long as HTLCs remain unresolved, they'll be present as a `Balance`. After that point,
// if they contained a preimage, an event will appear in `pending_monitor_events` which,
// once processed, implies the preimage exists in the corresponding inbound channel.
let preimages_not_needed_elsewhere = inner.pending_monitor_events.is_empty();
match (inner.balances_empty_height, is_all_funds_claimed, preimages_not_needed_elsewhere) {
(Some(balances_empty_height), true, true) => {
// Claimed all funds, check if reached the blocks threshold.
(current_height >= balances_empty_height + ARCHIVAL_DELAY_BLOCKS, false)
},
(Some(_), false, _)|(Some(_), _, false) => {
// previously assumed we claimed all funds, but we have new funds to claim or
// preimages are suddenly needed (because of a duplicate-hash HTLC).
// This should never happen as once the `Balance`s and preimages are clear, we
// should never create new ones.
debug_assert!(false,
"Thought we were done claiming funds, but claimable_balances now has entries");
log_error!(logger,
"WARNING: LDK thought it was done claiming all the available funds in the ChannelMonitor for channel {}, but later decided it had more to claim. This is potentially an important bug in LDK, please report it at https://github.com/lightningdevkit/rust-lightning/issues/new",
inner.get_funding_txo());
// Reset the tracking height; the monitor changed, so ask for re-persistence.
inner.balances_empty_height = None;
(false, true)
},
(None, true, true) => {
// Claimed all funds and preimages can be deleted, but `balances_empty_height` is
// None. It is set to the current block height.
log_debug!(logger,
"ChannelMonitor funded at {} is now fully resolved. It will become archivable in {} blocks",
inner.get_funding_txo(), ARCHIVAL_DELAY_BLOCKS);
inner.balances_empty_height = Some(current_height);
(false, true)
},
(None, false, _)|(None, _, false) => {
// Have funds to claim or our preimages are still needed.
(false, false)
},
}
}
#[cfg(test)]
pub fn get_counterparty_payment_script(&self) -> ScriptBuf {
	// Test-only accessor for the counterparty payment script.
	let monitor_impl = self.inner.lock().unwrap();
	monitor_impl.counterparty_payment_script.clone()
}
#[cfg(test)]
pub fn set_counterparty_payment_script(&self, script: ScriptBuf) {
	// Test-only setter for the counterparty payment script.
	let mut monitor_impl = self.inner.lock().unwrap();
	monitor_impl.counterparty_payment_script = script;
}
#[cfg(any(test, feature = "_test_utils"))]
pub fn do_mut_signer_call<F: FnMut(&mut Signer) -> ()>(&self, mut f: F) {
	// Test-only hook: run the provided closure with mutable access to the signer.
	let mut monitor_impl = self.inner.lock().unwrap();
	let signer = &mut monitor_impl.onchain_tx_handler.signer;
	f(signer);
}
}
impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
/// Helper for get_claimable_balances which does the work for an individual HTLC, generating up
/// to one `Balance` for the HTLC.
///
/// `holder_commitment` is set when walking HTLCs on a holder commitment transaction and
/// `counterparty_revoked_commitment` when walking HTLCs on a revoked counterparty
/// commitment. `confirmed_txid` is the commitment transaction which was seen spending the
/// funding output, if any.
///
/// Returns `None` if the HTLC was trimmed to dust (it has no on-chain output) or if it has
/// already been fully resolved on-chain.
#[rustfmt::skip]
fn get_htlc_balance(&self, htlc: &HTLCOutputInCommitment, source: Option<&HTLCSource>,
holder_commitment: bool, counterparty_revoked_commitment: bool,
confirmed_txid: Option<Txid>
) -> Option<Balance> {
// Dust HTLCs have no commitment transaction output, so there is nothing to claim.
let htlc_commitment_tx_output_idx = htlc.transaction_output_index?;
// Scan the events still awaiting their confirmation threshold for any which spend this
// HTLC's output, recording the spending txid/transaction and confirmation thresholds.
// The debug_asserts check that at most one pending event touches this HTLC output.
let mut htlc_spend_txid_opt = None;
let mut htlc_spend_tx_opt = None;
let mut holder_timeout_spend_pending = None;
let mut htlc_spend_pending = None;
let mut holder_delayed_output_pending = None;
for event in self.onchain_events_awaiting_threshold_conf.iter() {
match event.event {
// Our own timeout-claim of this HTLC output is awaiting confirmations.
OnchainEvent::HTLCUpdate { commitment_tx_output_idx, htlc_value_satoshis, .. }
if commitment_tx_output_idx == Some(htlc_commitment_tx_output_idx) => {
debug_assert!(htlc_spend_txid_opt.is_none());
htlc_spend_txid_opt = Some(&event.txid);
debug_assert!(htlc_spend_tx_opt.is_none());
htlc_spend_tx_opt = event.transaction.as_ref();
debug_assert!(holder_timeout_spend_pending.is_none());
debug_assert_eq!(htlc_value_satoshis.unwrap(), htlc.amount_msat / 1000);
holder_timeout_spend_pending = Some(event.confirmation_threshold());
},
// Some spend of this HTLC output (with or without the preimage) is awaiting
// confirmations.
OnchainEvent::HTLCSpendConfirmation { commitment_tx_output_idx, preimage, .. }
if commitment_tx_output_idx == htlc_commitment_tx_output_idx => {
debug_assert!(htlc_spend_txid_opt.is_none());
htlc_spend_txid_opt = Some(&event.txid);
debug_assert!(htlc_spend_tx_opt.is_none());
htlc_spend_tx_opt = event.transaction.as_ref();
debug_assert!(htlc_spend_pending.is_none());
htlc_spend_pending = Some((event.confirmation_threshold(), preimage.is_some()));
},
// A delayed-payment output from our own HTLC claim is maturing; match it back to
// this HTLC by locating the input which spent the commitment's HTLC output.
OnchainEvent::MaturingOutput {
descriptor: SpendableOutputDescriptor::DelayedPaymentOutput(ref descriptor) }
if event.transaction.as_ref().map(|tx| tx.input.iter().enumerate()
.any(|(input_idx, inp)|
Some(inp.previous_output.txid) == confirmed_txid &&
inp.previous_output.vout == htlc_commitment_tx_output_idx &&
// A maturing output for an HTLC claim will always be at the same
// index as the HTLC input. This is true pre-anchors, as there's
// only 1 input and 1 output. This is also true post-anchors,
// because we have a SIGHASH_SINGLE|ANYONECANPAY signature from our
// channel counterparty.
descriptor.outpoint.index as usize == input_idx
))
.unwrap_or(false)
=> {
debug_assert!(holder_delayed_output_pending.is_none());
holder_delayed_output_pending = Some(event.confirmation_threshold());
},
_ => {},
}
}
// Check whether the HTLC output was already irrevocably resolved on-chain, capturing
// the resolving transaction (if known) as a side effect of the scan.
let htlc_resolved = self.htlcs_resolved_on_chain.iter()
.any(|v| if v.commitment_tx_output_idx == Some(htlc_commitment_tx_output_idx) {
debug_assert!(htlc_spend_txid_opt.is_none());
htlc_spend_txid_opt = v.resolving_txid.as_ref();
debug_assert!(htlc_spend_tx_opt.is_none());
htlc_spend_tx_opt = v.resolving_tx.as_ref();
true
} else { false });
// At most one of "timeout-spend pending", "spend pending", and "resolved" may hold.
debug_assert!(holder_timeout_spend_pending.is_some() as u8 + htlc_spend_pending.is_some() as u8 + htlc_resolved as u8 <= 1);
// Determine the outpoint any further claim would spend: either the HTLC output on the
// confirmed commitment itself, or the output of a second-stage HTLC transaction.
let htlc_commitment_outpoint = BitcoinOutPoint::new(confirmed_txid.unwrap(), htlc_commitment_tx_output_idx);
let htlc_output_to_spend =
if let Some(txid) = htlc_spend_txid_opt {
// Because HTLC transactions either only have 1 input and 1 output (pre-anchors) or
// are signed with SIGHASH_SINGLE|ANYONECANPAY under BIP-0143 (post-anchors), we can
// locate the correct output by ensuring its adjacent input spends the HTLC output
// in the commitment.
if let Some(ref tx) = htlc_spend_tx_opt {
let htlc_input_idx_opt = tx.input.iter().enumerate()
.find(|(_, input)| input.previous_output == htlc_commitment_outpoint)
.map(|(idx, _)| idx as u32);
debug_assert!(htlc_input_idx_opt.is_some());
BitcoinOutPoint::new(*txid, htlc_input_idx_opt.unwrap_or(0))
} else {
// Without the full spending transaction we can only be in the pre-anchors
// 1-input/1-output HTLC-transaction case, i.e. output index 0.
let funding = get_confirmed_funding_scope!(self);
debug_assert!(!funding.channel_type_features().supports_anchors_zero_fee_htlc_tx());
debug_assert!(!funding.channel_type_features().supports_anchor_zero_fee_commitments());
BitcoinOutPoint::new(*txid, 0)
}
} else {
htlc_commitment_outpoint
};
let htlc_output_spend_pending = self.onchain_tx_handler.is_output_spend_pending(&htlc_output_to_spend);
if let Some(conf_thresh) = holder_delayed_output_pending {
debug_assert!(holder_commitment);
return Some(Balance::ClaimableAwaitingConfirmations {
amount_satoshis: htlc.amount_msat / 1000,
confirmation_height: conf_thresh,
source: BalanceSource::Htlc,
});
} else if htlc_resolved && !htlc_output_spend_pending {
// Funding transaction spends should be fully confirmed by the time any
// HTLC transactions are resolved, unless we're talking about a holder
// commitment tx, whose resolution is delayed until the CSV timeout is
// reached, even though HTLCs may be resolved after only
// ANTI_REORG_DELAY confirmations.
debug_assert!(holder_commitment || self.funding_spend_confirmed.is_some());
} else if counterparty_revoked_commitment {
// On a revoked counterparty commitment, check whether a justice claim of this HTLC
// output is already tracked as a maturing `StaticOutput`.
let htlc_output_claim_pending = self.onchain_events_awaiting_threshold_conf.iter().any(|event| {
if let OnchainEvent::MaturingOutput {
descriptor: SpendableOutputDescriptor::StaticOutput { .. }
} = &event.event {
event.transaction.as_ref().map(|tx| tx.input.iter().any(|inp| {
if let Some(htlc_spend_txid) = htlc_spend_txid_opt {
tx.compute_txid() == *htlc_spend_txid || inp.previous_output.txid == *htlc_spend_txid
} else {
Some(inp.previous_output.txid) == confirmed_txid &&
inp.previous_output.vout == htlc_commitment_tx_output_idx
}
})).unwrap_or(false)
} else {
false
}
});
if htlc_output_claim_pending {
// We already push `Balance`s onto the `res` list for every
// `StaticOutput` in a `MaturingOutput` in the revoked
// counterparty commitment transaction case generally, so don't
// need to do so again here.
} else {
debug_assert!(holder_timeout_spend_pending.is_none(),
"HTLCUpdate OnchainEvents should never appear for preimage claims");
debug_assert!(!htlc.offered || htlc_spend_pending.is_none() || !htlc_spend_pending.unwrap().1,
"We don't (currently) generate preimage claims against revoked outputs, where did you get one?!");
return Some(Balance::CounterpartyRevokedOutputClaimable {
amount_satoshis: htlc.amount_msat / 1000,
});
}
} else if htlc.offered == holder_commitment {
// If the payment was outbound, check if there's an HTLCUpdate
// indicating we have spent this HTLC with a timeout, claiming it back
// and awaiting confirmations on it.
if let Some(conf_thresh) = holder_timeout_spend_pending {
return Some(Balance::ClaimableAwaitingConfirmations {
amount_satoshis: htlc.amount_msat / 1000,
confirmation_height: conf_thresh,
source: BalanceSource::Htlc,
});
} else {
let outbound_payment = match source {
None => panic!("Outbound HTLCs should have a source"),
Some(&HTLCSource::PreviousHopData(_)) => false,
Some(&HTLCSource::OutboundRoute { .. }) => true,
};
return Some(Balance::MaybeTimeoutClaimableHTLC {
amount_satoshis: htlc.amount_msat / 1000,
claimable_height: htlc.cltv_expiry,
payment_hash: htlc.payment_hash,
outbound_payment,
});
}
} else if let Some((payment_preimage, _)) = self.payment_preimages.get(&htlc.payment_hash) {
// Otherwise (the payment was inbound), only expose it as claimable if
// we know the preimage.
// Note that if there is a pending claim, but it did not use the
// preimage, we lost funds to our counterparty! We will then continue
// to show it as ContentiousClaimable until ANTI_REORG_DELAY.
debug_assert!(holder_timeout_spend_pending.is_none());
if let Some((conf_thresh, true)) = htlc_spend_pending {
return Some(Balance::ClaimableAwaitingConfirmations {
amount_satoshis: htlc.amount_msat / 1000,
confirmation_height: conf_thresh,
source: BalanceSource::Htlc,
});
} else {
return Some(Balance::ContentiousClaimable {
amount_satoshis: htlc.amount_msat / 1000,
timeout_height: htlc.cltv_expiry,
payment_hash: htlc.payment_hash,
payment_preimage: *payment_preimage,
});
}
} else if !htlc_resolved {
// An inbound HTLC without a known preimage: still potentially claimable should we
// learn the preimage before it is timed out.
return Some(Balance::MaybePreimageClaimableHTLC {
amount_satoshis: htlc.amount_msat / 1000,
expiry_height: htlc.cltv_expiry,
payment_hash: htlc.payment_hash,
});
}
None
}
}
impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
/// Gets the balances in this channel which are either claimable by us if we were to
/// force-close the channel now or which are claimable on-chain (possibly awaiting
/// confirmation).
///
/// Any balances in the channel which are available on-chain (excluding on-chain fees) are
/// included here until an [`Event::SpendableOutputs`] event has been generated for the
/// balance, or until our counterparty has claimed the balance and accrued several
/// confirmations on the claim transaction.
///
/// Note that for `ChannelMonitors` which track a channel which went on-chain with versions of
/// LDK prior to 0.0.111, not all or excess balances may be included.
///
/// See [`Balance`] for additional details on the types of claimable balances which
/// may be returned here and their meanings.
#[rustfmt::skip]
pub fn get_claimable_balances(&self) -> Vec<Balance> {
let mut res = Vec::new();
let us = self.inner.lock().unwrap();
let mut confirmed_txid = us.funding_spend_confirmed;
let mut confirmed_counterparty_output = us.confirmed_commitment_tx_counterparty_output;
let mut pending_commitment_tx_conf_thresh = None;
let funding_spend_pending = us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
if let OnchainEvent::FundingSpendConfirmation { commitment_tx_to_counterparty_output, .. } =
event.event
{
confirmed_counterparty_output = commitment_tx_to_counterparty_output;
Some((event.txid, event.confirmation_threshold()))
} else { None }
});
if let Some((txid, conf_thresh)) = funding_spend_pending {
debug_assert!(us.funding_spend_confirmed.is_none(),
"We have a pending funding spend awaiting anti-reorg confirmation, we can't have confirmed it already!");
confirmed_txid = Some(txid);
pending_commitment_tx_conf_thresh = Some(conf_thresh);
}
macro_rules! walk_htlcs {
($holder_commitment: expr, $counterparty_revoked_commitment: expr, $htlc_iter: expr) => {
for (htlc, source) in $htlc_iter {
if htlc.transaction_output_index.is_some() {
if let Some(bal) = us.get_htlc_balance(
htlc, source, $holder_commitment, $counterparty_revoked_commitment, confirmed_txid
) {
res.push(bal);
}
}
}
}
}
if let Some(txid) = confirmed_txid {
let funding_spent = get_confirmed_funding_scope!(us);
let mut found_commitment_tx = false;
if let Some(counterparty_tx_htlcs) = funding_spent.counterparty_claimable_outpoints.get(&txid) {
// First look for the to_remote output back to us.
if let Some(conf_thresh) = pending_commitment_tx_conf_thresh {
if let Some(value) = us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
if let OnchainEvent::MaturingOutput {
descriptor: SpendableOutputDescriptor::StaticPaymentOutput(descriptor)
} = &event.event {
Some(descriptor.output.value)
} else { None }
}) {
res.push(Balance::ClaimableAwaitingConfirmations {
amount_satoshis: value.to_sat(),
confirmation_height: conf_thresh,
source: BalanceSource::CounterpartyForceClosed,
});
} else {
// If a counterparty commitment transaction is awaiting confirmation, we
// should either have a StaticPaymentOutput MaturingOutput event awaiting
// confirmation with the same height or have never met our dust amount.
}
}
if Some(txid) == funding_spent.current_counterparty_commitment_txid || Some(txid) == funding_spent.prev_counterparty_commitment_txid {
walk_htlcs!(false, false, counterparty_tx_htlcs.iter().map(|(a, b)| (a, b.as_ref().map(|b| &**b))));
} else {
walk_htlcs!(false, true, counterparty_tx_htlcs.iter().map(|(a, b)| (a, b.as_ref().map(|b| &**b))));
// The counterparty broadcasted a revoked state!
// Look for any StaticOutputs first, generating claimable balances for those.
// If any match the confirmed counterparty revoked to_self output, skip
// generating a CounterpartyRevokedOutputClaimable.
let mut spent_counterparty_output = false;
for event in us.onchain_events_awaiting_threshold_conf.iter() {
if let OnchainEvent::MaturingOutput {
descriptor: SpendableOutputDescriptor::StaticOutput { output, .. }
} = &event.event {
res.push(Balance::ClaimableAwaitingConfirmations {
amount_satoshis: output.value.to_sat(),
confirmation_height: event.confirmation_threshold(),
source: BalanceSource::CounterpartyForceClosed,
});
if let Some(confirmed_to_self_idx) = confirmed_counterparty_output.map(|(idx, _)| idx) {
if event.transaction.as_ref().map(|tx|
tx.input.iter().any(|inp| inp.previous_output.vout == confirmed_to_self_idx)
).unwrap_or(false) {
spent_counterparty_output = true;
}
}
}
}
if spent_counterparty_output {
} else if let Some((confirmed_to_self_idx, amt)) = confirmed_counterparty_output {
let output_spendable = us.onchain_tx_handler
.is_output_spend_pending(&BitcoinOutPoint::new(txid, confirmed_to_self_idx));
if output_spendable {
res.push(Balance::CounterpartyRevokedOutputClaimable {
amount_satoshis: amt.to_sat(),
});
}
} else {
// Counterparty output is missing, either it was broadcasted on a
// previous version of LDK or the counterparty hadn't met dust.
}
}
found_commitment_tx = true;
} else if txid == funding_spent.current_holder_commitment_tx.trust().txid() {
walk_htlcs!(true, false, holder_commitment_htlcs!(us, CURRENT_WITH_SOURCES));
if let Some(conf_thresh) = pending_commitment_tx_conf_thresh {
res.push(Balance::ClaimableAwaitingConfirmations {
amount_satoshis: funding_spent.current_holder_commitment_tx.to_broadcaster_value_sat(),
confirmation_height: conf_thresh,
source: BalanceSource::HolderForceClosed,
});
}
found_commitment_tx = true;
} else if let Some(prev_holder_commitment_tx) = &funding_spent.prev_holder_commitment_tx {
if txid == prev_holder_commitment_tx.trust().txid() {
walk_htlcs!(true, false, holder_commitment_htlcs!(us, PREV_WITH_SOURCES).unwrap());
if let Some(conf_thresh) = pending_commitment_tx_conf_thresh {
res.push(Balance::ClaimableAwaitingConfirmations {
amount_satoshis: prev_holder_commitment_tx.to_broadcaster_value_sat(),
confirmation_height: conf_thresh,
source: BalanceSource::HolderForceClosed,
});
}
found_commitment_tx = true;
}
}
if !found_commitment_tx {
if let Some(conf_thresh) = pending_commitment_tx_conf_thresh {
// We blindly assume this is a cooperative close transaction here, and that
// neither us nor our counterparty misbehaved. At worst we've under-estimated
// the amount we can claim as we'll punish a misbehaving counterparty.
res.push(Balance::ClaimableAwaitingConfirmations {
amount_satoshis: funding_spent.current_holder_commitment_tx.to_broadcaster_value_sat(),
confirmation_height: conf_thresh,
source: BalanceSource::CoopClose,
});
}
}
} else {
let mut claimable_inbound_htlc_value_sat = 0;
let mut outbound_payment_htlc_rounded_msat = 0;
let mut outbound_forwarded_htlc_rounded_msat = 0;
let mut inbound_claiming_htlc_rounded_msat = 0;
let mut inbound_htlc_rounded_msat = 0;
// We share the same set of HTLCs across all scopes, so we don't need to check the other
// scopes as it'd be redundant.
for (htlc, source) in holder_commitment_htlcs!(us, CURRENT_WITH_SOURCES) {
let rounded_value_msat = if htlc.transaction_output_index.is_none() {
htlc.amount_msat
} else { htlc.amount_msat % 1000 };
if htlc.offered {
let outbound_payment = match source {
None => panic!("Outbound HTLCs should have a source"),
Some(HTLCSource::PreviousHopData(_)) => false,
Some(HTLCSource::OutboundRoute { .. }) => true,
};
if outbound_payment {
outbound_payment_htlc_rounded_msat += rounded_value_msat;
} else {
outbound_forwarded_htlc_rounded_msat += rounded_value_msat;
}
if htlc.transaction_output_index.is_some() {
res.push(Balance::MaybeTimeoutClaimableHTLC {
amount_satoshis: htlc.amount_msat / 1000,
claimable_height: htlc.cltv_expiry,
payment_hash: htlc.payment_hash,
outbound_payment,
});
}
} else if us.payment_preimages.contains_key(&htlc.payment_hash) {
inbound_claiming_htlc_rounded_msat += rounded_value_msat;
if htlc.transaction_output_index.is_some() {
claimable_inbound_htlc_value_sat += htlc.amount_msat / 1000;
}
} else {
inbound_htlc_rounded_msat += rounded_value_msat;
if htlc.transaction_output_index.is_some() {
// As long as the HTLC is still in our latest commitment state, treat
// it as potentially claimable, even if it has long-since expired.
res.push(Balance::MaybePreimageClaimableHTLC {
amount_satoshis: htlc.amount_msat / 1000,
expiry_height: htlc.cltv_expiry,
payment_hash: htlc.payment_hash,
});
}
}
}
let balance_candidates = core::iter::once(&us.funding)
.chain(us.pending_funding.iter())
.map(|funding| {
let to_self_value_sat = funding.current_holder_commitment_tx.to_broadcaster_value_sat();
// In addition to `commit_tx_fee_sat`, this can also include dust HTLCs, any
// elided anchors, and the total msat amount rounded down from non-dust HTLCs.
let transaction_fee_satoshis = if us.holder_pays_commitment_tx_fee.unwrap_or(true) {
let transaction = &funding.current_holder_commitment_tx.trust().built_transaction().transaction;
let output_value_sat: u64 = transaction.output.iter().map(|txout| txout.value.to_sat()).sum();
funding.channel_parameters.channel_value_satoshis - output_value_sat
} else {
0
};
HolderCommitmentTransactionBalance {
amount_satoshis: to_self_value_sat + claimable_inbound_htlc_value_sat,
transaction_fee_satoshis,
}
})
.collect::<Vec<_>>();
let confirmed_balance_candidate_index = core::iter::once(&us.funding)
.chain(us.pending_funding.iter())
.enumerate()
.find(|(_, funding)| {
us.alternative_funding_confirmed
.map(|(funding_txid_confirmed, _)| funding.funding_txid() == funding_txid_confirmed)
// If `alternative_funding_confirmed` is not set, we can assume the current
// funding is confirmed.
.unwrap_or(true)
})
.map(|(idx, _)| idx)
.expect("We must have one FundingScope that is confirmed");
// Only push a primary balance if either the channel isn't closed or we've advanced the
// channel state machine at least once (implying there are multiple previous commitment
// transactions) or we actually have a balance.
// Avoiding including a `Balance` if none of these are true allows us to prune monitors
// for channels that were opened inbound to us but where the funding transaction never
// confirmed at all.
if !us.is_closed_without_updates()
|| balance_candidates.iter().any(|bal| bal.amount_satoshis != 0)
{
res.push(Balance::ClaimableOnChannelClose {
balance_candidates,
confirmed_balance_candidate_index,
outbound_payment_htlc_rounded_msat,
outbound_forwarded_htlc_rounded_msat,
inbound_claiming_htlc_rounded_msat,
inbound_htlc_rounded_msat,
});
}
}
res
}
/// Gets the set of outbound HTLCs which can be (or have been) resolved by this
/// `ChannelMonitor`. This is used to determine if an HTLC was removed from the channel prior
/// to the `ChannelManager` having been persisted.
pub(crate) fn get_all_current_outbound_htlcs(
&self,
) -> HashMap<HTLCSource, (HTLCOutputInCommitment, Option<PaymentPreimage>)> {
let mut res = new_hash_map();
// Just examine the available counterparty commitment transactions. See docs on
// `fail_unbroadcast_htlcs`, below, for justification.
let us = self.inner.lock().unwrap();
let mut walk_counterparty_commitment = |txid| {
if let Some(latest_outpoints) = us.funding.counterparty_claimable_outpoints.get(txid) {
for &(ref htlc, ref source_option) in latest_outpoints.iter() {
if let &Some(ref source) = source_option {
let htlc_id = SentHTLCId::from_source(source);
if !us.htlcs_resolved_to_user.contains(&htlc_id) {
let preimage_opt =
us.counterparty_fulfilled_htlcs.get(&htlc_id).cloned();
res.insert((**source).clone(), (htlc.clone(), preimage_opt));
}
}
}
}
};
if let Some(ref txid) = us.funding.current_counterparty_commitment_txid {
walk_counterparty_commitment(txid);
}
if let Some(ref txid) = us.funding.prev_counterparty_commitment_txid {
walk_counterparty_commitment(txid);
}
res
}
/// Gets the set of outbound HTLCs which hit the chain and ultimately were claimed by us via
/// the timeout path and reached [`ANTI_REORG_DELAY`] confirmations. This is used to determine
/// if an HTLC has failed without the `ChannelManager` having seen it prior to being persisted.
pub(crate) fn get_onchain_failed_outbound_htlcs(&self) -> HashMap<HTLCSource, PaymentHash> {
	let mut res = new_hash_map();
	let us = self.inner.lock().unwrap();
	// We only want HTLCs with ANTI_REORG_DELAY confirmations, which implies the commitment
	// transaction has at least ANTI_REORG_DELAY confirmations for any dependent HTLC
	// transactions to have been confirmed.
	let confirmed_txid = us.funding_spend_confirmed.or_else(|| {
		us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
			if let OnchainEvent::FundingSpendConfirmation { .. } = event.event {
				// Only consider the funding-spending transaction once it has reached
				// ANTI_REORG_DELAY confirmations.
				if event.height + ANTI_REORG_DELAY - 1 <= us.best_block.height {
					Some(event.txid)
				} else {
					None
				}
			} else {
				None
			}
		})
	});
	// If no commitment transaction has confirmed deeply enough, nothing can have failed
	// on-chain yet.
	let confirmed_txid = if let Some(txid) = confirmed_txid {
		txid
	} else {
		return res;
	};
	// Walks the candidate HTLCs (those in the unrevoked counterparty commitment
	// transactions), comparing each against `$htlc_iter` — the set of HTLCs in the
	// *confirmed* commitment transaction — to decide whether it has been failed.
	macro_rules! walk_htlcs {
		($htlc_iter: expr) => {
			let mut walk_candidate_htlcs = |htlcs| {
				for &(ref candidate_htlc, ref candidate_source) in htlcs {
					let candidate_htlc: &HTLCOutputInCommitment = &candidate_htlc;
					let candidate_source: &Option<Box<HTLCSource>> = &candidate_source;
					// HTLCs without a source are not outbound and are skipped.
					let source: &HTLCSource = if let Some(source) = candidate_source {
						source
					} else {
						continue;
					};
					let htlc_id = SentHTLCId::from_source(source);
					// Skip HTLCs whose resolution was already handed to the user.
					if us.htlcs_resolved_to_user.contains(&htlc_id) {
						continue;
					}
					let confirmed = $htlc_iter.find(|(_, conf_src)| Some(source) == *conf_src);
					if let Some((confirmed_htlc, _)) = confirmed {
						let filter = |v: &&IrrevocablyResolvedHTLC| {
							v.commitment_tx_output_idx
								== confirmed_htlc.transaction_output_index
						};
						// The HTLC was included in the confirmed commitment transaction, so we
						// need to see if it has been irrevocably failed yet.
						if confirmed_htlc.transaction_output_index.is_none() {
							// Dust HTLCs are always implicitly failed once the commitment
							// transaction reaches ANTI_REORG_DELAY confirmations.
							res.insert(source.clone(), confirmed_htlc.payment_hash);
						} else if let Some(state) =
							us.htlcs_resolved_on_chain.iter().filter(filter).next()
						{
							// Resolved on-chain without a preimage, i.e. via the timeout path.
							if state.payment_preimage.is_none() {
								res.insert(source.clone(), confirmed_htlc.payment_hash);
							}
						}
					} else {
						// The HTLC was not included in the confirmed commitment transaction,
						// which has now reached ANTI_REORG_DELAY confirmations and thus the
						// HTLC has been failed.
						res.insert(source.clone(), candidate_htlc.payment_hash);
					}
				}
			};
			// We walk the set of HTLCs in the unrevoked counterparty commitment transactions (see
			// `fail_unbroadcast_htlcs` for a description of why).
			if let Some(ref txid) = us.funding.current_counterparty_commitment_txid {
				let htlcs = us.funding.counterparty_claimable_outpoints.get(txid);
				walk_candidate_htlcs(htlcs.expect("Missing tx info for latest tx"));
			}
			if let Some(ref txid) = us.funding.prev_counterparty_commitment_txid {
				let htlcs = us.funding.counterparty_claimable_outpoints.get(txid);
				walk_candidate_htlcs(htlcs.expect("Missing tx info for previous tx"));
			}
		};
	}
	let funding = get_confirmed_funding_scope!(us);
	if Some(confirmed_txid) == funding.current_counterparty_commitment_txid
		|| Some(confirmed_txid) == funding.prev_counterparty_commitment_txid
	{
		// A counterparty commitment transaction confirmed; its sourced HTLCs form the
		// confirmed set.
		let htlcs = funding.counterparty_claimable_outpoints.get(&confirmed_txid).unwrap();
		walk_htlcs!(htlcs.iter().filter_map(|(a, b)| {
			if let &Some(ref source) = b {
				Some((a, Some(&**source)))
			} else {
				None
			}
		}));
	} else if confirmed_txid == funding.current_holder_commitment_tx.trust().txid() {
		walk_htlcs!(holder_commitment_htlcs!(us, CURRENT_WITH_SOURCES));
	} else if let Some(prev_commitment_tx) = &funding.prev_holder_commitment_tx {
		if confirmed_txid == prev_commitment_tx.trust().txid() {
			walk_htlcs!(holder_commitment_htlcs!(us, PREV_WITH_SOURCES).unwrap());
		} else {
			// The confirmed transaction matches none of the tracked commitment txids, so
			// compare against an empty confirmed set (all candidate HTLCs are failed).
			let htlcs_confirmed: &[(&HTLCOutputInCommitment, _)] = &[];
			walk_htlcs!(htlcs_confirmed.iter());
		}
	} else {
		// Same as above: no tracked commitment matched, use an empty confirmed set.
		let htlcs_confirmed: &[(&HTLCOutputInCommitment, _)] = &[];
		walk_htlcs!(htlcs_confirmed.iter());
	}
	res
}
/// Returns a copy of every payment preimage this monitor has on hand, along with any
/// associated claim details.
pub(crate) fn get_stored_preimages(
	&self,
) -> HashMap<PaymentHash, (PaymentPreimage, Vec<PaymentClaimDetails>)> {
	let inner = self.inner.lock().unwrap();
	inner.payment_preimages.clone()
}
}
/// Compares a broadcasted commitment transaction's HTLCs with those in the latest state,
/// failing any HTLCs which didn't make it into the broadcasted commitment transaction back
/// after ANTI_REORG_DELAY blocks.
///
/// We always compare against the set of HTLCs in counterparty commitment transactions, as those
/// are the commitment transactions which are generated by us. The off-chain state machine in
/// `Channel` will automatically resolve any HTLCs which were never included in a commitment
/// transaction when it detects channel closure, but it is up to us to ensure any HTLCs which were
/// included in a remote commitment transaction are failed back if they are not present in the
/// broadcasted commitment transaction.
///
/// Specifically, the removal process for HTLCs in `Channel` is always based on the counterparty
/// sending a `revoke_and_ack`, which causes us to clear `prev_counterparty_commitment_txid`. Thus,
/// as long as we examine both the current counterparty commitment transaction and, if it hasn't
/// been revoked yet, the previous one, we will never "forget" to resolve an HTLC.
macro_rules! fail_unbroadcast_htlcs {
	($self: expr, $commitment_tx_type: expr, $commitment_txid_confirmed: expr, $commitment_tx_confirmed: expr,
	$commitment_tx_conf_height: expr, $commitment_tx_conf_hash: expr, $confirmed_htlcs_list: expr, $logger: expr) => { {
		debug_assert_eq!($commitment_tx_confirmed.compute_txid(), $commitment_txid_confirmed);
		// For one counterparty commitment transaction's HTLC set, queues an
		// `OnchainEvent::HTLCUpdate` (a failure pending ANTI_REORG_DELAY) for each sourced
		// HTLC which does not appear in the confirmed commitment transaction.
		macro_rules! check_htlc_fails {
			($txid: expr, $commitment_tx: expr, $per_commitment_outpoints: expr) => {
				if let Some(ref latest_outpoints) = $per_commitment_outpoints {
					for &(ref htlc, ref source_option) in latest_outpoints.iter() {
						if let &Some(ref source) = source_option {
							// Check if the HTLC is present in the commitment transaction that was
							// broadcast, but not if it was below the dust limit, which we should
							// fail backwards immediately as there is no way for us to learn the
							// payment_preimage.
							// Note that if the dust limit were allowed to change between
							// commitment transactions we'd want to check whether *any*
							// broadcastable commitment transaction has the HTLC in it, but it
							// cannot currently change after channel initialization, so we don't
							// need to here.
							let confirmed_htlcs_iter: &mut dyn Iterator<Item = (&HTLCOutputInCommitment, Option<&HTLCSource>)> = &mut $confirmed_htlcs_list;
							let mut matched_htlc = false;
							for (ref broadcast_htlc, ref broadcast_source) in confirmed_htlcs_iter {
								// Match either by source, or (when the confirmed set carries no
								// sources) by payment hash plus amount.
								if broadcast_htlc.transaction_output_index.is_some() &&
									(Some(&**source) == *broadcast_source ||
									(broadcast_source.is_none() &&
									broadcast_htlc.payment_hash == htlc.payment_hash &&
									broadcast_htlc.amount_msat == htlc.amount_msat)) {
									matched_htlc = true;
									break;
								}
							}
							if matched_htlc { continue; }
							// If the counterparty already fulfilled this HTLC off-chain, it will
							// be resolved as a success elsewhere — don't fail it here.
							if $self.counterparty_fulfilled_htlcs.get(&SentHTLCId::from_source(source)).is_some() {
								continue;
							}
							// Drop any pending-failure entry for the same HTLC at this
							// confirmation height before pushing a fresh one.
							$self.onchain_events_awaiting_threshold_conf.retain(|ref entry| {
								if entry.height != $commitment_tx_conf_height { return true; }
								match entry.event {
									OnchainEvent::HTLCUpdate { source: ref update_source, .. } => {
										*update_source != **source
									},
									_ => true,
								}
							});
							let entry = OnchainEventEntry {
								txid: $commitment_txid_confirmed,
								transaction: Some($commitment_tx_confirmed.clone()),
								height: $commitment_tx_conf_height,
								block_hash: Some(*$commitment_tx_conf_hash),
								event: OnchainEvent::HTLCUpdate {
									source: (**source).clone(),
									payment_hash: htlc.payment_hash.clone(),
									htlc_value_satoshis: Some(htlc.amount_msat / 1000),
									commitment_tx_output_idx: None,
								},
							};
							log_trace!($logger, "Failing HTLC with payment_hash {} from {} counterparty commitment tx due to broadcast of {} commitment transaction {}, waiting for confirmation (at height {})",
								&htlc.payment_hash, $commitment_tx, $commitment_tx_type,
								$commitment_txid_confirmed, entry.confirmation_threshold());
							$self.onchain_events_awaiting_threshold_conf.push(entry);
						}
					}
				}
			}
		}
		// Examine both the current and the (possibly still unrevoked) previous counterparty
		// commitment transactions, per the macro-level docs above.
		if let Some(ref txid) = $self.funding.current_counterparty_commitment_txid {
			check_htlc_fails!(txid, "current", $self.funding.counterparty_claimable_outpoints.get(txid));
		}
		if let Some(ref txid) = $self.funding.prev_counterparty_commitment_txid {
			check_htlc_fails!(txid, "previous", $self.funding.counterparty_claimable_outpoints.get(txid));
		}
	} }
}
// In the `test_invalid_funding_tx` test, we need a bogus script which matches the HTLC-Accepted
// witness length (ie is 136 bytes long). We generate one here which we also use in some
// in-line tests later.
#[cfg(any(test, feature = "_test_utils"))]
pub fn deliberately_bogus_accepted_htlc_witness_program() -> Vec<u8> {
	use bitcoin::opcodes;
	// 136 bytes of OP_NOP, with the tail rewritten to four OP_DROPs followed by OP_TRUE,
	// matching the length of a real HTLC-Accepted witness program.
	let mut ret = vec![opcodes::all::OP_NOP.to_u8(); 136];
	for byte in ret[131..135].iter_mut() {
		*byte = opcodes::all::OP_DROP.to_u8();
	}
	ret[135] = opcodes::OP_TRUE.to_u8();
	ret
}
#[cfg(any(test, feature = "_test_utils"))]
pub fn deliberately_bogus_accepted_htlc_witness() -> Vec<Vec<u8>> {
	// Four empty witness elements followed by the bogus witness program itself.
	// Note: the previous version applied redundant `.into()` conversions (Vec -> Vec),
	// which clippy flags as `useless_conversion`; they are dropped here.
	vec![
		Vec::new(),
		Vec::new(),
		Vec::new(),
		Vec::new(),
		deliberately_bogus_accepted_htlc_witness_program(),
	]
}
impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
/// Gets the [`ConfirmationTarget`] we should use when selecting feerates for channel closure
/// transactions for this channel right now.
fn closure_conf_target(&self) -> ConfirmationTarget {
	// Sweeping is urgent as long as any HTLC is pending on a broadcastable (current or
	// previous) holder commitment transaction...
	// TODO: This has always considered dust, but maybe it shouldn't?
	let holder_has_htlcs = holder_commitment_htlcs!(self, CURRENT).next().is_some()
		|| holder_commitment_htlcs!(self, PREV)
			.map(|mut htlcs| htlcs.next().is_some())
			.unwrap_or(false);
	if holder_has_htlcs {
		return ConfirmationTarget::UrgentOnChainSweep;
	}
	// ...or on either of the counterparty's unrevoked commitment transactions.
	let counterparty_txids = [
		self.funding.current_counterparty_commitment_txid,
		self.funding.prev_counterparty_commitment_txid,
	];
	for txid in counterparty_txids.iter().flatten() {
		if !self.funding.counterparty_claimable_outpoints.get(txid).unwrap().is_empty() {
			return ConfirmationTarget::UrgentOnChainSweep;
		}
	}
	ConfirmationTarget::OutputSpendingFee
}
/// Inserts a revocation secret into this channel monitor. Prunes old preimages if neither
/// needed by holder commitment transactions' HTLCs nor by counterparty ones. Unless we haven't already seen
/// counterparty commitment transaction's secret, they are de facto pruned (we can use revocation key).
#[rustfmt::skip]
fn provide_secret(&mut self, idx: u64, secret: [u8; 32]) -> Result<(), &'static str> {
	// The secret store validates that the new secret is consistent with previously-provided
	// ones.
	if let Err(()) = self.commitment_secrets.provide_secret(idx, secret) {
		return Err("Previous secret did not match new one");
	}

	// Prune HTLCs from the previous counterparty commitment tx so we don't generate failure/fulfill
	// events for now-revoked/fulfilled HTLCs.
	let mut removed_fulfilled_htlcs = false;
	let prune_htlc_sources = |funding: &mut FundingScope| {
		if let Some(txid) = funding.prev_counterparty_commitment_txid.take() {
			if funding.current_counterparty_commitment_txid.unwrap() != txid {
				let cur_claimables = funding.counterparty_claimable_outpoints.get(
					&funding.current_counterparty_commitment_txid.unwrap()).unwrap();
				// We only need to remove fulfilled HTLCs once for the first `FundingScope` we
				// come across since all `FundingScope`s share the same set of HTLC sources.
				if !removed_fulfilled_htlcs {
					for (_, ref source_opt) in funding.counterparty_claimable_outpoints.get(&txid).unwrap() {
						if let Some(source) = source_opt {
							// Drop the recorded fulfill if the HTLC no longer appears in the
							// (now only remaining) current commitment transaction.
							if !cur_claimables.iter()
								.any(|(_, cur_source_opt)| cur_source_opt == source_opt)
							{
								self.counterparty_fulfilled_htlcs.remove(&SentHTLCId::from_source(source));
							}
						}
					}
					removed_fulfilled_htlcs = true;
				}
				// Clear the sources themselves so we don't later generate events for HTLCs in
				// the now-revoked commitment.
				for &mut (_, ref mut source_opt) in funding.counterparty_claimable_outpoints.get_mut(&txid).unwrap() {
					*source_opt = None;
				}
			} else {
				assert!(cfg!(fuzzing), "Commitment txids are unique outside of fuzzing, where hashes can collide");
			}
		}
	};
	core::iter::once(&mut self.funding).chain(&mut self.pending_funding).for_each(prune_htlc_sources);

	if !self.payment_preimages.is_empty() {
		let min_idx = self.get_min_seen_secret();
		let counterparty_hash_commitment_number = &mut self.counterparty_hash_commitment_number;

		self.payment_preimages.retain(|&k, _| {
			// Keep preimages still needed by an HTLC in the current holder commitment...
			for htlc in holder_commitment_htlcs!(self, CURRENT) {
				if k == htlc.payment_hash {
					return true
				}
			}
			// ...or in the previous (still-broadcastable) holder commitment...
			if let Some(htlcs) = holder_commitment_htlcs!(self, PREV) {
				for htlc in htlcs {
					if k == htlc.payment_hash {
						return true
					}
				}
			}
			// ...or in a counterparty commitment whose number is below the minimum index for
			// which we've seen a secret (i.e. one we presumably cannot yet claim via the
			// revocation key). Otherwise the hash's tracking entry is dropped along with the
			// preimage.
			let contains = if let Some(cn) = counterparty_hash_commitment_number.get(&k) {
				if *cn < min_idx {
					return true
				}
				true
			} else { false };
			if contains {
				counterparty_hash_commitment_number.remove(&k);
			}
			false
		});
	}

	Ok(())
}
fn provide_initial_counterparty_commitment_tx(
	&mut self, commitment_tx: CommitmentTransaction,
) {
	// Stash the parameters needed to rebuild this commitment transaction; this field is
	// populated so serialized monitors can be downgraded.
	self.initial_counterparty_commitment_info = Some((
		commitment_tx.per_commitment_point(),
		commitment_tx.negotiated_feerate_per_kw(),
		commitment_tx.to_broadcaster_value_sat(),
		commitment_tx.to_countersignatory_value_sat(),
	));

	#[cfg(debug_assertions)]
	{
		// Sanity-check that the stashed parameters rebuild the same transaction.
		let rebuilt_commitment_tx = self.initial_counterparty_commitment_tx().unwrap();
		debug_assert_eq!(rebuilt_commitment_tx.trust().txid(), commitment_tx.trust().txid());
	}

	self.provide_latest_counterparty_commitment_tx(
		commitment_tx.trust().txid(),
		Vec::new(),
		commitment_tx.commitment_number(),
		commitment_tx.per_commitment_point(),
	);

	// Eventually only the full transaction will be stored, replacing the info tuple above.
	self.initial_counterparty_commitment_tx = Some(commitment_tx);
}
#[rustfmt::skip]
fn provide_latest_counterparty_commitment_tx(
	&mut self, txid: Txid, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>,
	commitment_number: u64, their_per_commitment_point: PublicKey,
) {
	// TODO: Encrypt the htlc_outputs data with the single-hash of the commitment transaction
	// so that a remote monitor doesn't learn anything unless there is a malicious close.
	// (only maybe, sadly we cant do the same for local info, as we need to be aware of
	// timeouts)
	// Record the commitment number at which each payment hash appears, so preimages can be
	// pruned once the relevant commitments are revoked (see `provide_secret`).
	for &(ref htlc, _) in &htlc_outputs {
		self.counterparty_hash_commitment_number.insert(htlc.payment_hash, commitment_number);
	}

	// Rotate the tracked txids: the current commitment becomes the (unrevoked) previous one.
	self.funding.prev_counterparty_commitment_txid = self.funding.current_counterparty_commitment_txid.take();
	self.funding.current_counterparty_commitment_txid = Some(txid);
	self.funding.counterparty_claimable_outpoints.insert(txid, htlc_outputs);
	self.current_counterparty_commitment_number = commitment_number;
	//TODO: Merge this into the other per-counterparty-transaction output storage stuff
	// `their_cur_per_commitment_points` holds (N, point for N, optionally point for N - 1),
	// a sliding two-entry window of the counterparty's per-commitment points.
	match self.their_cur_per_commitment_points {
		Some(old_points) => {
			if old_points.0 == commitment_number + 1 {
				// The new commitment directly follows the tracked one: fill the second slot.
				self.their_cur_per_commitment_points = Some((old_points.0, old_points.1, Some(their_per_commitment_point)));
			} else if old_points.0 == commitment_number + 2 {
				if let Some(old_second_point) = old_points.2 {
					// Both slots in use: slide the window forward by one commitment.
					self.their_cur_per_commitment_points = Some((old_points.0 - 1, old_second_point, Some(their_per_commitment_point)));
				} else {
					self.their_cur_per_commitment_points = Some((commitment_number, their_per_commitment_point, None));
				}
			} else {
				// Non-adjacent update: restart tracking from this commitment.
				self.their_cur_per_commitment_points = Some((commitment_number, their_per_commitment_point, None));
			}
		},
		None => {
			self.their_cur_per_commitment_points = Some((commitment_number, their_per_commitment_point, None));
		}
	}
}
/// Updates the monitor with new counterparty commitment transaction(s) — one per
/// `FundingScope` — along with the HTLC source data shared across all of them.
fn update_counterparty_commitment_data(
	&mut self, commitment_txs: &[CommitmentTransaction], htlc_data: &CommitmentHTLCData,
) -> Result<(), &'static str> {
	// All provided commitment transactions must be mutually consistent and spend the
	// corresponding funding outpoints.
	self.verify_matching_commitment_transactions(commitment_txs.iter())?;

	// Pairs each HTLC in a commitment transaction with its source: non-offered non-dust
	// HTLCs consume a source from `nondust_htlc_sources` (in order), dust HTLCs carry
	// their own.
	let htlcs_for_commitment = |commitment: &CommitmentTransaction| {
		debug_assert!(htlc_data.nondust_htlc_sources.len() <= commitment.nondust_htlcs().len());
		let mut nondust_htlcs = commitment.nondust_htlcs().iter();
		let mut sources = htlc_data.nondust_htlc_sources.iter();
		let nondust_htlcs = core::iter::from_fn(move || {
			let htlc = nondust_htlcs.next()?.clone();
			let source = (!htlc.offered).then(|| {
				let source = sources
					.next()
					.expect("Every inbound non-dust HTLC should have a corresponding source")
					.clone();
				Box::new(source)
			});
			Some((htlc, source))
		});
		let dust_htlcs = htlc_data.dust_htlcs.iter().map(|(htlc, source)| {
			(htlc.clone(), source.as_ref().map(|source| Box::new(source.clone())))
		});
		nondust_htlcs.chain(dust_htlcs).collect::<Vec<_>>()
	};

	// The first commitment transaction corresponds to `self.funding` and goes through the
	// full update path (txid rotation, hash tracking, per-commitment-point window).
	let current_funding_commitment_tx = commitment_txs.first().unwrap();
	self.provide_latest_counterparty_commitment_tx(
		current_funding_commitment_tx.trust().txid(),
		htlcs_for_commitment(current_funding_commitment_tx),
		current_funding_commitment_tx.commitment_number(),
		current_funding_commitment_tx.per_commitment_point(),
	);

	// The remaining commitment transactions map 1:1 onto the pending funding scopes, which
	// only need their txids rotated and claimable outpoints stored.
	for (pending_funding, commitment_tx) in
		self.pending_funding.iter_mut().zip(commitment_txs.iter().skip(1))
	{
		let commitment_txid = commitment_tx.trust().txid();
		pending_funding.prev_counterparty_commitment_txid =
			pending_funding.current_counterparty_commitment_txid.take();
		pending_funding.current_counterparty_commitment_txid = Some(commitment_txid);
		pending_funding
			.counterparty_claimable_outpoints
			.insert(commitment_txid, htlcs_for_commitment(commitment_tx));
	}

	Ok(())
}
/// Informs this monitor of the latest holder (ie broadcastable) commitment transaction. The
/// monitor watches for timeouts and may broadcast it if we approach such a timeout. Thus, it
/// is important that any clones of this channel monitor (including remote clones) be kept
/// up-to-date as our holder commitment transaction is updated.
/// Panics if set_on_holder_tx_csv has never been called.
#[rustfmt::skip]
fn provide_latest_holder_commitment_tx(
	&mut self, holder_commitment_tx: HolderCommitmentTransaction,
	htlc_outputs: &[(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)],
	claimed_htlcs: &[(SentHTLCId, PaymentPreimage)], mut nondust_htlc_sources: Vec<HTLCSource>,
) -> Result<(), &'static str> {
	// Two calling conventions are supported: older callers pass all HTLCs (non-dust ones
	// carrying signatures) in `htlc_outputs`; newer callers pass only dust HTLCs there and
	// supply the non-dust sources via `nondust_htlc_sources`.
	let dust_htlcs = if htlc_outputs.iter().any(|(_, s, _)| s.is_some()) {
		// If we have non-dust HTLCs in htlc_outputs, ensure they match the HTLCs in the
		// `holder_commitment_tx`. In the future, we'll no longer provide the redundant data
		// and just pass in source data via `nondust_htlc_sources`.
		debug_assert_eq!(htlc_outputs.iter().filter(|(_, s, _)| s.is_some()).count(), holder_commitment_tx.trust().nondust_htlcs().len());
		for (a, b) in htlc_outputs.iter().filter(|(_, s, _)| s.is_some()).map(|(h, _, _)| h).zip(holder_commitment_tx.trust().nondust_htlcs().iter()) {
			debug_assert_eq!(a, b);
		}
		debug_assert_eq!(htlc_outputs.iter().filter(|(_, s, _)| s.is_some()).count(), holder_commitment_tx.counterparty_htlc_sigs.len());
		for (a, b) in htlc_outputs.iter().filter_map(|(_, s, _)| s.as_ref()).zip(holder_commitment_tx.counterparty_htlc_sigs.iter()) {
			debug_assert_eq!(a, b);
		}

		// Backfill the non-dust HTLC sources.
		debug_assert!(nondust_htlc_sources.is_empty());
		nondust_htlc_sources.reserve_exact(holder_commitment_tx.nondust_htlcs().len());
		htlc_outputs.iter().filter_map(|(htlc, _, source)| {
			// Filter our non-dust HTLCs, while at the same time pushing their sources into
			// `nondust_htlc_sources`.
			if htlc.transaction_output_index.is_none() {
				return Some((htlc.clone(), source.clone()));
			}
			if htlc.offered {
				nondust_htlc_sources.push(source.clone().expect("Outbound HTLCs should have a source"));
			}
			None
		}).collect()
	} else {
		// If we don't have any non-dust HTLCs in htlc_outputs, assume they were all passed via
		// `nondust_htlc_sources`, building up the final htlc_outputs by combining
		// `nondust_htlc_sources` and the `holder_commitment_tx`
		{
			// Sanity-check that the commitment's non-dust HTLCs are sorted by output index.
			let mut prev = -1;
			for htlc in holder_commitment_tx.trust().nondust_htlcs().iter() {
				assert!(htlc.transaction_output_index.unwrap() as i32 > prev);
				prev = htlc.transaction_output_index.unwrap() as i32;
			}
		}
		debug_assert!(htlc_outputs.iter().all(|(htlc, _, _)| htlc.transaction_output_index.is_none()));
		debug_assert!(htlc_outputs.iter().all(|(_, sig_opt, _)| sig_opt.is_none()));
		debug_assert_eq!(holder_commitment_tx.trust().nondust_htlcs().len(), holder_commitment_tx.counterparty_htlc_sigs.len());

		// Each offered (outbound) non-dust HTLC must consume exactly one provided source,
		// in order, and no sources may be left over.
		let mut sources = nondust_htlc_sources.iter();
		for htlc in holder_commitment_tx.trust().nondust_htlcs().iter() {
			if htlc.offered {
				let source = sources.next().expect("Non-dust HTLC sources didn't match commitment tx");
				assert!(source.possibly_matches_output(htlc));
			}
		}
		assert!(sources.next().is_none(), "All HTLC sources should have been exhausted");

		// This only includes dust HTLCs as checked above.
		htlc_outputs.iter().map(|(htlc, _, source)| (htlc.clone(), source.clone())).collect()
	};

	let htlc_data = CommitmentHTLCData { nondust_htlc_sources, dust_htlcs };
	self.update_holder_commitment_data(vec![holder_commitment_tx], htlc_data, claimed_htlcs)
}
/// Verifies that `commitment_txs` contains exactly one commitment transaction per
/// `FundingScope`, that each spends its scope's funding outpoint, and that all of them
/// agree on commitment number, per-commitment point, feerate, and non-dust HTLCs.
fn verify_matching_commitment_transactions<
	'a,
	I: ExactSizeIterator<Item = &'a CommitmentTransaction>,
>(
	&self, commitment_txs: I,
) -> Result<(), &'static str> {
	if commitment_txs.len() != self.pending_funding.len() + 1 {
		return Err("Commitment transaction count mismatch");
	}
	let fundings = core::iter::once(&self.funding).chain(self.pending_funding.iter());
	let mut prev_commitment_tx: Option<&CommitmentTransaction> = None;
	for (funding, commitment_tx) in fundings.zip(commitment_txs) {
		// Each commitment transaction must spend exactly its scope's funding outpoint.
		let built_tx = &commitment_tx.trust().built_transaction().transaction;
		if built_tx.input.len() != 1 {
			return Err("Commitment transactions must only spend one input");
		}
		if built_tx.input[0].previous_output != funding.funding_outpoint().into_bitcoin_outpoint()
		{
			return Err("Commitment transaction spends invalid funding outpoint");
		}
		// Compare against the previous scope's commitment transaction: everything other
		// than the spent funding outpoint must match.
		if let Some(prev_tx) = prev_commitment_tx {
			if commitment_tx.commitment_number() != prev_tx.commitment_number() {
				return Err("Commitment number mismatch");
			}
			if commitment_tx.per_commitment_point() != prev_tx.per_commitment_point() {
				return Err("Per-commitment-point mismatch");
			}
			if commitment_tx.negotiated_feerate_per_kw() != prev_tx.negotiated_feerate_per_kw() {
				return Err("Commitment fee rate mismatch");
			}
			let nondust_htlcs = commitment_tx.nondust_htlcs();
			let prev_nondust_htlcs = prev_tx.nondust_htlcs();
			if nondust_htlcs.len() != prev_nondust_htlcs.len() {
				return Err("Non-dust HTLC count mismatch");
			}
			let any_mismatch = nondust_htlcs
				.iter()
				.zip(prev_nondust_htlcs.iter())
				.any(|(a, b)| !a.is_data_equal(b));
			if any_mismatch {
				return Err("Non-dust HTLC mismatch");
			}
		}
		prev_commitment_tx = Some(commitment_tx);
	}
	Ok(())
}
/// Stores the new holder commitment transaction(s) — one per `FundingScope` — rotating the
/// existing ones into the "previous" slots, and records any newly-claimed HTLC preimages.
fn update_holder_commitment_data(
	&mut self, commitment_txs: Vec<HolderCommitmentTransaction>,
	mut htlc_data: CommitmentHTLCData, claimed_htlcs: &[(SentHTLCId, PaymentPreimage)],
) -> Result<(), &'static str> {
	// All provided commitment transactions must be mutually consistent and spend the
	// corresponding funding outpoints.
	self.verify_matching_commitment_transactions(
		commitment_txs.iter().map(|holder_commitment_tx| holder_commitment_tx.deref()),
	)?;

	let current_funding_commitment_tx = commitment_txs.first().unwrap();
	self.current_holder_commitment_number = current_funding_commitment_tx.commitment_number();
	self.onchain_tx_handler.provide_latest_holder_tx(current_funding_commitment_tx.clone());

	// For each funding scope, rotate the current holder commitment into the previous slot
	// and install the new one.
	for (funding, mut commitment_tx) in core::iter::once(&mut self.funding)
		.chain(self.pending_funding.iter_mut())
		.zip(commitment_txs.into_iter())
	{
		mem::swap(&mut commitment_tx, &mut funding.current_holder_commitment_tx);
		funding.prev_holder_commitment_tx = Some(commitment_tx);
	}
	// Likewise rotate the HTLC data, which is shared across all scopes.
	mem::swap(&mut htlc_data, &mut self.current_holder_htlc_data);
	self.prev_holder_htlc_data = Some(htlc_data);

	for (claimed_htlc_id, claimed_preimage) in claimed_htlcs {
		#[cfg(debug_assertions)]
		{
			// Any HTLC claimed here must still be present in the current counterparty
			// commitment transaction.
			let cur_counterparty_htlcs = self
				.funding
				.counterparty_claimable_outpoints
				.get(&self.funding.current_counterparty_commitment_txid.unwrap())
				.unwrap();
			assert!(cur_counterparty_htlcs.iter().any(|(_, source_opt)| {
				if let Some(source) = source_opt {
					SentHTLCId::from_source(source) == *claimed_htlc_id
				} else {
					false
				}
			}));
		}
		self.counterparty_fulfilled_htlcs.insert(*claimed_htlc_id, *claimed_preimage);
	}

	Ok(())
}
/// Provides a payment_hash->payment_preimage mapping. Will be automatically pruned when all
/// commitment_tx_infos which contain the payment hash have been revoked.
///
/// Note that this is often called multiple times for the same payment and must be idempotent.
#[rustfmt::skip]
fn provide_payment_preimage<B: Deref, F: Deref, L: Deref>(
	&mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage,
	payment_info: &Option<PaymentClaimDetails>, broadcaster: &B,
	fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &WithContext<L>)
where B::Target: BroadcasterInterface,
	F::Target: FeeEstimator,
	L::Target: Logger,
{
	// Record the preimage, appending any new claim details if we've already seen this
	// payment (keeping repeated calls idempotent).
	self.payment_preimages.entry(payment_hash.clone())
		.and_modify(|(_, payment_infos)| {
			if let Some(payment_info) = payment_info {
				if !payment_infos.contains(&payment_info) {
					payment_infos.push(payment_info.clone());
				}
			}
		})
		.or_insert_with(|| {
			(payment_preimage.clone(), payment_info.clone().into_iter().collect())
		});

	// Look for a confirmed funding spend: either already locked in, or still pending
	// ANTI_REORG_DELAY in `onchain_events_awaiting_threshold_conf` (in which case we also
	// know its confirmation height).
	let confirmed_spend_info = self.funding_spend_confirmed
		.map(|txid| (txid, None))
		.or_else(|| {
			self.onchain_events_awaiting_threshold_conf.iter().find_map(|event| match event.event {
				OnchainEvent::FundingSpendConfirmation { .. } => Some((event.txid, Some(event.height))),
				_ => None,
			})
		});
	let (confirmed_spend_txid, confirmed_spend_height) =
		if let Some((txid, height)) = confirmed_spend_info {
			(txid, height)
		} else {
			// Funding not spent on-chain; nothing to claim with this preimage yet.
			return;
		};
	let funding_spent = get_confirmed_funding_scope!(self);

	// If the channel is force closed, try to claim the output from this preimage.
	// First check if a counterparty commitment transaction has been broadcasted:
	macro_rules! claim_htlcs {
		($commitment_number: expr, $txid: expr, $htlcs: expr) => {
			let htlc_claim_reqs = self.get_counterparty_output_claims_for_preimage(*payment_preimage, funding_spent, $commitment_number, $txid, $htlcs, confirmed_spend_height);
			let conf_target = self.closure_conf_target();
			self.onchain_tx_handler.update_claims_view_from_requests(
				htlc_claim_reqs, self.best_block.height, self.best_block.height, broadcaster,
				conf_target, &self.destination_script, fee_estimator, logger,
			);
		}
	}
	if let Some(txid) = funding_spent.current_counterparty_commitment_txid {
		if txid == confirmed_spend_txid {
			if let Some(commitment_number) = self.counterparty_commitment_txn_on_chain.get(&txid) {
				claim_htlcs!(*commitment_number, txid, funding_spent.counterparty_claimable_outpoints.get(&txid));
			} else {
				debug_assert!(false);
				log_error!(logger, "Detected counterparty commitment tx on-chain without tracking commitment number");
			}
			return;
		}
	}
	if let Some(txid) = funding_spent.prev_counterparty_commitment_txid {
		if txid == confirmed_spend_txid {
			if let Some(commitment_number) = self.counterparty_commitment_txn_on_chain.get(&txid) {
				claim_htlcs!(*commitment_number, txid, funding_spent.counterparty_claimable_outpoints.get(&txid));
			} else {
				debug_assert!(false);
				log_error!(logger, "Detected counterparty commitment tx on-chain without tracking commitment number");
			}
			return;
		}
	}

	// Then if a holder commitment transaction has been seen on-chain, broadcast transactions
	// claiming the HTLC output from each of the holder commitment transactions.
	// Note that we can't just use `self.holder_tx_signed`, because that only covers the case where
	// *we* sign a holder commitment transaction, not when e.g. a watchtower broadcasts one of our
	// holder commitment transactions.
	if self.broadcasted_holder_revokable_script.is_some() {
		// Determine which of our (current or previous) commitment transactions confirmed.
		let holder_commitment_tx = if funding_spent.current_holder_commitment_tx.trust().txid() == confirmed_spend_txid {
			Some(&funding_spent.current_holder_commitment_tx)
		} else if let Some(prev_holder_commitment_tx) = &funding_spent.prev_holder_commitment_tx {
			if prev_holder_commitment_tx.trust().txid() == confirmed_spend_txid {
				Some(prev_holder_commitment_tx)
			} else {
				None
			}
		} else {
			None
		};
		if let Some(holder_commitment_tx) = holder_commitment_tx {
			// Assume that the broadcasted commitment transaction confirmed in the current best
			// block. Even if not, its a reasonable metric for the bump criteria on the HTLC
			// transactions.
			let (claim_reqs, _) = self.get_broadcasted_holder_claims(
				funding_spent, holder_commitment_tx, self.best_block.height,
			);
			let conf_target = self.closure_conf_target();
			self.onchain_tx_handler.update_claims_view_from_requests(
				claim_reqs, self.best_block.height, self.best_block.height, broadcaster,
				conf_target, &self.destination_script, fee_estimator, logger,
			);
		}
	}
}
#[rustfmt::skip]
/// Builds the claim packages and watch outputs needed to close the channel with our latest
/// holder commitment transaction.
///
/// Always returns a package claiming the funding output itself (the commitment transaction is
/// signed later, when the `OnchainTxHandler` processes the claim). For channels using neither
/// zero-fee-HTLC anchors nor zero-fee commitments, also returns packages claiming our
/// commitment transaction's outputs plus any new outputs to watch for spends of.
///
/// If `generate_monitor_event_with_reason` is `Some`, queues a
/// `MonitorEvent::HolderForceClosedWithInfo` with the given reason.
///
/// If `require_funding_seen` is set and this is a manual-broadcast channel whose funding
/// transaction has not been observed on-chain yet, empty vectors are returned instead — note
/// that `holder_tx_signed` is still set and the monitor event (if any) is still queued.
fn generate_claimable_outpoints_and_watch_outputs(
&mut self, generate_monitor_event_with_reason: Option<ClosureReason>,
require_funding_seen: bool,
) -> (Vec<PackageTemplate>, Vec<TransactionOutputs>) {
// Operate on the funding scope selected by `get_confirmed_funding_scope!` (presumably the
// scope whose funding is/was confirmed on-chain — see the macro definition).
let funding = get_confirmed_funding_scope!(self);
let holder_commitment_tx = &funding.current_holder_commitment_tx;
let funding_outp = HolderFundingOutput::build(
holder_commitment_tx.clone(),
funding.channel_parameters.clone(),
);
let funding_outpoint = funding.funding_outpoint();
// The commitment broadcast itself is modeled as a claim on the funding output.
let commitment_package = PackageTemplate::build_package(
funding_outpoint.txid.clone(), funding_outpoint.index as u32,
PackageSolvingData::HolderFundingOutput(funding_outp),
self.best_block.height,
);
let mut claimable_outpoints = vec![commitment_package];
if let Some(reason) = generate_monitor_event_with_reason {
let event = MonitorEvent::HolderForceClosedWithInfo {
reason,
outpoint: funding_outpoint,
channel_id: self.channel_id,
};
self.pending_monitor_events.push(event);
}
// Although we aren't signing the transaction directly here, the transaction will be signed
// in the claim that is queued to OnchainTxHandler. We set holder_tx_signed here to reject
// new channel updates.
self.holder_tx_signed = true;
// In manual-broadcast mode, if we have not yet observed the funding transaction on-chain,
// return empty vectors rather than triggering a broadcast.
if require_funding_seen && self.is_manual_broadcast && !self.funding_seen_onchain {
return (Vec::new(), Vec::new());
}
let mut watch_outputs = Vec::new();
// In CSV anchor channels, we can't broadcast our HTLC transactions while the commitment transaction is
// unconfirmed.
// We'll delay doing so until we detect the confirmed commitment in `transactions_confirmed`.
//
// TODO: For now in 0FC channels, we also delay broadcasting any HTLC transactions until the commitment
// transaction gets confirmed. It is nonetheless possible to add HTLC spends to the P2A spend
// transaction while the commitment transaction is still unconfirmed.
let zero_fee_htlcs =
self.channel_type_features().supports_anchors_zero_fee_htlc_tx();
let zero_fee_commitments =
self.channel_type_features().supports_anchor_zero_fee_commitments();
if !zero_fee_htlcs && !zero_fee_commitments {
// Because we're broadcasting a commitment transaction, we should construct the package
// assuming it gets confirmed in the next block. Sadly, we have code which considers
// "not yet confirmed" things as discardable, so we cannot do that here.
let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(
funding, holder_commitment_tx, self.best_block.height,
);
let new_outputs = self.get_broadcasted_holder_watch_outputs(holder_commitment_tx);
if !new_outputs.is_empty() {
watch_outputs.push((holder_commitment_tx.trust().txid(), new_outputs));
}
claimable_outpoints.append(&mut new_outpoints);
}
(claimable_outpoints, watch_outputs)
}
#[rustfmt::skip]
/// Generates claim packages for our latest holder commitment transaction (setting
/// `holder_tx_signed` and queuing a `HolderForceClosedWithInfo` monitor event in the process)
/// and hands them to the `OnchainTxHandler` for broadcast.
///
/// Note: For channels where the funding transaction is being manually managed (see
/// [`crate::ln::channelmanager::ChannelManager::funding_transaction_generated_manual_broadcast`]),
/// this method returns without queuing any transactions until the funding transaction has been
/// observed on-chain, unless `require_funding_seen` is `false`. This prevents attempting to
/// broadcast unconfirmable holder commitment transactions before the funding is visible.
/// See also [`ChannelMonitor::broadcast_latest_holder_commitment_txn`].
///
/// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`]: crate::chain::channelmonitor::ChannelMonitor::broadcast_latest_holder_commitment_txn
pub(crate) fn queue_latest_holder_commitment_txn_for_broadcast<B: Deref, F: Deref, L: Deref>(
&mut self, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &WithContext<L>,
require_funding_seen: bool,
)
where
B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
let reason = ClosureReason::HolderForceClosed {
broadcasted_latest_txn: Some(true),
message: "ChannelMonitor-initiated commitment transaction broadcast".to_owned(),
};
// Note: this call also sets `holder_tx_signed` and queues the monitor event, even in the
// manual-broadcast case handled below (it returns empty vectors then).
let (claimable_outpoints, _) =
self.generate_claimable_outpoints_and_watch_outputs(Some(reason), require_funding_seen);
// In manual-broadcast mode, if `require_funding_seen` is true and we have not yet observed
// the funding transaction on-chain, do not queue any transactions.
if require_funding_seen && self.is_manual_broadcast && !self.funding_seen_onchain {
log_info!(logger, "Not broadcasting holder commitment for manual-broadcast channel before funding appears on-chain");
return;
}
let conf_target = self.closure_conf_target();
self.onchain_tx_handler.update_claims_view_from_requests(
claimable_outpoints, self.best_block.height, self.best_block.height, broadcaster,
conf_target, &self.destination_script, fee_estimator, logger,
);
}
/// Handles a `RenegotiatedFunding` monitor update: validates the alternative funding (e.g. a
/// splice, or an RBF of the original funding) against our current state and, on success,
/// registers it as a new pending [`FundingScope`] and begins watching its funding output
/// on-chain.
///
/// Returns `Err(())` — before any monitor state is mutated — if:
/// - no current counterparty commitment txid exists yet,
/// - the alternative counterparty commitment's non-dust HTLCs don't match ours,
/// - the alternative funding txid duplicates an already-pending funding,
/// - a splice is negotiated while another funding is still pending, or it doesn't spend the
///   currently locked funding, or
/// - a non-splice renegotiation arrives after we've already spliced.
fn renegotiated_funding<L: Deref>(
&mut self, logger: &WithContext<L>, channel_parameters: &ChannelTransactionParameters,
alternative_holder_commitment_tx: &HolderCommitmentTransaction,
alternative_counterparty_commitment_tx: &CommitmentTransaction,
) -> Result<(), ()>
where
L::Target: Logger,
{
let alternative_counterparty_commitment_txid =
alternative_counterparty_commitment_tx.trust().txid();
// Both the current counterparty commitment and the alternative one share the same set of
// non-dust and dust HTLCs in the same order, though the index of each non-dust HTLC may be
// different.
//
// We clone all HTLCs and their sources to use in the alternative funding scope, and update
// each non-dust HTLC with their corresponding index in the alternative counterparty
// commitment.
let current_counterparty_commitment_htlcs =
if let Some(txid) = &self.funding.current_counterparty_commitment_txid {
self.funding.counterparty_claimable_outpoints.get(txid).unwrap()
} else {
debug_assert!(false);
log_error!(
logger,
"Received funding renegotiation while initial funding negotiation is still pending"
);
return Err(());
};
let mut htlcs_with_sources = current_counterparty_commitment_htlcs.clone();
let alternative_htlcs = alternative_counterparty_commitment_tx.nondust_htlcs();
let expected_non_dust_htlc_count = htlcs_with_sources
.iter()
// Non-dust HTLCs always come first, so the position of the first dust HTLC is equal to
// our non-dust HTLC count.
.position(|(htlc, _)| htlc.transaction_output_index.is_none())
.unwrap_or(htlcs_with_sources.len());
if alternative_htlcs.len() != expected_non_dust_htlc_count {
log_error!(
logger,
"Received alternative counterparty commitment with HTLC count mismatch"
);
return Err(());
}
// Pair up each alternative non-dust HTLC with ours (same order, per the check above) and
// adopt its output index in the alternative commitment.
for (alternative_htlc, (htlc, _)) in
alternative_htlcs.iter().zip(htlcs_with_sources.iter_mut())
{
debug_assert!(htlc.transaction_output_index.is_some());
debug_assert!(alternative_htlc.transaction_output_index.is_some());
if !alternative_htlc.is_data_equal(htlc) {
log_error!(
logger,
"Received alternative counterparty commitment with non-dust HTLC mismatch"
);
return Err(());
}
htlc.transaction_output_index = alternative_htlc.transaction_output_index;
}
let mut counterparty_claimable_outpoints = new_hash_map();
counterparty_claimable_outpoints
.insert(alternative_counterparty_commitment_txid, htlcs_with_sources);
// TODO(splicing): Enforce any necessary RBF validity checks.
let alternative_funding = FundingScope {
channel_parameters: channel_parameters.clone(),
current_counterparty_commitment_txid: Some(alternative_counterparty_commitment_txid),
prev_counterparty_commitment_txid: None,
counterparty_claimable_outpoints,
current_holder_commitment_tx: alternative_holder_commitment_tx.clone(),
prev_holder_commitment_tx: None,
};
let alternative_funding_outpoint = alternative_funding.funding_outpoint();
if self
.pending_funding
.iter()
.any(|funding| funding.funding_txid() == alternative_funding_outpoint.txid)
{
log_error!(
logger,
"Renegotiated funding transaction with a duplicate funding txid {}",
alternative_funding_outpoint.txid
);
return Err(());
}
if let Some(parent_funding_txid) = channel_parameters.splice_parent_funding_txid.as_ref() {
// Only one splice can be negotiated at a time after we've exchanged `channel_ready`
// (implying our funding is confirmed) that spends our currently locked funding.
if !self.pending_funding.is_empty() {
log_error!(
logger,
"Negotiated splice while channel is pending channel_ready/splice_locked"
);
return Err(());
}
if *parent_funding_txid != self.funding.funding_txid() {
log_error!(
logger,
"Negotiated splice that does not spend currently locked funding transaction"
);
return Err(());
}
} else if self.funding.is_splice() {
// If we've already spliced at least once, we're no longer able to RBF the original
// funding transaction.
return Err(());
}
// All checks passed: start watching the new funding output's P2WSH script for spends and
// track the alternative scope as pending.
let script_pubkey = channel_parameters.make_funding_redeemscript().to_p2wsh();
self.outputs_to_watch.insert(
alternative_funding_outpoint.txid,
vec![(alternative_funding_outpoint.index as u32, script_pubkey)],
);
self.pending_funding.push(alternative_funding);
Ok(())
}
/// Promotes the pending [`FundingScope`] whose funding txid is `new_funding_txid` to be the
/// active funding scope, dropping all other pending scopes (and emitting
/// [`Event::DiscardFunding`] for each dropped one, other than the previously-active scope,
/// if no further updates are allowed on the channel).
///
/// Also re-points the `OnchainTxHandler` at the newly-active channel parameters and holder
/// commitment transactions, and clears `prev_holder_htlc_data` if the promoted scope never
/// saw a commitment update.
///
/// Returns `Err(())` if `new_funding_txid` does not match any pending funding scope.
fn promote_funding(&mut self, new_funding_txid: Txid) -> Result<(), ()> {
	let prev_funding_txid = self.funding.funding_txid();
	// Locate the pending scope to promote; an unknown txid is rejected. `ok_or(())?`
	// replaces the prior non-idiomatic `is_none()` check followed by `unwrap()`.
	let new_funding = self
		.pending_funding
		.iter_mut()
		.find(|funding| funding.funding_txid() == new_funding_txid)
		.ok_or(())?;
	mem::swap(&mut self.funding, new_funding);
	self.onchain_tx_handler.update_after_renegotiated_funding_locked(
		self.funding.channel_parameters.clone(),
		self.funding.current_holder_commitment_tx.clone(),
		self.funding.prev_holder_commitment_tx.clone(),
	);
	// It's possible that no commitment updates happened during the lifecycle of the pending
	// splice's `FundingScope` that was promoted. If so, our `prev_holder_htlc_data` is
	// now irrelevant, since there's no valid previous commitment that exists for the current
	// funding transaction that could be broadcast.
	if self.funding.prev_holder_commitment_tx.is_none() {
		self.prev_holder_htlc_data.take();
	}
	let no_further_updates_allowed = self.no_further_updates_allowed();
	// The swap above places the previous `FundingScope` into `pending_funding`.
	for funding in self.pending_funding.drain(..) {
		let funding_txid = funding.funding_txid();
		self.outputs_to_watch.remove(&funding_txid);
		// Surface a `DiscardFunding` event for each dropped alternative funding once the
		// channel is closed; the previously-active funding is excluded.
		if no_further_updates_allowed && funding_txid != prev_funding_txid {
			self.pending_events.push(Event::DiscardFunding {
				channel_id: self.channel_id,
				funding_info: crate::events::FundingInfo::OutPoint {
					outpoint: funding.funding_outpoint(),
				},
			});
		}
	}
	if let Some((alternative_funding_txid, _)) = self.alternative_funding_confirmed.take() {
		// In exceedingly rare cases, it's possible there was a reorg that caused a potential
		// funding to be locked in that this `ChannelMonitor` has not yet seen. Thus, we
		// avoid a runtime assertion and only assert in debug mode.
		debug_assert_eq!(alternative_funding_txid, new_funding_txid);
	}
	Ok(())
}
#[rustfmt::skip]
/// Applies each step of a [`ChannelMonitorUpdate`] to this monitor, in order.
///
/// Returns `Err(())` if any step failed to apply, or if a pre-close step (commitment state,
/// commitment secret, shutdown script, or renegotiated funding) arrives after the channel was
/// closed / its funding spent. Note that `latest_update_id` is always advanced and
/// successfully-applied steps stay applied even when a later step fails.
///
/// Panics if a non-legacy update's id is not exactly `latest_update_id + 1`, or if a
/// disallowed step type is applied after force-close.
fn update_monitor<B: Deref, F: Deref, L: Deref>(
&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &WithContext<L>
) -> Result<(), ()>
where B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
if self.latest_update_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID && updates.update_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID {
log_info!(logger, "Applying pre-0.1 post-force-closed update to monitor {} with {} change(s).",
log_funding_info!(self), updates.updates.len());
} else if updates.update_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID {
log_info!(logger, "Applying pre-0.1 force close update to monitor {} with {} change(s).",
log_funding_info!(self), updates.updates.len());
} else {
log_info!(logger, "Applying update to monitor {}, bringing update_id from {} to {} with {} change(s).",
log_funding_info!(self), self.latest_update_id, updates.update_id, updates.updates.len());
}
// ChannelMonitor updates may be applied after force close if we receive a preimage for a
// broadcasted commitment transaction HTLC output that we'd like to claim on-chain. If this
// is the case, we no longer have guaranteed access to the monitor's update ID, so we use a
// sentinel value instead.
//
// The `ChannelManager` may also queue redundant `ChannelForceClosed` updates if it still
// thinks the channel needs to have its commitment transaction broadcast, so we'll allow
// them as well.
if updates.update_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID || self.lockdown_from_offchain {
assert_eq!(updates.updates.len(), 1);
match updates.updates[0] {
ChannelMonitorUpdateStep::ReleasePaymentComplete { .. } => {},
ChannelMonitorUpdateStep::ChannelForceClosed { .. } => {},
// We should have already seen a `ChannelForceClosed` update if we're trying to
// provide a preimage at this point.
ChannelMonitorUpdateStep::PaymentPreimage { .. } =>
debug_assert!(self.lockdown_from_offchain),
_ => {
log_error!(logger, "Attempted to apply post-force-close ChannelMonitorUpdate of type {}", updates.updates[0].variant_name());
panic!("Attempted to apply post-force-close ChannelMonitorUpdate that wasn't providing a payment preimage");
},
}
}
if updates.update_id != LEGACY_CLOSED_CHANNEL_UPDATE_ID {
if self.latest_update_id + 1 != updates.update_id {
panic!("Attempted to apply ChannelMonitorUpdates out of order, check the update_id before passing an update to update_monitor!");
}
}
// Apply every step, recording failure but continuing, so earlier/later steps still apply.
let mut ret = Ok(());
let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&**fee_estimator);
for update in updates.updates.iter() {
match update {
ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs, claimed_htlcs, nondust_htlc_sources } => {
log_trace!(logger, "Updating ChannelMonitor with latest holder commitment transaction info");
if self.lockdown_from_offchain { panic!(); }
if let Err(e) = self.provide_latest_holder_commitment_tx(
commitment_tx.clone(), htlc_outputs, &claimed_htlcs,
nondust_htlc_sources.clone()
) {
log_error!(logger, "Failed updating latest holder commitment transaction info: {}", e);
ret = Err(());
}
}
ChannelMonitorUpdateStep::LatestHolderCommitment {
commitment_txs, htlc_data, claimed_htlcs,
} => {
log_trace!(logger, "Updating ChannelMonitor with {} latest holder commitment(s)", commitment_txs.len());
assert!(!self.lockdown_from_offchain);
if let Err(e) = self.update_holder_commitment_data(
commitment_txs.clone(), htlc_data.clone(), claimed_htlcs,
) {
log_error!(logger, "Failed updating latest holder commitment state: {}", e);
ret = Err(());
}
},
// Soon we will drop the `LatestCounterpartyCommitmentTXInfo` variant in favor of `LatestCounterpartyCommitment`.
// For now we just add the code to handle the new updates.
// Next step: in channel, switch channel monitor updates to use the `LatestCounterpartyCommitment` variant.
ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { commitment_txid, htlc_outputs, commitment_number, their_per_commitment_point, .. } => {
log_trace!(logger, "Updating ChannelMonitor with latest counterparty commitment transaction info");
// The legacy variant can't describe per-`FundingScope` state, so it's only valid
// while no splice/renegotiation is pending.
if self.pending_funding.is_empty() {
self.provide_latest_counterparty_commitment_tx(*commitment_txid, htlc_outputs.clone(), *commitment_number, *their_per_commitment_point)
} else {
log_error!(logger, "Received unexpected non-splice counterparty commitment monitor update");
ret = Err(());
}
},
ChannelMonitorUpdateStep::LatestCounterpartyCommitment {
commitment_txs, htlc_data,
} => {
log_trace!(logger, "Updating ChannelMonitor with {} latest counterparty commitments", commitment_txs.len());
if let Err(e) = self.update_counterparty_commitment_data(commitment_txs, htlc_data) {
log_error!(logger, "Failed updating latest counterparty commitment state: {}", e);
ret = Err(());
}
},
ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage, payment_info } => {
log_trace!(logger, "Updating ChannelMonitor with payment preimage");
self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array()), &payment_preimage, payment_info, broadcaster, &bounded_fee_estimator, logger)
},
ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } => {
log_trace!(logger, "Updating ChannelMonitor with commitment secret");
if let Err(e) = self.provide_secret(*idx, *secret) {
debug_assert!(false, "Latest counterparty commitment secret was invalid");
log_error!(logger, "Providing latest counterparty commitment secret failed/was refused:");
log_error!(logger, " {}", e);
ret = Err(());
}
},
ChannelMonitorUpdateStep::RenegotiatedFunding {
channel_parameters, holder_commitment_tx, counterparty_commitment_tx,
} => {
log_trace!(logger, "Updating ChannelMonitor with alternative holder and counterparty commitment transactions for funding txid {}",
channel_parameters.funding_outpoint.unwrap().txid);
if let Err(_) = self.renegotiated_funding(
logger, channel_parameters, holder_commitment_tx, counterparty_commitment_tx,
) {
ret = Err(());
}
},
ChannelMonitorUpdateStep::RenegotiatedFundingLocked { funding_txid } => {
log_trace!(logger, "Updating ChannelMonitor with locked renegotiated funding txid {}", funding_txid);
if let Err(_) = self.promote_funding(*funding_txid) {
log_error!(logger, "Unknown funding with txid {} became locked", funding_txid);
ret = Err(());
}
},
ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } => {
log_trace!(logger, "Updating ChannelMonitor: channel force closed, should broadcast: {}", should_broadcast);
self.lockdown_from_offchain = true;
if *should_broadcast {
// There's no need to broadcast our commitment transaction if we've seen one
// confirmed (even with 1 confirmation) as it'll be rejected as
// duplicate/conflicting.
let detected_funding_spend = self.funding_spend_confirmed.is_some() ||
self.onchain_events_awaiting_threshold_conf.iter().any(
|event| matches!(event.event, OnchainEvent::FundingSpendConfirmation { .. }));
if detected_funding_spend {
log_trace!(logger, "Avoiding commitment broadcast, already detected confirmed spend onchain");
continue;
}
self.queue_latest_holder_commitment_txn_for_broadcast(broadcaster, &bounded_fee_estimator, logger, true);
} else if !self.holder_tx_signed {
log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast");
log_error!(logger, " in channel monitor for channel {}!", &self.channel_id());
log_error!(logger, " Read the docs for ChannelMonitor::broadcast_latest_holder_commitment_txn to take manual action!");
} else {
// If we generated a MonitorEvent::HolderForceClosed, the ChannelManager
// will still give us a ChannelForceClosed event with !should_broadcast, but we
// shouldn't print the scary warning above.
log_info!(logger, "Channel off-chain state closed after we broadcasted our latest commitment transaction.");
}
},
ChannelMonitorUpdateStep::ShutdownScript { scriptpubkey } => {
log_trace!(logger, "Updating ChannelMonitor with shutdown script");
if let Some(shutdown_script) = self.shutdown_script.replace(scriptpubkey.clone()) {
panic!("Attempted to replace shutdown script {} with {}", shutdown_script, scriptpubkey);
}
},
ChannelMonitorUpdateStep::ReleasePaymentComplete { htlc } => {
log_trace!(logger, "HTLC {htlc:?} permanently and fully resolved");
self.htlcs_resolved_to_user.insert(*htlc);
},
}
}
// In debug builds, re-derive the counterparty commitment transactions described by this
// update; this exercises the debug assertions in `counterparty_commitment_txs_from_update`.
#[cfg(debug_assertions)] {
self.counterparty_commitment_txs_from_update(updates);
}
self.latest_update_id = updates.update_id;
// Refuse updates after we've detected a spend onchain (or if the channel was otherwise
// closed), but only if the update isn't the kind of update we expect to see after channel
// closure.
let mut is_pre_close_update = false;
for update in updates.updates.iter() {
match update {
ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { .. }
|ChannelMonitorUpdateStep::LatestHolderCommitment { .. }
|ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { .. }
|ChannelMonitorUpdateStep::LatestCounterpartyCommitment { .. }
|ChannelMonitorUpdateStep::ShutdownScript { .. }
|ChannelMonitorUpdateStep::CommitmentSecret { .. }
|ChannelMonitorUpdateStep::RenegotiatedFunding { .. }
|ChannelMonitorUpdateStep::RenegotiatedFundingLocked { .. } =>
is_pre_close_update = true,
// After a channel is closed, we don't communicate with our peer about it, so the
// only things we will update is getting a new preimage (from a different channel),
// being told that the channel is closed, or being told a payment which was
// resolved on-chain has had its resolution communicated to the user. All other
// updates are generated while talking to our peer.
ChannelMonitorUpdateStep::PaymentPreimage { .. } => {},
ChannelMonitorUpdateStep::ChannelForceClosed { .. } => {},
ChannelMonitorUpdateStep::ReleasePaymentComplete { .. } => {},
}
}
if ret.is_ok() && self.no_further_updates_allowed() && is_pre_close_update {
log_error!(logger, "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent");
Err(())
} else { ret }
}
/// Returns true if the channel has been closed (i.e. no further updates are allowed) and no
/// commitment state updates ever happened.
fn is_closed_without_updates(&self) -> bool {
	// Closed by our own commitment signature or by an off-chain lockdown...
	let closed = self.holder_tx_signed || self.lockdown_from_offchain;
	// ...while neither side's commitment number ever advanced past the initial one.
	let counterparty_unadvanced =
		self.current_counterparty_commitment_number == INITIAL_COMMITMENT_NUMBER;
	let holder_unadvanced =
		self.current_holder_commitment_number == INITIAL_COMMITMENT_NUMBER;
	closed && counterparty_unadvanced && holder_unadvanced
}
/// Whether this monitor refuses further off-chain state updates: true once the funding spend
/// was seen on-chain, the channel was locked down off-chain, or we signed a holder commitment
/// transaction ourselves.
fn no_further_updates_allowed(&self) -> bool {
	let spend_seen = self.funding_spend_seen;
	let locked_down = self.lockdown_from_offchain;
	let commitment_signed = self.holder_tx_signed;
	spend_seen || locked_down || commitment_signed
}
/// Returns the `update_id` of the most recently applied `ChannelMonitorUpdate`.
fn get_latest_update_id(&self) -> u64 {
self.latest_update_id
}
/// Returns the outpoint we are currently monitoring the chain for spends. This will change for
/// every splice that has reached its intended confirmation depth.
///
/// Panics if the funding outpoint has not been set in the active channel parameters.
#[rustfmt::skip]
fn get_funding_txo(&self) -> OutPoint {
self.funding.channel_parameters.funding_outpoint
.expect("Funding outpoint must be set for active monitor")
}
/// Returns the P2WSH script we are currently monitoring the chain for spends. This will change
/// for every splice that has reached its intended confirmation depth.
fn get_funding_script(&self) -> ScriptBuf {
	let funding_redeemscript = self.funding.channel_parameters.make_funding_redeemscript();
	funding_redeemscript.to_p2wsh()
}
/// Returns the [`ChannelId`] of the channel this monitor watches.
pub fn channel_id(&self) -> ChannelId {
self.channel_id
}
/// Returns the full map of transaction outputs this monitor needs watched for spends.
fn get_outputs_to_watch(&self) -> &HashMap<Txid, Vec<(u32, ScriptBuf)>> {
	// Sanity-check that every counterparty commitment transaction we've detected on chain
	// still has its outputs registered for watching — missing one risks losing user funds,
	// and the check is trivially cheap.
	self.counterparty_commitment_txn_on_chain.keys().for_each(|txid| {
		self.outputs_to_watch.get(txid).expect("Counterparty commitment txn which have been broadcast should have outputs registered");
	});
	&self.outputs_to_watch
}
/// Drains the queued monitor events, leaving the queue empty, and returns them.
fn get_and_clear_pending_monitor_events(&mut self) -> Vec<MonitorEvent> {
	// `mem::take` replaces the queue with an empty Vec — equivalent to the old
	// swap-with-fresh-Vec pattern, in one call.
	mem::take(&mut self.pending_monitor_events)
}
/// Gets the set of events that are repeated regularly (e.g. those which RBF bump
/// transactions). We're okay if we lose these on restart as they'll be regenerated for us at
/// some regular interval via [`ChannelMonitor::rebroadcast_pending_claims`].
///
/// Translates each pending `ClaimEvent` from the `OnchainTxHandler` into a user-facing
/// [`Event::BumpTransaction`], clearing the pending claim events in the process.
#[rustfmt::skip]
pub(super) fn get_repeated_events(&mut self) -> Vec<Event> {
let pending_claim_events = self.onchain_tx_handler.get_and_clear_pending_claim_events();
let mut ret = Vec::with_capacity(pending_claim_events.len());
for (claim_id, claim_event) in pending_claim_events {
match claim_event {
// Commitment fee-bumping: describe the anchor output the user can spend (CPFP) to
// get the commitment transaction confirmed.
ClaimEvent::BumpCommitment {
package_target_feerate_sat_per_1000_weight, commitment_tx,
commitment_tx_fee_satoshis, pending_nondust_htlcs, anchor_output_idx,
channel_parameters,
} => {
let channel_id = self.channel_id;
let counterparty_node_id = self.counterparty_node_id;
let commitment_txid = commitment_tx.compute_txid();
ret.push(Event::BumpTransaction(BumpTransactionEvent::ChannelClose {
channel_id,
counterparty_node_id,
claim_id,
package_target_feerate_sat_per_1000_weight,
anchor_descriptor: AnchorDescriptor {
channel_derivation_parameters: ChannelDerivationParameters {
keys_id: self.channel_keys_id,
value_satoshis: channel_parameters.channel_value_satoshis,
transaction_parameters: channel_parameters,
},
// The anchor output lives on the commitment transaction itself.
outpoint: BitcoinOutPoint {
txid: commitment_txid,
vout: anchor_output_idx,
},
value: commitment_tx.output[anchor_output_idx as usize].value,
},
pending_htlcs: pending_nondust_htlcs,
commitment_tx,
commitment_tx_fee_satoshis,
}));
},
// HTLC fee-bumping: hand the user the HTLC descriptors to be resolved at the
// requested feerate.
ClaimEvent::BumpHTLC {
target_feerate_sat_per_1000_weight, htlcs, tx_lock_time,
} => {
let channel_id = self.channel_id;
let counterparty_node_id = self.counterparty_node_id;
ret.push(Event::BumpTransaction(BumpTransactionEvent::HTLCResolution {
channel_id,
counterparty_node_id,
claim_id,
target_feerate_sat_per_1000_weight,
htlc_descriptors: htlcs,
tx_lock_time,
}));
}
}
}
ret
}
/// Returns the initial counterparty commitment transaction, rebuilding (and caching) it from
/// the legacy `initial_counterparty_commitment_info` fields when a monitor persisted by an
/// older version doesn't carry the full transaction.
///
/// Returns `None` if neither the full transaction nor the legacy rebuild info is present.
fn initial_counterparty_commitment_tx(&mut self) -> Option<CommitmentTransaction> {
self.initial_counterparty_commitment_tx.clone().or_else(|| {
// This provides forward compatibility; an old monitor will not contain the full
// transaction; only enough information to rebuild it
self.initial_counterparty_commitment_info.map(
|(
their_per_commitment_point,
feerate_per_kw,
to_broadcaster_value,
to_countersignatory_value,
)| {
// The initial commitment carries no HTLCs by definition.
let nondust_htlcs = vec![];
// Since we're expected to only reach here during the initial persistence of a
// monitor (i.e., via [`Persist::persist_new_channel`]), we expect to only have
// one `FundingScope` present.
debug_assert!(self.pending_funding.is_empty());
let channel_parameters = &self.funding.channel_parameters;
let commitment_tx = self.build_counterparty_commitment_tx(
channel_parameters,
INITIAL_COMMITMENT_NUMBER,
&their_per_commitment_point,
to_broadcaster_value,
to_countersignatory_value,
feerate_per_kw,
nondust_htlcs,
);
// Take the opportunity to populate this recently introduced field
self.initial_counterparty_commitment_tx = Some(commitment_tx.clone());
commitment_tx
},
)
})
}
/// Reconstructs a counterparty commitment transaction from its constituent parts, viewing the
/// given `channel_parameters` from the counterparty-as-broadcaster side.
fn build_counterparty_commitment_tx(
	&self, channel_parameters: &ChannelTransactionParameters, commitment_number: u64,
	their_per_commitment_point: &PublicKey, to_broadcaster_value: u64,
	to_countersignatory_value: u64, feerate_per_kw: u32,
	nondust_htlcs: Vec<HTLCOutputInCommitment>
) -> CommitmentTransaction {
	let directed_params = channel_parameters.as_counterparty_broadcastable();
	CommitmentTransaction::new(
		commitment_number,
		their_per_commitment_point,
		to_broadcaster_value,
		to_countersignatory_value,
		feerate_per_kw,
		nondust_htlcs,
		&directed_params,
		&self.onchain_tx_handler.secp_ctx,
	)
}
#[rustfmt::skip]
/// Extracts the counterparty commitment transaction(s) described by each step of `update`,
/// rebuilding the full transaction from its constituent parts for the legacy
/// `LatestCounterpartyCommitmentTXInfo` variant. Steps that don't describe a counterparty
/// commitment are skipped.
fn counterparty_commitment_txs_from_update(&self, update: &ChannelMonitorUpdate) -> Vec<CommitmentTransaction> {
update.updates.iter().filter_map(|update| {
// Soon we will drop the first branch here in favor of the second.
// In preparation, we just add the second branch without deleting the first.
// Next step: in channel, switch channel monitor updates to use the `LatestCounterpartyCommitment` variant.
match update {
&ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { commitment_txid,
ref htlc_outputs, commitment_number, their_per_commitment_point,
feerate_per_kw: Some(feerate_per_kw),
to_broadcaster_value_sat: Some(to_broadcaster_value),
to_countersignatory_value_sat: Some(to_countersignatory_value) } => {
// Only HTLCs with an output index are non-dust and appear in the transaction.
let nondust_htlcs = htlc_outputs.iter().filter_map(|(htlc, _)| {
htlc.transaction_output_index.map(|_| htlc).cloned()
}).collect::<Vec<_>>();
// This monitor update variant is only applicable while there's a single
// `FundingScope` active, otherwise we expect to see
// `LatestCounterpartyCommitment` instead.
debug_assert!(self.pending_funding.is_empty());
let channel_parameters = &self.funding.channel_parameters;
let commitment_tx = self.build_counterparty_commitment_tx(
channel_parameters,
commitment_number,
&their_per_commitment_point,
to_broadcaster_value,
to_countersignatory_value,
feerate_per_kw,
nondust_htlcs,
);
// Sanity check (debug only): the rebuilt transaction must match the claimed txid.
debug_assert_eq!(commitment_tx.trust().txid(), commitment_txid);
Some(vec![commitment_tx])
},
&ChannelMonitorUpdateStep::LatestCounterpartyCommitment { ref commitment_txs, .. } => {
Some(commitment_txs.clone())
},
&ChannelMonitorUpdateStep::RenegotiatedFunding { ref counterparty_commitment_tx, .. } => {
Some(vec![counterparty_commitment_tx.clone()])
},
_ => None,
}
}).flatten().collect()
}
#[rustfmt::skip]
/// Signs input `input_idx` of `justice_tx` — which spends a revoked counterparty `to_local`
/// output of `value` sats from the commitment with number `commitment_number` — and fills in
/// that input's witness.
///
/// Fails if we don't yet have the commitment secret for `commitment_number`, or if no known
/// `FundingScope` tracks the commitment transaction being spent.
fn sign_to_local_justice_tx(
&self, mut justice_tx: Transaction, input_idx: usize, value: u64, commitment_number: u64
) -> Result<Transaction, ()> {
// Derive the revocation/delayed keys for this commitment from its (revealed) secret.
let secret = self.get_secret(commitment_number).ok_or(())?;
let per_commitment_key = SecretKey::from_slice(&secret).map_err(|_| ())?;
let their_per_commitment_point = PublicKey::from_secret_key(
&self.onchain_tx_handler.secp_ctx, &per_commitment_key);
let revocation_pubkey = RevocationKey::from_basepoint(&self.onchain_tx_handler.secp_ctx,
&self.holder_revocation_basepoint, &their_per_commitment_point);
let delayed_key = DelayedPaymentKey::from_basepoint(&self.onchain_tx_handler.secp_ctx,
&self.counterparty_commitment_params.counterparty_delayed_payment_base_key, &their_per_commitment_point);
let revokeable_redeemscript = chan_utils::get_revokeable_redeemscript(&revocation_pubkey,
self.counterparty_commitment_params.on_counterparty_tx_csv, &delayed_key);
let commitment_txid = &justice_tx.input[input_idx].previous_output.txid;
// Since there may be multiple counterparty commitment transactions for the same commitment
// number due to splicing, we have to locate the matching `FundingScope::channel_parameters`
// to provide the signer. Since this is intended to be called during
// `Persist::update_persisted_channel`, the monitor should have already had the update
// applied.
let channel_parameters = core::iter::once(&self.funding)
.chain(&self.pending_funding)
.find(|funding| funding.counterparty_claimable_outpoints.contains_key(commitment_txid))
.map(|funding| &funding.channel_parameters)
.ok_or(())?;
let sig = self.onchain_tx_handler.signer.sign_justice_revoked_output(
&channel_parameters, &justice_tx, input_idx, value, &per_commitment_key,
&self.onchain_tx_handler.secp_ctx,
)?;
// Witness stack: <sig> <1> <redeemscript>; the `1` selects the revocation branch of the
// revokeable script (per BOLT 3's to_local script).
justice_tx.input[input_idx].witness.push_ecdsa_signature(&BitcoinSignature::sighash_all(sig));
justice_tx.input[input_idx].witness.push(&[1u8]);
justice_tx.input[input_idx].witness.push(revokeable_redeemscript.as_bytes());
Ok(justice_tx)
}
/// Returns the counterparty commitment secret for commitment number `idx`, if derivable.
/// Can only fail if idx is < get_min_seen_secret
fn get_secret(&self, idx: u64) -> Option<[u8; 32]> {
self.commitment_secrets.get_secret(idx)
}
/// Returns the minimum commitment number for which we've been given a commitment secret.
fn get_min_seen_secret(&self) -> u64 {
self.commitment_secrets.get_min_seen_secret()
}
/// Returns the counterparty's current (latest) commitment transaction number.
fn get_cur_counterparty_commitment_number(&self) -> u64 {
self.current_counterparty_commitment_number
}
/// Returns our current (latest) holder commitment transaction number.
fn get_cur_holder_commitment_number(&self) -> u64 {
self.current_holder_commitment_number
}
/// Attempts to claim a counterparty commitment transaction's outputs using the revocation key and
/// data in counterparty_claimable_outpoints. Will directly claim any HTLC outputs which expire at a
/// height > height + CLTV_SHARED_CLAIM_BUFFER. In any case, will install monitoring for
/// HTLC-Success/HTLC-Timeout transactions.
///
/// Returns packages to claim the revoked output(s) and general information about the output that
/// is to the counterparty in the commitment transaction.
#[rustfmt::skip]
fn check_spend_counterparty_transaction<L: Deref>(&mut self, commitment_txid: Txid, commitment_tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L)
-> (Vec<PackageTemplate>, CommitmentTxCounterpartyOutputInfo)
where L::Target: Logger {
// Most secp and related errors trying to create keys means we have no hope of constructing
// a spend transaction...so we return no transactions to broadcast
let mut claimable_outpoints = Vec::new();
let mut to_counterparty_output_info = None;
// Resolve the funding scope whose output this commitment transaction spends (there may be
// several scopes while a splice is pending).
let funding_spent = get_confirmed_funding_scope!(self);
let per_commitment_option = funding_spent.counterparty_claimable_outpoints.get(&commitment_txid);
// On any key-derivation error, bail out returning whatever claims we've built so far.
macro_rules! ignore_error {
( $thing : expr ) => {
match $thing {
Ok(a) => a,
Err(_) => return (claimable_outpoints, to_counterparty_output_info)
}
};
}
let funding_txid_spent = commitment_tx.input[0].previous_output.txid;
// Recover the commitment number from the obscured value split across the first input's
// sequence field and the transaction locktime, un-XORing the per-channel obscure factor.
let commitment_number = 0xffffffffffff - ((((commitment_tx.input[0].sequence.0 as u64 & 0xffffff) << 3*8) | (commitment_tx.lock_time.to_consensus_u32() as u64 & 0xffffff)) ^ self.commitment_transaction_number_obscure_factor);
// If we already hold the per-commitment secret for this commitment number, the broadcast
// state was revoked and we can claim all of its outputs via the revocation key.
if commitment_number >= self.get_min_seen_secret() {
assert_eq!(funding_spent.funding_txid(), funding_txid_spent);
// get_secret is infallible here per the check just above.
let secret = self.get_secret(commitment_number).unwrap();
let per_commitment_key = ignore_error!(SecretKey::from_slice(&secret));
let per_commitment_point = PublicKey::from_secret_key(&self.onchain_tx_handler.secp_ctx, &per_commitment_key);
let revocation_pubkey = RevocationKey::from_basepoint(&self.onchain_tx_handler.secp_ctx, &self.holder_revocation_basepoint, &per_commitment_point,);
let delayed_key = DelayedPaymentKey::from_basepoint(&self.onchain_tx_handler.secp_ctx, &self.counterparty_commitment_params.counterparty_delayed_payment_base_key, &PublicKey::from_secret_key(&self.onchain_tx_handler.secp_ctx, &per_commitment_key));
let revokeable_redeemscript = chan_utils::get_revokeable_redeemscript(&revocation_pubkey, self.counterparty_commitment_params.on_counterparty_tx_csv, &delayed_key);
let revokeable_p2wsh = revokeable_redeemscript.to_p2wsh();
// First, process non-htlc outputs (to_holder & to_counterparty)
for (idx, outp) in commitment_tx.output.iter().enumerate() {
if outp.script_pubkey == revokeable_p2wsh {
let revk_outp = RevokedOutput::build(
per_commitment_point, per_commitment_key, outp.value,
funding_spent.channel_parameters.clone(), height,
);
let justice_package = PackageTemplate::build_package(
commitment_txid, idx as u32,
PackageSolvingData::RevokedOutput(revk_outp),
height + self.counterparty_commitment_params.on_counterparty_tx_csv as u32,
);
claimable_outpoints.push(justice_package);
to_counterparty_output_info =
Some((idx.try_into().expect("Txn can't have more than 2^32 outputs"), outp.value));
}
}
// Then, try to find revoked htlc outputs
if let Some(per_commitment_claimable_data) = per_commitment_option {
for (htlc, _) in per_commitment_claimable_data {
if let Some(transaction_output_index) = htlc.transaction_output_index {
if transaction_output_index as usize >= commitment_tx.output.len() ||
commitment_tx.output[transaction_output_index as usize].value != htlc.to_bitcoin_amount() {
// per_commitment_data is corrupt or our commitment signing key leaked!
return (claimable_outpoints, to_counterparty_output_info);
}
let revk_htlc_outp = RevokedHTLCOutput::build(
per_commitment_point, per_commitment_key, htlc.clone(),
funding_spent.channel_parameters.clone(), height,
);
// Height at which the counterparty may first be able to spend this output
// themselves, used to prioritize our claim.
let counterparty_spendable_height = if htlc.offered {
htlc.cltv_expiry
} else {
height
};
let justice_package = PackageTemplate::build_package(
commitment_txid,
transaction_output_index,
PackageSolvingData::RevokedHTLCOutput(revk_htlc_outp),
counterparty_spendable_height,
);
claimable_outpoints.push(justice_package);
}
}
}
// Last, track onchain revoked commitment transaction and fail backward outgoing HTLCs as payment path is broken
if !claimable_outpoints.is_empty() || per_commitment_option.is_some() { // ie we're confident this is actually ours
// We're definitely a counterparty commitment transaction!
log_error!(logger, "Got broadcast of revoked counterparty commitment transaction, going to generate general spend tx with {} inputs", claimable_outpoints.len());
self.counterparty_commitment_txn_on_chain.insert(commitment_txid, commitment_number);
if let Some(per_commitment_claimable_data) = per_commitment_option {
// NOTE(review): label here is "revoked_counterparty" but the branch below uses
// "revoked counterparty" (space) — confirm whether the underscore is intended.
fail_unbroadcast_htlcs!(self, "revoked_counterparty", commitment_txid, commitment_tx, height,
block_hash, per_commitment_claimable_data.iter().map(|(htlc, htlc_source)|
(htlc, htlc_source.as_ref().map(|htlc_source| htlc_source.as_ref()))
), logger);
} else {
// Our fuzzers aren't constrained by pesky things like valid signatures, so can
// spend our funding output with a transaction which doesn't match our past
// commitment transactions. Thus, we can only debug-assert here when not
// fuzzing.
debug_assert!(cfg!(fuzzing), "We should have per-commitment option for any recognized old commitment txn");
fail_unbroadcast_htlcs!(self, "revoked counterparty", commitment_txid, commitment_tx, height,
block_hash, [].iter().map(|reference| *reference), logger);
}
}
} else if let Some(per_commitment_claimable_data) = per_commitment_option {
// A non-revoked (current or previous) counterparty commitment confirmed.
assert_eq!(funding_spent.funding_txid(), funding_txid_spent);
// While this isn't useful yet, there is a potential race where if a counterparty
// revokes a state at the same time as the commitment transaction for that state is
// confirmed, and the watchtower receives the block before the user, the user could
// upload a new ChannelMonitor with the revocation secret but the watchtower has
// already processed the block, resulting in the counterparty_commitment_txn_on_chain entry
// not being generated by the above conditional. Thus, to be safe, we go ahead and
// insert it here.
self.counterparty_commitment_txn_on_chain.insert(commitment_txid, commitment_number);
log_info!(logger, "Got broadcast of non-revoked counterparty commitment transaction {}", commitment_txid);
fail_unbroadcast_htlcs!(self, "counterparty", commitment_txid, commitment_tx, height, block_hash,
per_commitment_claimable_data.iter().map(|(htlc, htlc_source)|
(htlc, htlc_source.as_ref().map(|htlc_source| htlc_source.as_ref()))
), logger);
let (htlc_claim_reqs, counterparty_output_info) =
self.get_counterparty_output_claim_info(funding_spent, commitment_number, commitment_txid, commitment_tx, per_commitment_claimable_data, Some(height));
to_counterparty_output_info = counterparty_output_info;
for req in htlc_claim_reqs {
claimable_outpoints.push(req);
}
}
(claimable_outpoints, to_counterparty_output_info)
}
/// Looks up the counterparty's per-commitment point for the given commitment number, if it is
/// one of the (up to) two states we currently track points for.
fn get_point_for_commitment_number(&self, commitment_number: u64) -> Option<PublicKey> {
	// `their_cur_per_commitment_points` is (latest commitment number, its point, and
	// optionally the point for the state immediately before it).
	let (latest_number, latest_point, prev_point) = self.their_cur_per_commitment_points?;
	if latest_number == commitment_number {
		// The requested state is the counterparty's latest valid one.
		return Some(latest_point);
	}
	if let Some(point) = prev_point {
		// Non-atomicity of revocation means the counterparty may briefly have two valid
		// commitment txns from our viewpoint, so we also track the previous point.
		if latest_number == commitment_number + 1 {
			return Some(point);
		}
	}
	None
}
/// Builds claim packages for every counterparty-offered HTLC output in the given commitment
/// transaction whose payment hash matches `preimage`, allowing us to sweep them with it.
///
/// Returns an empty `Vec` if we don't know the commitment's HTLC set or cannot derive the
/// counterparty's per-commitment point for `commitment_number`.
fn get_counterparty_output_claims_for_preimage(
	&self, preimage: PaymentPreimage, funding_spent: &FundingScope, commitment_number: u64,
	commitment_txid: Txid,
	per_commitment_option: Option<&Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>>,
	confirmation_height: Option<u32>,
) -> Vec<PackageTemplate> {
	let mut claims = Vec::new();
	let htlc_outputs = match per_commitment_option {
		Some(outputs) => outputs,
		None => return claims,
	};
	let per_commitment_point = match self.get_point_for_commitment_number(commitment_number) {
		Some(point) => point,
		None => return claims,
	};
	let matching_payment_hash = PaymentHash::from(preimage);
	for (htlc, _) in htlc_outputs.iter() {
		// Dust HTLCs have no on-chain output to claim.
		let transaction_output_index = match htlc.transaction_output_index {
			Some(idx) => idx,
			None => continue,
		};
		// Only counterparty-offered HTLCs with this exact payment hash can be swept via
		// the given preimage.
		if !htlc.offered || htlc.payment_hash != matching_payment_hash {
			continue;
		}
		let htlc_data = PackageSolvingData::CounterpartyOfferedHTLCOutput(
			CounterpartyOfferedHTLCOutput::build(
				per_commitment_point,
				preimage,
				htlc.clone(),
				funding_spent.channel_parameters.clone(),
				confirmation_height,
			),
		);
		claims.push(PackageTemplate::build_package(
			commitment_txid,
			transaction_output_index,
			htlc_data,
			htlc.cltv_expiry,
		));
	}
	claims
}
/// Returns the HTLC claim package templates and the counterparty output info
///
/// Scans a confirmed, non-revoked counterparty commitment transaction for (a) the delayed
/// to_counterparty output and (b) HTLC outputs we can claim — received HTLCs (via timeout)
/// and offered HTLCs for which we already hold the preimage.
fn get_counterparty_output_claim_info(
&self, funding_spent: &FundingScope, commitment_number: u64, commitment_txid: Txid,
tx: &Transaction,
per_commitment_claimable_data: &[(HTLCOutputInCommitment, Option<Box<HTLCSource>>)],
confirmation_height: Option<u32>,
) -> (Vec<PackageTemplate>, CommitmentTxCounterpartyOutputInfo) {
let mut claimable_outpoints = Vec::new();
let mut to_counterparty_output_info: CommitmentTxCounterpartyOutputInfo = None;
// Without the counterparty's per-commitment point we can't derive any keys below, so
// there's nothing we can identify or claim.
let per_commitment_point = match self.get_point_for_commitment_number(commitment_number) {
Some(point) => point,
None => return (claimable_outpoints, to_counterparty_output_info),
};
let revocation_pubkey = RevocationKey::from_basepoint(
&self.onchain_tx_handler.secp_ctx,
&self.holder_revocation_basepoint,
&per_commitment_point,
);
let delayed_key = DelayedPaymentKey::from_basepoint(
&self.onchain_tx_handler.secp_ctx,
&self.counterparty_commitment_params.counterparty_delayed_payment_base_key,
&per_commitment_point,
);
// The CSV-delayed to_counterparty output script for this commitment.
let revokeable_p2wsh = chan_utils::get_revokeable_redeemscript(
&revocation_pubkey,
self.counterparty_commitment_params.on_counterparty_tx_csv,
&delayed_key,
)
.to_p2wsh();
// Record which output (if any) is the counterparty's delayed balance output.
for (idx, outp) in tx.output.iter().enumerate() {
if outp.script_pubkey == revokeable_p2wsh {
to_counterparty_output_info =
Some((idx.try_into().expect("Can't have > 2^32 outputs"), outp.value));
}
}
for &(ref htlc, _) in per_commitment_claimable_data.iter() {
if let Some(transaction_output_index) = htlc.transaction_output_index {
if transaction_output_index as usize >= tx.output.len()
|| tx.output[transaction_output_index as usize].value
!= htlc.to_bitcoin_amount()
{
// per_commitment_data is corrupt or our commitment signing key leaked!
return (claimable_outpoints, to_counterparty_output_info);
}
// For HTLCs the counterparty offered us, we can only claim if we know the preimage.
let preimage = if htlc.offered {
if let Some((p, _)) = self.payment_preimages.get(&htlc.payment_hash) {
Some(*p)
} else {
None
}
} else {
None
};
// Claimable: offered-with-preimage (success path) or received (timeout path).
if preimage.is_some() || !htlc.offered {
let counterparty_htlc_outp = if htlc.offered {
PackageSolvingData::CounterpartyOfferedHTLCOutput(
CounterpartyOfferedHTLCOutput::build(
per_commitment_point,
preimage.unwrap(),
htlc.clone(),
funding_spent.channel_parameters.clone(),
confirmation_height,
),
)
} else {
PackageSolvingData::CounterpartyReceivedHTLCOutput(
CounterpartyReceivedHTLCOutput::build(
per_commitment_point,
htlc.clone(),
funding_spent.channel_parameters.clone(),
confirmation_height,
),
)
};
let counterparty_package = PackageTemplate::build_package(
commitment_txid,
transaction_output_index,
counterparty_htlc_outp,
htlc.cltv_expiry,
);
claimable_outpoints.push(counterparty_package);
}
}
}
(claimable_outpoints, to_counterparty_output_info)
}
/// Attempts to claim a counterparty HTLC-Success/HTLC-Timeout's outputs using the revocation key
///
/// Checks every input of `tx` for a spend of an output of the revoked counterparty commitment
/// `commitment_txid` (witness of 5 elements, matching output index present) and builds justice
/// packages for each, along with the outputs we should watch for further spends.
#[rustfmt::skip]
fn check_spend_counterparty_htlc<L: Deref>(
	&mut self, tx: &Transaction, commitment_number: u64, commitment_txid: &Txid, height: u32, logger: &L
) -> (Vec<PackageTemplate>, Option<TransactionOutputs>) where L::Target: Logger {
	// Without the per-commitment secret for this (revoked) state there is nothing to claim.
	let secret = if let Some(secret) = self.get_secret(commitment_number) { secret } else { return (Vec::new(), None); };
	let per_commitment_key = match SecretKey::from_slice(&secret) {
		Ok(key) => key,
		Err(_) => return (Vec::new(), None)
	};
	let per_commitment_point = PublicKey::from_secret_key(&self.onchain_tx_handler.secp_ctx, &per_commitment_key);
	let funding_spent = get_confirmed_funding_scope!(self);
	debug_assert!(funding_spent.counterparty_claimable_outpoints.contains_key(commitment_txid));
	let htlc_txid = tx.compute_txid();
	let mut claimable_outpoints = vec![];
	let mut outputs_to_watch = None;
	// Previously, we would only claim HTLCs from revoked HTLC transactions if they had 1 input
	// with a witness of 5 elements and 1 output. This wasn't enough for anchor outputs, as the
	// counterparty can now aggregate multiple HTLCs into a single transaction thanks to
	// `SIGHASH_SINGLE` remote signatures, leading us to not claim any HTLCs upon seeing a
	// confirmed revoked HTLC transaction (for more details, see
	// https://lists.linuxfoundation.org/pipermail/lightning-dev/2022-April/003561.html).
	//
	// We make sure we're not vulnerable to this case by checking all inputs of the transaction,
	// and claim those which spend the commitment transaction, have a witness of 5 elements, and
	// have a corresponding output at the same index within the transaction.
	for (idx, input) in tx.input.iter().enumerate() {
		if input.previous_output.txid == *commitment_txid && input.witness.len() == 5 && tx.output.get(idx).is_some() {
			log_error!(logger, "Got broadcast of revoked counterparty HTLC transaction, spending {}:{}", htlc_txid, idx);
			// Use the channel parameters of the funding scope that was actually spent
			// (which may differ from `self.funding` while a splice is pending), matching
			// `check_spend_counterparty_transaction`, so the signer builds the justice
			// transaction against the correct funding.
			let revk_outp = RevokedOutput::build(
				per_commitment_point, per_commitment_key, tx.output[idx].value,
				funding_spent.channel_parameters.clone(), height,
			);
			let justice_package = PackageTemplate::build_package(
				htlc_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp),
				height + self.counterparty_commitment_params.on_counterparty_tx_csv as u32,
			);
			claimable_outpoints.push(justice_package);
			// Lazily initialize the watch list keyed by the HTLC tx's txid.
			if outputs_to_watch.is_none() {
				outputs_to_watch = Some((htlc_txid, vec![]));
			}
			outputs_to_watch.as_mut().unwrap().1.push((idx as u32, tx.output[idx].clone()));
		}
	}
	(claimable_outpoints, outputs_to_watch)
}
/// Builds an [`HTLCDescriptor`] for each non-dust HTLC in the given holder commitment for which
/// we can actually produce a spending transaction: all offered HTLCs (timeout path) plus any
/// received HTLCs whose preimage we already know (success path).
#[rustfmt::skip]
fn get_broadcasted_holder_htlc_descriptors(
	&self, funding: &FundingScope, holder_tx: &HolderCommitmentTransaction,
) -> Vec<HTLCDescriptor> {
	let trusted_tx = holder_tx.trust();
	// Non-dust HTLCs and counterparty signatures are parallel arrays.
	debug_assert_eq!(holder_tx.nondust_htlcs().len(), holder_tx.counterparty_htlc_sigs.len());
	let mut descriptors = Vec::with_capacity(holder_tx.nondust_htlcs().len());
	for (htlc, counterparty_sig) in holder_tx.nondust_htlcs().iter().zip(holder_tx.counterparty_htlc_sigs.iter()) {
		assert!(htlc.transaction_output_index.is_some(), "Expected transaction output index for non-dust HTLC");
		let preimage = match (htlc.offered, self.payment_preimages.get(&htlc.payment_hash)) {
			// Offered HTLCs are spent via HTLC-Timeout; no preimage involved.
			(true, _) => None,
			(false, Some((preimage, _))) => Some(*preimage),
			// We can't build an HTLC-Success transaction without the preimage
			(false, None) => continue,
		};
		descriptors.push(HTLCDescriptor {
			channel_derivation_parameters: ChannelDerivationParameters {
				value_satoshis: funding.channel_parameters.channel_value_satoshis,
				keys_id: self.channel_keys_id,
				transaction_parameters: funding.channel_parameters.clone(),
			},
			commitment_txid: trusted_tx.txid(),
			per_commitment_number: trusted_tx.commitment_number(),
			per_commitment_point: trusted_tx.per_commitment_point(),
			feerate_per_kw: trusted_tx.negotiated_feerate_per_kw(),
			htlc: htlc.clone(),
			preimage,
			counterparty_sig: *counterparty_sig,
		});
	}
	descriptors
}
// Returns (1) `PackageTemplate`s that can be given to the OnchainTxHandler, so that the handler can
// broadcast transactions claiming holder HTLC commitment outputs and (2) a holder revokable
// script so we can detect whether a holder transaction has been seen on-chain.
#[rustfmt::skip]
fn get_broadcasted_holder_claims(
	&self, funding: &FundingScope, holder_tx: &HolderCommitmentTransaction, conf_height: u32,
) -> (Vec<PackageTemplate>, Option<(ScriptBuf, PublicKey, RevocationKey)>) {
	let trusted_tx = holder_tx.trust();
	let keys = trusted_tx.keys();
	// The CSV-delayed to_self script of this commitment; its P2WSH form lets us recognize
	// this commitment confirming on-chain.
	let redeem_script = chan_utils::get_revokeable_redeemscript(
		&keys.revocation_key, self.on_holder_tx_csv, &keys.broadcaster_delayed_payment_key,
	);
	let broadcasted_holder_revokable_script = Some((
		redeem_script.to_p2wsh(), holder_tx.per_commitment_point(), keys.revocation_key.clone(),
	));
	let mut claim_requests = Vec::new();
	for htlc_descriptor in self.get_broadcasted_holder_htlc_descriptors(funding, holder_tx) {
		// Height at which the counterparty may first be able to spend this output
		// themselves: immediately (with the preimage) for offered HTLCs, at CLTV expiry
		// (via timeout) for received ones.
		let counterparty_spendable_height = if htlc_descriptor.htlc.offered {
			conf_height
		} else {
			htlc_descriptor.htlc.cltv_expiry
		};
		let transaction_output_index = htlc_descriptor.htlc.transaction_output_index
			.expect("Expected transaction output index for non-dust HTLC");
		claim_requests.push(PackageTemplate::build_package(
			trusted_tx.txid(), transaction_output_index,
			PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build(htlc_descriptor, conf_height)),
			counterparty_spendable_height,
		));
	}
	(claim_requests, broadcasted_holder_revokable_script)
}
// Returns holder HTLC outputs to watch and react to in case of spending.
#[rustfmt::skip]
fn get_broadcasted_holder_watch_outputs(&self, holder_tx: &HolderCommitmentTransaction) -> Vec<(u32, TxOut)> {
	let trusted_tx = holder_tx.trust();
	let mut watch_outputs = Vec::with_capacity(holder_tx.nondust_htlcs().len());
	for htlc in holder_tx.nondust_htlcs() {
		match htlc.transaction_output_index {
			// Watch the commitment output which pays this HTLC.
			Some(output_index) => watch_outputs.push((
				output_index,
				trusted_tx.built_transaction().transaction.output[output_index as usize].clone(),
			)),
			// Every non-dust HTLC must carry an output index; flag corruption in debug builds.
			None => debug_assert!(false, "Expected transaction output index for non-dust HTLC"),
		}
	}
	watch_outputs
}
/// Attempts to claim any claimable HTLCs in a commitment transaction which was not (yet)
/// revoked using data in holder_claimable_outpoints.
/// Should not be used if check_spend_revoked_transaction succeeds.
/// Returns None unless the transaction is definitely one of our commitment transactions.
fn check_spend_holder_transaction<L: Deref>(
&mut self, commitment_txid: Txid, commitment_tx: &Transaction, height: u32,
block_hash: &BlockHash, logger: &L,
) -> Option<(Vec<PackageTemplate>, TransactionOutputs)>
where
L::Target: Logger,
{
// Resolve the funding scope whose output was spent by this transaction.
let funding_spent = get_confirmed_funding_scope!(self);
// HTLCs set may differ between last and previous holder commitment txn, in case of one them hitting chain, ensure we cancel all HTLCs backward
// Match the confirmed txid against our current holder commitment first, falling back to
// the previous one (if any); the bool records which of the two it was.
let holder_commitment_tx = Some((&funding_spent.current_holder_commitment_tx, true))
.filter(|(current_holder_commitment_tx, _)| {
current_holder_commitment_tx.trust().txid() == commitment_txid
})
.or_else(|| {
funding_spent
.prev_holder_commitment_tx
.as_ref()
.map(|prev_holder_commitment_tx| (prev_holder_commitment_tx, false))
.filter(|(prev_holder_commitment_tx, _)| {
prev_holder_commitment_tx.trust().txid() == commitment_txid
})
});
if let Some((holder_commitment_tx, current)) = holder_commitment_tx {
let funding_txid_spent = commitment_tx.input[0].previous_output.txid;
assert_eq!(funding_spent.funding_txid(), funding_txid_spent);
let current_msg = if current { "latest holder" } else { "previous holder" };
log_info!(logger, "Got broadcast of {current_msg} commitment tx {commitment_txid}, searching for available HTLCs to claim");
// Build HTLC claim packages and remember the revokable to_self script so later spends
// of this commitment can be recognized.
let (claim_requests, broadcasted_holder_revokable_script) =
self.get_broadcasted_holder_claims(funding_spent, holder_commitment_tx, height);
self.broadcasted_holder_revokable_script = broadcasted_holder_revokable_script;
let watch_outputs = self.get_broadcasted_holder_watch_outputs(holder_commitment_tx);
// Fail backwards any HTLCs not present in the confirmed commitment, using the HTLC
// source set matching whichever (current/previous) commitment confirmed.
if current {
fail_unbroadcast_htlcs!(
self,
current_msg,
commitment_txid,
commitment_tx,
height,
block_hash,
holder_commitment_htlcs!(self, CURRENT_WITH_SOURCES),
logger
);
} else {
fail_unbroadcast_htlcs!(
self,
current_msg,
commitment_txid,
commitment_tx,
height,
block_hash,
holder_commitment_htlcs!(self, PREV_WITH_SOURCES).unwrap(),
logger
);
}
Some((claim_requests, (commitment_txid, watch_outputs)))
} else {
None
}
}
/// Cancels any existing pending claims for a commitment that previously confirmed and has now
/// been replaced by another.
///
/// `confirmed_commitment_txid` is the commitment which is now confirmed; claims against every
/// *other* known counterparty or holder commitment (across all funding scopes) are abandoned.
#[rustfmt::skip]
pub fn cancel_prev_commitment_claims<L: Deref>(
&mut self, logger: &L, confirmed_commitment_txid: &Txid
) where L::Target: Logger {
for (counterparty_commitment_txid, _) in &self.counterparty_commitment_txn_on_chain {
// Cancel any pending claims for counterparty commitments we've seen confirm.
if counterparty_commitment_txid == confirmed_commitment_txid {
// Skip the commitment that actually confirmed — its claims remain valid.
continue;
}
// If we have generated claims for counterparty_commitment_txid earlier, we can rely on always
// having claim related htlcs for counterparty_commitment_txid in counterparty_claimable_outpoints.
for funding in core::iter::once(&self.funding).chain(self.pending_funding.iter()) {
let mut found_claim = false;
for (htlc, _) in funding.counterparty_claimable_outpoints.get(counterparty_commitment_txid).unwrap_or(&vec![]) {
let mut outpoint = BitcoinOutPoint { txid: *counterparty_commitment_txid, vout: 0 };
if let Some(vout) = htlc.transaction_output_index {
outpoint.vout = vout;
if self.onchain_tx_handler.abandon_claim(&outpoint) {
found_claim = true;
}
}
}
if found_claim {
log_trace!(logger, "Canceled claims for previously confirmed counterparty commitment with txid {counterparty_commitment_txid}");
}
}
}
// Cancel any pending claims for any holder commitments in case they had previously
// confirmed or been signed (in which case we will start attempting to claim without
// waiting for confirmation).
for funding in core::iter::once(&self.funding).chain(self.pending_funding.iter()) {
// Abandon claims against the current holder commitment of this scope, unless it is
// the commitment that just confirmed.
if funding.current_holder_commitment_tx.trust().txid() != *confirmed_commitment_txid {
let mut found_claim = false;
let txid = funding.current_holder_commitment_tx.trust().txid();
let mut outpoint = BitcoinOutPoint { txid, vout: 0 };
for htlc in funding.current_holder_commitment_tx.nondust_htlcs() {
if let Some(vout) = htlc.transaction_output_index {
outpoint.vout = vout;
if self.onchain_tx_handler.abandon_claim(&outpoint) {
found_claim = true;
}
} else {
debug_assert!(false, "Expected transaction output index for non-dust HTLC");
}
}
if found_claim {
log_trace!(logger, "Canceled claims for previously broadcast holder commitment with txid {txid}");
}
}
// Likewise for the previous holder commitment of this scope, if we still have it.
if let Some(prev_holder_commitment_tx) = &funding.prev_holder_commitment_tx {
let txid = prev_holder_commitment_tx.trust().txid();
if txid != *confirmed_commitment_txid {
let mut found_claim = false;
let mut outpoint = BitcoinOutPoint { txid, vout: 0 };
for htlc in prev_holder_commitment_tx.nondust_htlcs() {
if let Some(vout) = htlc.transaction_output_index {
outpoint.vout = vout;
if self.onchain_tx_handler.abandon_claim(&outpoint) {
found_claim = true;
}
} else {
debug_assert!(false, "Expected transaction output index for non-dust HTLC");
}
}
if found_claim {
log_trace!(logger, "Canceled claims for previously broadcast holder commitment with txid {txid}");
}
}
}
}
}
#[cfg(any(test, feature = "_test_utils", feature = "unsafe_revoked_tx_signing"))]
/// Signs and returns our latest holder commitment transaction, followed by any HTLC
/// transactions we can fully sign for it (non-anchor channels only). Test/unsafe-signing only:
/// broadcasting a stale state this way can be penalized.
///
/// Note that this includes possibly-locktimed-in-the-future transactions!
#[rustfmt::skip]
fn unsafe_get_latest_holder_commitment_txn<L: Deref>(
&mut self, logger: &WithContext<L>
) -> Vec<Transaction> where L::Target: Logger {
log_debug!(logger, "Getting signed copy of latest holder commitment transaction!");
let commitment_tx = {
// `unsafe_sign_holder_commitment` bypasses the usual revocation-safety checks.
let sig = self.onchain_tx_handler.signer.unsafe_sign_holder_commitment(
&self.funding.channel_parameters, &self.funding.current_holder_commitment_tx,
&self.onchain_tx_handler.secp_ctx,
).expect("sign holder commitment");
let redeem_script = self.funding.channel_parameters.make_funding_redeemscript();
self.funding.current_holder_commitment_tx.add_holder_sig(&redeem_script, sig)
};
let mut holder_transactions = vec![commitment_tx];
if self.channel_type_features().supports_anchors_zero_fee_htlc_tx()
|| self.channel_type_features().supports_anchor_zero_fee_commitments()
{
// HTLC transactions in these channels require external funding before finalized,
// so we return the commitment transaction alone here.
//
// In 0FC channels, we *could* use HTLC transactions to pay for fees on a
// 0FC commitment transaction to save the fixed transaction overhead
// (locktime + version), but we would still have to pay for fees using
// external UTXOs to avoid invalidating the counterparty HTLC signature.
// This is something we would consider in the future.
//
// Furthermore, we can't broadcast a HTLC claim transaction while the
// anchor claim transaction and its parent are still unconfirmed due to the
// current single-child restriction on TRUC transactions.
return holder_transactions;
}
// Non-anchor channels: append each HTLC transaction we can fully sign right now.
self.get_broadcasted_holder_htlc_descriptors(&self.funding, &self.funding.current_holder_commitment_tx)
.into_iter()
.for_each(|htlc_descriptor| {
let txid = self.funding.current_holder_commitment_tx.trust().txid();
let vout = htlc_descriptor.htlc.transaction_output_index
.expect("Expected transaction output index for non-dust HTLC");
let htlc_output = HolderHTLCOutput::build(htlc_descriptor, 0);
if let Some(htlc_tx) = htlc_output.get_maybe_signed_htlc_tx(
&mut self.onchain_tx_handler, &::bitcoin::OutPoint { txid, vout },
) {
// Only include transactions with a complete witness.
if htlc_tx.is_fully_signed() {
holder_transactions.push(htlc_tx.0);
}
}
});
holder_transactions
}
/// Handles a newly-connected block: records it as our best chain tip and then processes the
/// block's transactions via `transactions_confirmed`.
#[rustfmt::skip]
fn block_connected<B: Deref, F: Deref, L: Deref>(
	&mut self, header: &Header, txdata: &TransactionData, height: u32, broadcaster: B,
	fee_estimator: F, logger: &WithContext<L>,
) -> Vec<TransactionOutputs>
where B::Target: BroadcasterInterface,
	F::Target: FeeEstimator,
	L::Target: Logger,
{
	// Adopt the connected block as the new tip before handling its transactions.
	self.best_block = BestBlock::new(header.block_hash(), height);
	let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
	self.transactions_confirmed(header, txdata, height, broadcaster, &bounded_fee_estimator, logger)
}
/// Updates our view of the best chain tip: either advances to a strictly higher tip (running
/// height-based event processing) or handles a reorg to a different block at the same or a
/// lower height.
#[rustfmt::skip]
fn best_block_updated<B: Deref, F: Deref, L: Deref>(
	&mut self,
	header: &Header,
	height: u32,
	broadcaster: B,
	fee_estimator: &LowerBoundedFeeEstimator<F>,
	logger: &WithContext<L>,
) -> Vec<TransactionOutputs>
where
	B::Target: BroadcasterInterface,
	F::Target: FeeEstimator,
	L::Target: Logger,
{
	let block_hash = header.block_hash();
	if height > self.best_block.height {
		// Plain chain advance: record the new tip and run confirmation processing.
		self.best_block = BestBlock::new(block_hash, height);
		log_trace!(logger, "Connecting new block {} at height {}", block_hash, height);
		return self.block_confirmed(height, block_hash, vec![], vec![], vec![], &broadcaster, &fee_estimator, logger);
	}
	if block_hash != self.best_block.block_hash {
		// A different hash at the same or lower height implies a reorg: drop pending
		// confirmation events above the new height and notify the onchain handler.
		self.best_block = BestBlock::new(block_hash, height);
		log_trace!(logger, "Best block re-orged, replaced with new block {} at height {}", block_hash, height);
		self.onchain_events_awaiting_threshold_conf.retain(|entry| entry.height <= height);
		let conf_target = self.closure_conf_target();
		self.onchain_tx_handler.blocks_disconnected(
			height, &broadcaster, conf_target, &self.destination_script, fee_estimator, logger,
		);
	}
	Vec::new()
}
#[rustfmt::skip]
fn transactions_confirmed<B: Deref, F: Deref, L: Deref>(
&mut self,
header: &Header,
txdata: &TransactionData,
height: u32,
broadcaster: B,
fee_estimator: &LowerBoundedFeeEstimator<F>,
logger: &WithContext<L>,
) -> Vec<TransactionOutputs>
where
B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
let funding_seen_before = self.funding_seen_onchain;
let txn_matched = self.filter_block(txdata);
if !self.funding_seen_onchain {
for &(_, tx) in txdata.iter() {
let txid = tx.compute_txid();
if txid == self.funding.funding_txid() ||
self.pending_funding.iter().any(|f| f.funding_txid() == txid)
{
self.funding_seen_onchain = true;
break;
}
}
}
for tx in &txn_matched {
let mut output_val = Amount::ZERO;
for out in tx.output.iter() {
if out.value > Amount::MAX_MONEY { panic!("Value-overflowing transaction provided to block connected"); }
output_val += out.value;
if output_val > Amount::MAX_MONEY { panic!("Value-overflowing transaction provided to block connected"); }
}
}
let block_hash = header.block_hash();
// We may need to broadcast our holder commitment if we see a funding transaction reorg,
// with a different funding transaction confirming. It's possible we process a
// holder/counterparty commitment within this same block that would invalidate the one we're
// intending to broadcast, so we track whether we should broadcast and wait until all
// transactions in the block have been processed.
let mut should_broadcast_commitment = false;
let mut watch_outputs = Vec::new();
let mut claimable_outpoints = Vec::new();
if self.is_manual_broadcast && !funding_seen_before && self.funding_seen_onchain && self.holder_tx_signed
{
should_broadcast_commitment = true;
}
'tx_iter: for tx in &txn_matched {
let txid = tx.compute_txid();
log_trace!(logger, "Transaction {} confirmed in block {}", txid , block_hash);
// If a transaction has already been confirmed, ensure we don't bother processing it duplicatively.
if self.alternative_funding_confirmed.map(|(alternative_funding_txid, _)| alternative_funding_txid == txid).unwrap_or(false) {
log_debug!(logger, "Skipping redundant processing of funding-spend tx {} as it was previously confirmed", txid);
continue 'tx_iter;
}
if Some(txid) == self.funding_spend_confirmed {
log_debug!(logger, "Skipping redundant processing of funding-spend tx {} as it was previously confirmed", txid);
continue 'tx_iter;
}
for ev in self.onchain_events_awaiting_threshold_conf.iter() {
if ev.txid == txid {
if let Some(conf_hash) = ev.block_hash {
assert_eq!(header.block_hash(), conf_hash,
"Transaction {} was already confirmed and is being re-confirmed in a different block.\n\
This indicates a severe bug in the transaction connection logic - a reorg should have been processed first!", ev.txid);
}
log_debug!(logger, "Skipping redundant processing of confirming tx {} as it was previously confirmed", txid);
continue 'tx_iter;
}
}
for htlc in self.htlcs_resolved_on_chain.iter() {
if Some(txid) == htlc.resolving_txid {
log_debug!(logger, "Skipping redundant processing of HTLC resolution tx {} as it was previously confirmed", txid);
continue 'tx_iter;
}
}
for spendable_txid in self.spendable_txids_confirmed.iter() {
if txid == *spendable_txid {
log_debug!(logger, "Skipping redundant processing of spendable tx {} as it was previously confirmed", txid);
continue 'tx_iter;
}
}
// A splice/dual-funded RBF transaction has confirmed. We can't promote the
// `FundingScope` scope until we see the
// [`ChannelMonitorUpdateStep::RenegotiatedFundingLocked`] for it, but we track the txid
// so we know which holder commitment transaction we may need to broadcast.
if let Some(alternative_funding) = self
.pending_funding
.iter()
.find(|funding| funding.funding_txid() == txid)
{
assert!(self.alternative_funding_confirmed.is_none());
assert!(
!self.onchain_events_awaiting_threshold_conf.iter()
.any(|e| matches!(e.event, OnchainEvent::AlternativeFundingConfirmation {}))
);
assert!(self.funding_spend_confirmed.is_none());
assert!(
!self.onchain_events_awaiting_threshold_conf.iter()
.any(|e| matches!(e.event, OnchainEvent::FundingSpendConfirmation { .. }))
);
let (desc, msg) = if alternative_funding.is_splice() {
debug_assert!(tx.input.iter().any(|input| {
let funding_outpoint = self.funding.funding_outpoint().into_bitcoin_outpoint();
input.previous_output == funding_outpoint
}));
("Splice", "splice_locked")
} else {
("Dual-funded RBF", "channel_ready")
};
let action = if self.holder_tx_signed || self.funding_spend_seen {
", broadcasting holder commitment transaction".to_string()
} else if !self.no_further_updates_allowed() {
format!(", waiting for `{msg}` exchange")
} else {
"".to_string()
};
log_info!(logger, "{desc} for channel {} confirmed with txid {txid}{action}", self.channel_id());
self.alternative_funding_confirmed = Some((txid, height));
if self.no_further_updates_allowed() {
// We can no longer rely on
// [`ChannelMonitorUpdateStep::RenegotiatedFundingLocked`] to promote the
// scope; do so when the funding is no longer under reorg risk.
self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
txid,
transaction: Some((*tx).clone()),
height,
block_hash: Some(block_hash),
event: OnchainEvent::AlternativeFundingConfirmation {},
});
}
if self.holder_tx_signed || self.funding_spend_seen {
// Cancel any previous claims that are no longer valid as they stemmed from a
// different funding transaction.
let new_holder_commitment_txid =
alternative_funding.current_holder_commitment_tx.trust().txid();
self.cancel_prev_commitment_claims(&logger, &new_holder_commitment_txid);
// We either attempted to broadcast a holder commitment, or saw one confirm
// onchain, so broadcast the new holder commitment for the confirmed funding to
// claim our funds as the channel is no longer operational.
should_broadcast_commitment = true;
}
continue 'tx_iter;
}
if tx.input.len() == 1 {
// Assuming our keys were not leaked (in which case we're screwed no matter what),
// commitment transactions and HTLC transactions will all only ever have one input
// (except for HTLC transactions for channels with anchor outputs), which is an easy
// way to filter out any potential non-matching txn for lazy filters.
if let Some(funding_txid_spent) = core::iter::once(&self.funding)
.chain(self.pending_funding.iter())
.find(|funding| {
let funding_outpoint = funding.funding_outpoint().into_bitcoin_outpoint();
funding_outpoint == tx.input[0].previous_output
})
.map(|funding| funding.funding_txid())
{
assert_eq!(
funding_txid_spent,
self.alternative_funding_confirmed
.map(|(txid, _)| txid)
.unwrap_or_else(|| self.funding.funding_txid())
);
log_info!(logger, "Channel {} closed by funding output spend in txid {txid}",
self.channel_id());
if !self.funding_spend_seen {
self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(()));
}
self.funding_spend_seen = true;
let mut balance_spendable_csv = None;
let mut commitment_tx_to_counterparty_output = None;
// Is it a commitment transaction?
if (tx.input[0].sequence.0 >> 8*3) as u8 == 0x80 && (tx.lock_time.to_consensus_u32() >> 8*3) as u8 == 0x20 {
if let Some((mut new_outpoints, new_outputs)) = self.check_spend_holder_transaction(txid, &tx, height, &block_hash, &logger) {
if !new_outputs.1.is_empty() {
watch_outputs.push(new_outputs);
}
claimable_outpoints.append(&mut new_outpoints);
balance_spendable_csv = Some(self.on_holder_tx_csv);
} else {
let mut new_watch_outputs = Vec::new();
for (idx, outp) in tx.output.iter().enumerate() {
new_watch_outputs.push((idx as u32, outp.clone()));
}
watch_outputs.push((txid, new_watch_outputs));
let (mut new_outpoints, counterparty_output_idx_sats) =
self.check_spend_counterparty_transaction(txid, &tx, height, &block_hash, &logger);
commitment_tx_to_counterparty_output = counterparty_output_idx_sats;
claimable_outpoints.append(&mut new_outpoints);
}
// We've just seen a commitment confirm, which conflicts with the holder
// commitment we intend to broadcast
if should_broadcast_commitment {
log_info!(logger, "Canceling our queued holder commitment broadcast as we've found a conflict confirm instead");
should_broadcast_commitment = false;
}
}
self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
txid,
transaction: Some((*tx).clone()),
height,
block_hash: Some(block_hash),
event: OnchainEvent::FundingSpendConfirmation {
on_local_output_csv: balance_spendable_csv,
commitment_tx_to_counterparty_output,
},
});
// Now that we've detected a confirmed commitment transaction, attempt to cancel
// pending claims for any commitments that were previously confirmed such that
// we don't continue claiming inputs that no longer exist.
self.cancel_prev_commitment_claims(&logger, &txid);
}
}
if tx.input.len() >= 1 {
// While all commitment transactions have one input, HTLC transactions may have more
// if the HTLC was present in an anchor channel. HTLCs can also be resolved in a few
// other ways which can have more than one output.
for tx_input in &tx.input {
let commitment_txid = tx_input.previous_output.txid;
if let Some(&commitment_number) = self.counterparty_commitment_txn_on_chain.get(&commitment_txid) {
let (mut new_outpoints, new_outputs_option) = self.check_spend_counterparty_htlc(
&tx, commitment_number, &commitment_txid, height, &logger
);
claimable_outpoints.append(&mut new_outpoints);
if let Some(new_outputs) = new_outputs_option {
watch_outputs.push(new_outputs);
}
// Since there may be multiple HTLCs for this channel (all spending the
// same commitment tx) being claimed by the counterparty within the same
// transaction, and `check_spend_counterparty_htlc` already checks all the
// ones relevant to this channel, we can safely break from our loop.
break;
}
}
self.is_resolving_htlc_output(&tx, height, &block_hash, logger);
self.check_tx_and_push_spendable_outputs(&tx, height, &block_hash, logger);
}
}
if height > self.best_block.height {
self.best_block = BestBlock::new(block_hash, height);
}
if should_broadcast_commitment {
let (mut claimables, mut outputs) =
self.generate_claimable_outpoints_and_watch_outputs(None, false);
claimable_outpoints.append(&mut claimables);
watch_outputs.append(&mut outputs);
}
self.block_confirmed(height, block_hash, txn_matched, watch_outputs, claimable_outpoints, &broadcaster, &fee_estimator, logger)
}
/// Update state for new block(s)/transaction(s) confirmed. Note that the caller must update
/// `self.best_block` before calling if a new best blockchain tip is available. More
/// concretely, `self.best_block` must never be at a lower height than `conf_height`, avoiding
/// complexity especially in
/// `OnchainTx::update_claims_view_from_requests`/`OnchainTx::update_claims_view_from_matched_txn`.
///
/// `conf_height` should be set to the height at which any new transaction(s)/block(s) were
/// confirmed at, even if it is not the current best height.
#[rustfmt::skip]
fn block_confirmed<B: Deref, F: Deref, L: Deref>(
	&mut self,
	conf_height: u32,
	conf_hash: BlockHash,
	txn_matched: Vec<&Transaction>,
	mut watch_outputs: Vec<TransactionOutputs>,
	mut claimable_outpoints: Vec<PackageTemplate>,
	broadcaster: &B,
	fee_estimator: &LowerBoundedFeeEstimator<F>,
	logger: &WithContext<L>,
) -> Vec<TransactionOutputs>
where
	B::Target: BroadcasterInterface,
	F::Target: FeeEstimator,
	L::Target: Logger,
{
	log_trace!(logger, "Processing {} matched transactions for block at height {}.", txn_matched.len(), conf_height);
	debug_assert!(self.best_block.height >= conf_height);

	// Only generate claims if we haven't already done so (e.g., in transactions_confirmed).
	if claimable_outpoints.is_empty() {
		let should_broadcast = self.should_broadcast_holder_commitment_txn(logger);
		if let Some(payment_hash) = should_broadcast {
			let reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash) };
			let (mut new_outpoints, mut new_outputs) =
				self.generate_claimable_outpoints_and_watch_outputs(Some(reason), false);
			// For manual-broadcast channels we hold off on claiming until the funding has been
			// seen on-chain, as broadcasting a commitment earlier would be useless.
			if !self.is_manual_broadcast || self.funding_seen_onchain {
				claimable_outpoints.append(&mut new_outpoints);
				watch_outputs.append(&mut new_outputs);
			} else {
				log_info!(logger, "Not broadcasting holder commitment for manual-broadcast channel before funding appears on-chain");
			}
		}
	}

	// Find which on-chain events have reached their confirmation threshold.
	let (onchain_events_reaching_threshold_conf, onchain_events_awaiting_threshold_conf): (Vec<_>, Vec<_>) =
		self.onchain_events_awaiting_threshold_conf.drain(..).partition(
			|entry| entry.has_reached_confirmation_threshold(&self.best_block));
	self.onchain_events_awaiting_threshold_conf = onchain_events_awaiting_threshold_conf;

	// Used to check for duplicate HTLC resolutions.
	#[cfg(debug_assertions)]
	let unmatured_htlcs: Vec<_> = self.onchain_events_awaiting_threshold_conf
		.iter()
		.filter_map(|entry| match &entry.event {
			OnchainEvent::HTLCUpdate { source, .. } => Some(source.clone()),
			_ => None,
		})
		.collect();
	#[cfg(debug_assertions)]
	let mut matured_htlcs = Vec::new();

	// Produce actionable events from on-chain events having reached their threshold.
	for entry in onchain_events_reaching_threshold_conf {
		match entry.event {
			OnchainEvent::HTLCUpdate { source, payment_hash, htlc_value_satoshis, commitment_tx_output_idx } => {
				// Check for duplicate HTLC resolutions.
				#[cfg(debug_assertions)]
				{
					debug_assert!(
						!unmatured_htlcs.contains(&source),
						"An unmature HTLC transaction conflicts with a maturing one; failed to \
						call either transaction_unconfirmed for the conflicting transaction \
						or blocks_disconnected for a block before it.");
					debug_assert!(
						!matured_htlcs.contains(&source),
						"A matured HTLC transaction conflicts with a maturing one; failed to \
						call either transaction_unconfirmed for the conflicting transaction \
						or blocks_disconnected for a block before it.");
					matured_htlcs.push(source.clone());
				}
				log_debug!(logger, "HTLC {} failure update in {} has got enough confirmations to be passed upstream",
					&payment_hash, entry.txid);
				self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
					payment_hash,
					payment_preimage: None,
					source,
					htlc_value_satoshis,
				}));
				self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC {
					commitment_tx_output_idx,
					resolving_txid: Some(entry.txid),
					resolving_tx: entry.transaction,
					payment_preimage: None,
				});
			},
			OnchainEvent::MaturingOutput { descriptor } => {
				log_debug!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor));
				self.pending_events.push(Event::SpendableOutputs {
					outputs: vec![descriptor],
					channel_id: Some(self.channel_id()),
				});
				self.spendable_txids_confirmed.push(entry.txid);
			},
			OnchainEvent::HTLCSpendConfirmation { commitment_tx_output_idx, preimage, .. } => {
				self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC {
					commitment_tx_output_idx: Some(commitment_tx_output_idx),
					resolving_txid: Some(entry.txid),
					resolving_tx: entry.transaction,
					payment_preimage: preimage,
				});
			},
			OnchainEvent::FundingSpendConfirmation { commitment_tx_to_counterparty_output, .. } => {
				self.funding_spend_confirmed = Some(entry.txid);
				self.confirmed_commitment_tx_counterparty_output = commitment_tx_to_counterparty_output;
				// If no alternative funding confirmed, any pending funding scopes are now moot;
				// drop them and let the user discard the corresponding funding transactions.
				if self.alternative_funding_confirmed.is_none() {
					for funding in self.pending_funding.drain(..) {
						self.outputs_to_watch.remove(&funding.funding_txid());
						self.pending_events.push(Event::DiscardFunding {
							channel_id: self.channel_id,
							funding_info: crate::events::FundingInfo::OutPoint {
								outpoint: funding.funding_outpoint(),
							},
						});
					}
				}
			},
			OnchainEvent::AlternativeFundingConfirmation {} => {
				// An alternative funding transaction has irrevocably confirmed and we're no
				// longer allowing monitor updates, so promote the `FundingScope` now.
				debug_assert!(self.no_further_updates_allowed());
				debug_assert_ne!(self.funding.funding_txid(), entry.txid);
				if let Err(_) = self.promote_funding(entry.txid) {
					debug_assert!(false);
					log_error!(logger, "Missing scope for alternative funding confirmation with txid {}", entry.txid);
				}
			},
		}
	}

	if self.no_further_updates_allowed() {
		// Fail back HTLCs on backwards channels if they expire within
		// `LATENCY_GRACE_PERIOD_BLOCKS` blocks and the channel is closed (i.e. we're at a
		// point where no further off-chain updates will be accepted). If we haven't seen the
		// preimage for an HTLC by the time the previous hop's timeout expires, we've lost that
		// HTLC, so we might as well fail it back instead of having our counterparty force-close
		// the inbound channel.
		let current_counterparty_htlcs = if let Some(txid) = self.funding.current_counterparty_commitment_txid {
			if let Some(htlc_outputs) = self.funding.counterparty_claimable_outpoints.get(&txid) {
				Some(htlc_outputs.iter().map(|&(ref a, ref b)| (a, b.as_ref().map(|boxed| &**boxed))))
			} else { None }
		} else { None }.into_iter().flatten();
		let prev_counterparty_htlcs = if let Some(txid) = self.funding.prev_counterparty_commitment_txid {
			if let Some(htlc_outputs) = self.funding.counterparty_claimable_outpoints.get(&txid) {
				Some(htlc_outputs.iter().map(|&(ref a, ref b)| (a, b.as_ref().map(|boxed| &**boxed))))
			} else { None }
		} else { None }.into_iter().flatten();
		let htlcs = holder_commitment_htlcs!(self, CURRENT_WITH_SOURCES)
			.chain(current_counterparty_htlcs)
			.chain(prev_counterparty_htlcs);
		let height = self.best_block.height;
		for (htlc, source_opt) in htlcs {
			// Only check forwarded HTLCs' previous hops
			let source = match source_opt {
				Some(source) => source,
				None => continue,
			};
			let inbound_htlc_expiry = match source.inbound_htlc_expiry() {
				Some(cltv_expiry) => cltv_expiry,
				None => continue,
			};
			let max_expiry_height = height.saturating_add(LATENCY_GRACE_PERIOD_BLOCKS);
			if inbound_htlc_expiry > max_expiry_height {
				continue;
			}
			let duplicate_event = self.pending_monitor_events.iter().any(
				|update| if let &MonitorEvent::HTLCEvent(ref upd) = update {
					upd.source == *source
				} else { false });
			if duplicate_event {
				continue;
			}
			if !self.failed_back_htlc_ids.insert(SentHTLCId::from_source(source)) {
				continue;
			}
			// Duplicate events were already `continue`d past above, so we can unconditionally
			// fail the HTLC backwards here.
			log_error!(logger, "Failing back HTLC {} upstream to preserve the \
				channel as the forward HTLC hasn't resolved and our backward HTLC \
				expires soon at {}", log_bytes!(htlc.payment_hash.0), inbound_htlc_expiry);
			self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
				source: source.clone(),
				payment_preimage: None,
				payment_hash: htlc.payment_hash,
				htlc_value_satoshis: Some(htlc.amount_msat / 1000),
			}));
		}
	}

	let conf_target = self.closure_conf_target();
	self.onchain_tx_handler.update_claims_view_from_requests(
		claimable_outpoints, conf_height, self.best_block.height, broadcaster, conf_target,
		&self.destination_script, fee_estimator, logger,
	);
	self.onchain_tx_handler.update_claims_view_from_matched_txn(
		&txn_matched, conf_height, conf_hash, self.best_block.height, broadcaster, conf_target,
		&self.destination_script, fee_estimator, logger,
	);

	// Determine new outputs to watch by comparing against previously known outputs to watch,
	// updating the latter in the process.
	watch_outputs.retain(|&(ref txid, ref txouts)| {
		let idx_and_scripts = txouts.iter().map(|o| (o.0, o.1.script_pubkey.clone())).collect();
		self.outputs_to_watch.insert(txid.clone(), idx_and_scripts).is_none()
	});
	#[cfg(test)]
	{
		// If we see a transaction for which we registered outputs previously,
		// make sure the registered scriptpubkey at the expected index match
		// the actual transaction output one. We failed this case before #653.
		for tx in &txn_matched {
			if let Some(outputs) = self.get_outputs_to_watch().get(&tx.compute_txid()) {
				for idx_and_script in outputs.iter() {
					assert!((idx_and_script.0 as usize) < tx.output.len());
					assert_eq!(tx.output[idx_and_script.0 as usize].script_pubkey, idx_and_script.1);
				}
			}
		}
	}
	watch_outputs
}
/// Handles a reorg back to `fork_point`, dropping our view of any on-chain events which
/// confirmed above it and rewinding our best-block pointer.
#[rustfmt::skip]
fn blocks_disconnected<B: Deref, F: Deref, L: Deref>(
	&mut self, fork_point: BestBlock, broadcaster: B, fee_estimator: F, logger: &WithContext<L>
) where B::Target: BroadcasterInterface,
	F::Target: FeeEstimator,
	L::Target: Logger,
{
	let new_height = fork_point.height;
	log_trace!(logger, "Block(s) disconnected to height {}", new_height);
	assert!(self.best_block.height > fork_point.height,
		"Blocks disconnected must indicate disconnection from the current best height, i.e. the new chain tip must be lower than the previous best height");

	// Drop any threshold-tracked events confirmed above the fork point, e.g. HTLC failure
	// triggers (revoked/non-revoked commitment txs, HTLC-timeout txs) or maturing spendable
	// outputs whose paying transaction was disconnected.
	self.onchain_events_awaiting_threshold_conf.retain(|entry| entry.height <= new_height);

	// TODO: Replace with `take_if` once our MSRV is >= 1.80.
	let alternative_funding_reorged = self
		.alternative_funding_confirmed
		.as_ref()
		.map_or(false, |(_, conf_height)| *conf_height > new_height);
	let mut should_broadcast_commitment = false;
	if alternative_funding_reorged {
		self.alternative_funding_confirmed = None;
		if self.holder_tx_signed || self.funding_spend_seen {
			// Claims stemming from the reorged-out funding transaction are no longer valid;
			// cancel them in favor of claims against the current funding's holder commitment.
			let new_holder_commitment_txid =
				self.funding.current_holder_commitment_tx.trust().txid();
			self.cancel_prev_commitment_claims(&logger, &new_holder_commitment_txid);
			should_broadcast_commitment = true;
		}
	}

	let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
	let conf_target = self.closure_conf_target();
	self.onchain_tx_handler.blocks_disconnected(
		new_height, &broadcaster, conf_target, &self.destination_script, &bounded_fee_estimator, logger
	);

	// Only attempt to broadcast the new commitment after the `blocks_disconnected` call above
	// so that it doesn't get removed from the set of pending claims.
	if should_broadcast_commitment {
		self.queue_latest_holder_commitment_txn_for_broadcast(&broadcaster, &bounded_fee_estimator, logger, true);
	}
	self.best_block = fork_point;
}
/// Removes our view of a single transaction which was reorg'd out, along with any
/// threshold-tracked events at or above its confirmation height.
#[rustfmt::skip]
fn transaction_unconfirmed<B: Deref, F: Deref, L: Deref>(
	&mut self,
	txid: &Txid,
	broadcaster: B,
	fee_estimator: &LowerBoundedFeeEstimator<F>,
	logger: &WithContext<L>,
) where
	B::Target: BroadcasterInterface,
	F::Target: FeeEstimator,
	L::Target: Logger,
{
	// If we were tracking the given transaction, everything which confirmed at or above its
	// height must have been reorg'd out along with it.
	let removed_height = self
		.onchain_events_awaiting_threshold_conf
		.iter()
		.find(|entry| entry.txid == *txid)
		.map(|entry| entry.height);
	if let Some(removed_height) = removed_height {
		log_info!(logger, "transaction_unconfirmed of txid {} implies height {} was reorg'd out", txid, removed_height);
		self.onchain_events_awaiting_threshold_conf.retain(|entry| {
			if entry.height >= removed_height {
				log_info!(logger, "Transaction {} reorg'd out", entry.txid);
				false
			} else { true }
		});
	}
	debug_assert!(!self.onchain_events_awaiting_threshold_conf.iter().any(|entry| entry.txid == *txid));

	// TODO: Replace with `take_if` once our MSRV is >= 1.80.
	let alternative_funding_unconfirmed = self
		.alternative_funding_confirmed
		.as_ref()
		.map_or(false, |(alternative_funding_txid, _)| alternative_funding_txid == txid);
	let mut should_broadcast_commitment = false;
	if alternative_funding_unconfirmed {
		self.alternative_funding_confirmed = None;
		if self.holder_tx_signed || self.funding_spend_seen {
			// Claims stemming from the now-unconfirmed funding transaction are no longer
			// valid; cancel them in favor of claims against the current funding's holder
			// commitment.
			let new_holder_commitment_txid =
				self.funding.current_holder_commitment_tx.trust().txid();
			self.cancel_prev_commitment_claims(&logger, &new_holder_commitment_txid);
			should_broadcast_commitment = true;
		}
	}

	let conf_target = self.closure_conf_target();
	self.onchain_tx_handler.transaction_unconfirmed(
		txid, &broadcaster, conf_target, &self.destination_script, fee_estimator, logger
	);

	// Only attempt to broadcast the new commitment after the `transaction_unconfirmed` call
	// above so that it doesn't get removed from the set of pending claims.
	if should_broadcast_commitment {
		self.queue_latest_holder_commitment_txn_for_broadcast(&broadcaster, fee_estimator, logger, true);
	}
}
/// Filters a block's `txdata` for transactions spending watched outputs or for any child
/// transactions thereof.
#[rustfmt::skip]
fn filter_block<'a>(&self, txdata: &TransactionData<'a>) -> Vec<&'a Transaction> {
	// Txids of transactions matched so far, so that later transactions in the same block
	// which spend outputs of a matched transaction are matched as well.
	let mut matched_txn = new_hash_set();
	let mut matched = Vec::new();
	for &(_, tx) in txdata.iter() {
		let relevant = self.spends_watched_output(tx)
			|| tx.input.iter().any(|input| matched_txn.contains(&input.previous_output.txid));
		if relevant {
			matched_txn.insert(tx.compute_txid());
			matched.push(tx);
		}
	}
	matched
}
/// Checks if a given transaction spends any watched outputs.
#[rustfmt::skip]
fn spends_watched_output(&self, tx: &Transaction) -> bool {
	// Scan each input against the (txid -> [(vout, script)]) map of outputs we watch; a
	// single matching (txid, vout) pair is enough to declare the transaction relevant.
	for input in tx.input.iter() {
		if let Some(outputs) = self.get_outputs_to_watch().get(&input.previous_output.txid) {
			for (idx, _script_pubkey) in outputs.iter() {
				if *idx == input.previous_output.vout {
					#[cfg(test)]
					{
						// If the expected script is a known type, check that the witness
						// appears to be spending the correct type (ie that the match would
						// actually succeed in BIP 158/159-style filters).
						if _script_pubkey.is_p2wsh() {
							if input.witness.last().unwrap().to_vec() == deliberately_bogus_accepted_htlc_witness_program() {
								// In at least one test we use a deliberately bogus witness
								// script which hit an old panic. Thus, we check for that here
								// and avoid the assert if its the expected bogus script.
								return true;
							}
							// The last witness element of a P2WSH spend is the witness
							// script itself; re-derive the P2WSH spk from it and compare.
							assert_eq!(&bitcoin::Address::p2wsh(&ScriptBuf::from(input.witness.last().unwrap().to_vec()), bitcoin::Network::Bitcoin).script_pubkey(), _script_pubkey);
						} else if _script_pubkey.is_p2wpkh() {
							// For P2WPKH the last witness element is the pubkey.
							assert_eq!(&bitcoin::Address::p2wpkh(&bitcoin::CompressedPublicKey(bitcoin::PublicKey::from_slice(&input.witness.last().unwrap()).unwrap().inner), bitcoin::Network::Bitcoin).script_pubkey(), _script_pubkey);
						} else if _script_pubkey == &chan_utils::shared_anchor_script_pubkey() {
							// Shared (keyless) anchors are spent with an empty witness.
							assert!(input.witness.is_empty());
						} else { panic!(); }
					}
					return true;
				}
			}
		}
	}
	false
}
/// Decides whether we need to go on-chain with our latest holder commitment transaction due to
/// an HTLC nearing its deadline, returning the payment hash of the first such HTLC found (or
/// `None` if no broadcast is warranted).
#[rustfmt::skip]
fn should_broadcast_holder_commitment_txn<L: Deref>(
	&self, logger: &WithContext<L>
) -> Option<PaymentHash> where L::Target: Logger {
	// There's no need to broadcast our commitment transaction if we've seen one confirmed (even
	// with 1 confirmation) as it'll be rejected as duplicate/conflicting.
	let funding_spend_pending = self.onchain_events_awaiting_threshold_conf.iter()
		.any(|event| matches!(event.event, OnchainEvent::FundingSpendConfirmation { .. }));
	if self.funding_spend_confirmed.is_some() || funding_spend_pending {
		return None;
	}

	// We need to consider all HTLCs which are:
	//  * in any unrevoked counterparty commitment transaction, as they could broadcast said
	//    transactions and we'd end up in a race, or
	//  * are in our latest holder commitment transaction, as this is the thing we will
	//    broadcast if we go on-chain.
	// Note that we consider HTLCs which were below dust threshold here - while they don't
	// strictly imply that we need to fail the channel, we need to go ahead and fail them back
	// to the source, and if we don't fail the channel we will have to ensure that the next
	// updates that peer sends us are update_fails, failing the channel if not. It's probably
	// easier to just fail the channel as this case should be rare enough anyway.
	let height = self.best_block.height;

	// Grace period in number of blocks we allow for an async payment to resolve before we
	// force-close. 4032 blocks are roughly four weeks.
	const ASYNC_PAYMENT_GRACE_PERIOD_BLOCKS: u32 = 4032;

	macro_rules! scan_commitment {
		($htlcs: expr, $holder_tx: expr) => {
			for (ref htlc, ref source) in $htlcs {
				// For inbound HTLCs which we know the preimage for, we have to ensure we hit the
				// chain with enough room to claim the HTLC without our counterparty being able to
				// time out the HTLC first.
				// For outbound HTLCs which our counterparty hasn't failed/claimed, our primary
				// concern is being able to claim the corresponding inbound HTLC (on another
				// channel) before it expires. In fact, we don't even really care if our
				// counterparty here claims such an outbound HTLC after it expired as long as we
				// can still claim the corresponding HTLC. Thus, to avoid needlessly hitting the
				// chain when our counterparty is waiting for expiration to off-chain fail an HTLC
				// we give ourselves a few blocks of headroom after expiration before going
				// on-chain for an expired HTLC. In the case of an outbound HTLC for
				// an async payment, we allow `ASYNC_PAYMENT_GRACE_PERIOD_BLOCKS` before
				// we force-close the channel so that if we've been offline for a
				// while we give a chance for the HTLC to be failed on reconnect
				// instead closing the channel.
				let htlc_outbound = $holder_tx == htlc.offered;
				let async_payment = htlc_outbound && matches!(
					source.as_deref().expect("Every outbound HTLC should have a corresponding source"),
					HTLCSource::OutboundRoute {
						bolt12_invoice: Some(PaidBolt12Invoice::StaticInvoice(_)),
						..
					}
				);
				if ( htlc_outbound && htlc.cltv_expiry + LATENCY_GRACE_PERIOD_BLOCKS <= height && !async_payment) ||
					( htlc_outbound && htlc.cltv_expiry + ASYNC_PAYMENT_GRACE_PERIOD_BLOCKS <= height && async_payment) ||
					(!htlc_outbound && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.payment_hash)) {
					log_info!(logger, "Force-closing channel due to {} HTLC timeout - HTLC with payment hash {} expires at {}", if htlc_outbound { "outbound" } else { "inbound"}, htlc.payment_hash, htlc.cltv_expiry);
					return Some(htlc.payment_hash);
				}
			}
		}
	}

	// Our latest holder commitment (what we'd broadcast)...
	scan_commitment!(holder_commitment_htlcs!(self, CURRENT_WITH_SOURCES), true);

	// ...and both unrevoked counterparty commitments (what they might broadcast).
	if let Some(ref txid) = self.funding.current_counterparty_commitment_txid {
		if let Some(ref htlc_outputs) = self.funding.counterparty_claimable_outpoints.get(txid) {
			scan_commitment!(htlc_outputs.iter(), false);
		}
	}
	if let Some(ref txid) = self.funding.prev_counterparty_commitment_txid {
		if let Some(ref htlc_outputs) = self.funding.counterparty_claimable_outpoints.get(txid) {
			scan_commitment!(htlc_outputs.iter(), false);
		}
	}

	None
}
/// Check if any transaction broadcasted is resolving HTLC output by a success or timeout on a holder
/// or counterparty commitment tx, if so send back the source, preimage if found and payment_hash of resolved HTLC
#[rustfmt::skip]
fn is_resolving_htlc_output<L: Deref>(
&mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithContext<L>,
) where L::Target: Logger {
let funding_spent = get_confirmed_funding_scope!(self);
'outer_loop: for input in &tx.input {
let mut payment_data = None;
let htlc_claim = HTLCClaim::from_witness(&input.witness);
let revocation_sig_claim = htlc_claim == Some(HTLCClaim::Revocation);
let accepted_preimage_claim = htlc_claim == Some(HTLCClaim::AcceptedPreimage);
#[cfg(not(fuzzing))]
let accepted_timeout_claim = htlc_claim == Some(HTLCClaim::AcceptedTimeout);
let offered_preimage_claim = htlc_claim == Some(HTLCClaim::OfferedPreimage);
#[cfg(not(fuzzing))]
let offered_timeout_claim = htlc_claim == Some(HTLCClaim::OfferedTimeout);
let mut payment_preimage = PaymentPreimage([0; 32]);
if offered_preimage_claim || accepted_preimage_claim {
payment_preimage.0.copy_from_slice(input.witness.second_to_last().unwrap());
}
macro_rules! log_claim {
($tx_info: expr, $holder_tx: expr, $htlc: expr, $source_avail: expr) => {
let outbound_htlc = $holder_tx == $htlc.offered;
// HTLCs must either be claimed by a matching script type or through the
// revocation path:
#[cfg(not(fuzzing))] // Note that the fuzzer is not bound by pesky things like "signatures"
debug_assert!(!$htlc.offered || offered_preimage_claim || offered_timeout_claim || revocation_sig_claim);
#[cfg(not(fuzzing))] // Note that the fuzzer is not bound by pesky things like "signatures"
debug_assert!($htlc.offered || accepted_preimage_claim || accepted_timeout_claim || revocation_sig_claim);
// Further, only exactly one of the possible spend paths should have been
// matched by any HTLC spend:
#[cfg(not(fuzzing))] // Note that the fuzzer is not bound by pesky things like "signatures"
debug_assert_eq!(accepted_preimage_claim as u8 + accepted_timeout_claim as u8 +
offered_preimage_claim as u8 + offered_timeout_claim as u8 +
revocation_sig_claim as u8, 1);
if ($holder_tx && revocation_sig_claim) ||
(outbound_htlc && !$source_avail && (accepted_preimage_claim || offered_preimage_claim)) {
log_error!(logger, "Input spending {} ({}:{}) in {} resolves {} HTLC with payment hash {} with {}!",
$tx_info, input.previous_output.txid, input.previous_output.vout, tx.compute_txid(),
if outbound_htlc { "outbound" } else { "inbound" }, &$htlc.payment_hash,
if revocation_sig_claim { "revocation sig" } else { "preimage claim after we'd passed the HTLC resolution back. We can likely claim the HTLC output with a revocation claim" });
} else {
log_info!(logger, "Input spending {} ({}:{}) in {} resolves {} HTLC with payment hash {} with {}",
$tx_info, input.previous_output.txid, input.previous_output.vout, tx.compute_txid(),
if outbound_htlc { "outbound" } else { "inbound" }, &$htlc.payment_hash,
if revocation_sig_claim { "revocation sig" } else if accepted_preimage_claim || offered_preimage_claim { "preimage" } else { "timeout" });
}
}
}
macro_rules! check_htlc_valid_counterparty {
($htlc_output: expr, $per_commitment_data: expr) => {
for &(ref pending_htlc, ref pending_source) in $per_commitment_data {
if pending_htlc.payment_hash == $htlc_output.payment_hash && pending_htlc.amount_msat == $htlc_output.amount_msat {
if let &Some(ref source) = pending_source {
log_claim!("revoked counterparty commitment tx", false, pending_htlc, true);
payment_data = Some(((**source).clone(), $htlc_output.payment_hash, $htlc_output.amount_msat));
break;
}
}
}
}
}
macro_rules! scan_commitment {
($funding_spent: expr, $htlcs: expr, $tx_info: expr, $holder_tx: expr) => {
for (ref htlc_output, source_option) in $htlcs {
if Some(input.previous_output.vout) == htlc_output.transaction_output_index {
if let Some(ref source) = source_option {
log_claim!($tx_info, $holder_tx, htlc_output, true);
// We have a resolution of an HTLC either from one of our latest
// holder commitment transactions or an unrevoked counterparty commitment
// transaction. This implies we either learned a preimage, the HTLC
// has timed out, or we screwed up. In any case, we should now
// resolve the source HTLC with the original sender.
payment_data = Some(((*source).clone(), htlc_output.payment_hash, htlc_output.amount_msat));
} else if !$holder_tx {
if let Some(current_counterparty_commitment_txid) = &$funding_spent.current_counterparty_commitment_txid {
check_htlc_valid_counterparty!(htlc_output, $funding_spent.counterparty_claimable_outpoints.get(current_counterparty_commitment_txid).unwrap());
}
if payment_data.is_none() {
if let Some(prev_counterparty_commitment_txid) = &$funding_spent.prev_counterparty_commitment_txid {
check_htlc_valid_counterparty!(htlc_output, $funding_spent.counterparty_claimable_outpoints.get(prev_counterparty_commitment_txid).unwrap());
}
}
}
if payment_data.is_none() {
log_claim!($tx_info, $holder_tx, htlc_output, false);
let outbound_htlc = $holder_tx == htlc_output.offered;
self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
txid: tx.compute_txid(), height, block_hash: Some(*block_hash), transaction: Some(tx.clone()),
event: OnchainEvent::HTLCSpendConfirmation {
commitment_tx_output_idx: input.previous_output.vout,
preimage: if accepted_preimage_claim || offered_preimage_claim {
Some(payment_preimage) } else { None },
// If this is a payment to us (ie !outbound_htlc), wait for
// the CSV delay before dropping the HTLC from claimable
// balance if the claim was an HTLC-Success transaction (ie
// accepted_preimage_claim).
on_to_local_output_csv: if accepted_preimage_claim && !outbound_htlc {
Some(self.on_holder_tx_csv) } else { None },
},
});
continue 'outer_loop;
}
}
}
}
}
if input.previous_output.txid == funding_spent.current_holder_commitment_tx.trust().txid() {
scan_commitment!(
funding_spent, holder_commitment_htlcs!(self, CURRENT_WITH_SOURCES),
"our latest holder commitment tx", true
);
}
if let Some(prev_holder_commitment_tx) = funding_spent.prev_holder_commitment_tx.as_ref() {
if input.previous_output.txid == prev_holder_commitment_tx.trust().txid() {
scan_commitment!(
funding_spent, holder_commitment_htlcs!(self, PREV_WITH_SOURCES).unwrap(),
"our previous holder commitment tx", true
);
}
}
if let Some(ref htlc_outputs) = funding_spent.counterparty_claimable_outpoints.get(&input.previous_output.txid) {
let htlcs = htlc_outputs.iter()
.map(|&(ref a, ref b)| (a, b.as_ref().map(|boxed| &**boxed)));
scan_commitment!(funding_spent, htlcs, "counterparty commitment tx", false);
}
// Check that scan_commitment, above, decided there is some source worth relaying an
// HTLC resolution backwards to and figure out whether we learned a preimage from it.
if let Some((source, payment_hash, amount_msat)) = payment_data {
if accepted_preimage_claim {
if !self.pending_monitor_events.iter().any(
|update| if let &MonitorEvent::HTLCEvent(ref upd) = update { upd.source == source } else { false }) {
self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
txid: tx.compute_txid(),
height,
block_hash: Some(*block_hash),
transaction: Some(tx.clone()),
event: OnchainEvent::HTLCSpendConfirmation {
commitment_tx_output_idx: input.previous_output.vout,
preimage: Some(payment_preimage),
on_to_local_output_csv: None,
},
});
self.counterparty_fulfilled_htlcs.insert(SentHTLCId::from_source(&source), payment_preimage);
self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
source,
payment_preimage: Some(payment_preimage),
payment_hash,
htlc_value_satoshis: Some(amount_msat / 1000),
}));
}
} else if offered_preimage_claim {
if !self.pending_monitor_events.iter().any(
|update| if let &MonitorEvent::HTLCEvent(ref upd) = update {
upd.source == source
} else { false }) {
self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
txid: tx.compute_txid(),
transaction: Some(tx.clone()),
height,
block_hash: Some(*block_hash),
event: OnchainEvent::HTLCSpendConfirmation {
commitment_tx_output_idx: input.previous_output.vout,
preimage: Some(payment_preimage),
on_to_local_output_csv: None,
},
});
self.counterparty_fulfilled_htlcs.insert(SentHTLCId::from_source(&source), payment_preimage);
self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
source,
payment_preimage: Some(payment_preimage),
payment_hash,
htlc_value_satoshis: Some(amount_msat / 1000),
}));
}
} else {
self.onchain_events_awaiting_threshold_conf.retain(|ref entry| {
if entry.height != height { return true; }
match entry.event {
OnchainEvent::HTLCUpdate { source: ref htlc_source, .. } => {
*htlc_source != source
},
_ => true,
}
});
let entry = OnchainEventEntry {
txid: tx.compute_txid(),
transaction: Some(tx.clone()),
height,
block_hash: Some(*block_hash),
event: OnchainEvent::HTLCUpdate {
source,
payment_hash,
htlc_value_satoshis: Some(amount_msat / 1000),
commitment_tx_output_idx: Some(input.previous_output.vout),
},
};
log_info!(logger, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height {})", &payment_hash, entry.confirmation_threshold());
self.onchain_events_awaiting_threshold_conf.push(entry);
}
}
}
}
#[rustfmt::skip]
fn get_spendable_outputs(&self, funding_spent: &FundingScope, tx: &Transaction) -> Vec<SpendableOutputDescriptor> {
    // Collects descriptors for any outputs of `tx` which pay to a script we control: our
    // static destination script, a previously-broadcasted holder revokable (to_self)
    // script, the counterparty-payment script, or our shutdown script.
    let mut spendable_outputs = Vec::new();
    // `compute_txid` re-hashes the entire transaction on every call; compute it once up
    // front instead of once per matching output.
    let txid = tx.compute_txid();
    for (i, outp) in tx.output.iter().enumerate() {
        // Payments to our static destination script are spendable as-is.
        if outp.script_pubkey == self.destination_script {
            spendable_outputs.push(SpendableOutputDescriptor::StaticOutput {
                outpoint: OutPoint { txid, index: i as u16 },
                output: outp.clone(),
                channel_keys_id: Some(self.channel_keys_id),
            });
        }
        // Our to_self output on a commitment transaction we broadcasted: spendable only
        // after the counterparty's contest delay (`on_holder_tx_csv`).
        if let Some(ref broadcasted_holder_revokable_script) = self.broadcasted_holder_revokable_script {
            if broadcasted_holder_revokable_script.0 == outp.script_pubkey {
                spendable_outputs.push(SpendableOutputDescriptor::DelayedPaymentOutput(DelayedPaymentOutputDescriptor {
                    outpoint: OutPoint { txid, index: i as u16 },
                    per_commitment_point: broadcasted_holder_revokable_script.1,
                    to_self_delay: self.on_holder_tx_csv,
                    output: outp.clone(),
                    revocation_pubkey: broadcasted_holder_revokable_script.2,
                    channel_keys_id: self.channel_keys_id,
                    channel_value_satoshis: funding_spent.channel_parameters.channel_value_satoshis,
                    channel_transaction_parameters: Some(funding_spent.channel_parameters.clone()),
                }));
            }
        }
        // Our balance on a counterparty-broadcasted commitment transaction.
        if self.counterparty_payment_script == outp.script_pubkey {
            spendable_outputs.push(SpendableOutputDescriptor::StaticPaymentOutput(StaticPaymentOutputDescriptor {
                outpoint: OutPoint { txid, index: i as u16 },
                output: outp.clone(),
                channel_keys_id: self.channel_keys_id,
                channel_value_satoshis: funding_spent.channel_parameters.channel_value_satoshis,
                channel_transaction_parameters: Some(funding_spent.channel_parameters.clone()),
            }));
        }
        // Payments to our cooperative-close shutdown script, if one was negotiated.
        if self.shutdown_script.as_ref() == Some(&outp.script_pubkey) {
            spendable_outputs.push(SpendableOutputDescriptor::StaticOutput {
                outpoint: OutPoint { txid, index: i as u16 },
                output: outp.clone(),
                channel_keys_id: Some(self.channel_keys_id),
            });
        }
    }
    spendable_outputs
}
/// Checks whether the confirmed transaction pays funds back to any script we can assume we
/// own and, for each such output, queues a `MaturingOutput` event which will surface the
/// spendable output to the user once its confirmation threshold is reached.
#[rustfmt::skip]
fn check_tx_and_push_spendable_outputs<L: Deref>(
    &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithContext<L>,
) where L::Target: Logger {
    let funding_spent = get_confirmed_funding_scope!(self);
    let descriptors = self.get_spendable_outputs(funding_spent, tx);
    for descriptor in descriptors {
        let entry = OnchainEventEntry {
            txid: tx.compute_txid(),
            transaction: Some(tx.clone()),
            height,
            block_hash: Some(*block_hash),
            event: OnchainEvent::MaturingOutput { descriptor: descriptor.clone() },
        };
        log_info!(logger, "Received spendable output {}, spendable at height {}", log_spendable!(descriptor), entry.confirmation_threshold());
        self.onchain_events_awaiting_threshold_conf.push(entry);
    }
}
/// Returns the channel type features recorded in the current funding scope's channel
/// parameters.
fn channel_type_features(&self) -> &ChannelTypeFeatures {
    &self.funding.channel_parameters.channel_type_features
}
}
// Allows a `ChannelMonitor` bundled with a broadcaster, fee estimator, and logger to be used
// directly as a `chain::Listen` listener, delegating chain events to the monitor.
impl<Signer: EcdsaChannelSigner, T: Deref, F: Deref, L: Deref> chain::Listen
    for (ChannelMonitor<Signer>, T, F, L)
where
    T::Target: BroadcasterInterface,
    F::Target: FeeEstimator,
    L::Target: Logger,
{
    // Forwards filtered block-connection data to the wrapped monitor.
    fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
        self.0.block_connected(header, txdata, height, &*self.1, &*self.2, &self.3);
    }
    // Forwards reorg handling (rewind to `fork_point`) to the wrapped monitor.
    fn blocks_disconnected(&self, fork_point: BestBlock) {
        self.0.blocks_disconnected(fork_point, &*self.1, &*self.2, &self.3);
    }
}
// Allows anything dereferencing to a `ChannelMonitor`, bundled with a broadcaster, fee
// estimator, and logger, to be used as a `chain::Confirm` listener by delegating each
// callback to the monitor.
impl<Signer: EcdsaChannelSigner, M, T: Deref, F: Deref, L: Deref> chain::Confirm for (M, T, F, L)
where
    M: Deref<Target = ChannelMonitor<Signer>>,
    T::Target: BroadcasterInterface,
    F::Target: FeeEstimator,
    L::Target: Logger,
{
    fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) {
        self.0.transactions_confirmed(header, txdata, height, &*self.1, &*self.2, &self.3);
    }
    fn transaction_unconfirmed(&self, txid: &Txid) {
        self.0.transaction_unconfirmed(txid, &*self.1, &*self.2, &self.3);
    }
    fn best_block_updated(&self, header: &Header, height: u32) {
        self.0.best_block_updated(header, height, &*self.1, &*self.2, &self.3);
    }
    fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option<BlockHash>)> {
        self.0.get_relevant_txids()
    }
}
// Upper bound on the number of bytes we pre-allocate when deserializing length-prefixed
// collections, bounding memory use against corrupted or malicious length fields.
const MAX_ALLOC_SIZE: usize = 64 * 1024;
impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP)>
    for (BlockHash, ChannelMonitor<SP::EcdsaSigner>)
{
    /// Deserializes via the `Option` form, treating a decoded `None` (a monitor which is
    /// only suitable for skipping/archiving) as `DecodeError::UnknownRequiredFeature`.
    fn read<R: io::Read>(reader: &mut R, args: (&'a ES, &'b SP)) -> Result<Self, DecodeError> {
        <Option<Self>>::read(reader, args)?.ok_or(DecodeError::UnknownRequiredFeature)
    }
}
impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP)>
    for Option<(BlockHash, ChannelMonitor<SP::EcdsaSigner>)>
{
    // Deserializes a `ChannelMonitor` together with the best block hash it was last
    // persisted at. Fields MUST be read in exactly the order they are written by the
    // corresponding `Writeable` implementation. Returns `Ok(None)` only for ancient,
    // unreadable monitors which are safe to archive (see the end of this function).
    #[rustfmt::skip]
    fn read<R: io::Read>(reader: &mut R, args: (&'a ES, &'b SP)) -> Result<Self, DecodeError> {
        // Converts any `Err` from the given expression into `DecodeError::InvalidValue`.
        macro_rules! unwrap_obj {
            ($key: expr) => {
                match $key {
                    Ok(res) => res,
                    Err(_) => return Err(DecodeError::InvalidValue),
                }
            }
        }
        let (entropy_source, signer_provider) = args;
        // Fixed-format (pre-TLV) fields, in strict write order.
        let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
        let latest_update_id: u64 = Readable::read(reader)?;
        let commitment_transaction_number_obscure_factor = <U48 as Readable>::read(reader)?.0;
        let destination_script = Readable::read(reader)?;
        // 0 = present (three fields follow), 1 = absent.
        let broadcasted_holder_revokable_script = match <u8 as Readable>::read(reader)? {
            0 => {
                let revokable_address = Readable::read(reader)?;
                let per_commitment_point = Readable::read(reader)?;
                let revokable_script = Readable::read(reader)?;
                Some((revokable_address, per_commitment_point, revokable_script))
            },
            1 => { None },
            _ => return Err(DecodeError::InvalidValue),
        };
        // `mut` because a bug workaround below may rewrite this script for anchor channels.
        let mut counterparty_payment_script: ScriptBuf = Readable::read(reader)?;
        // An empty script is how "no shutdown script" is serialized.
        let shutdown_script = {
            let script = <ScriptBuf as Readable>::read(reader)?;
            if script.is_empty() { None } else { Some(script) }
        };
        let channel_keys_id = Readable::read(reader)?;
        let holder_revocation_basepoint = Readable::read(reader)?;
        // Technically this can fail and serialize fail a round-trip, but only for serialization of
        // barely-init'd ChannelMonitors that we can't do anything with.
        let outpoint = OutPoint {
            txid: Readable::read(reader)?,
            index: Readable::read(reader)?,
        };
        // Legacy fields still present on the wire but no longer used.
        let _funding_script: ScriptBuf = Readable::read(reader)?;
        let current_counterparty_commitment_txid = Readable::read(reader)?;
        let prev_counterparty_commitment_txid = Readable::read(reader)?;
        let counterparty_commitment_params = Readable::read(reader)?;
        let _funding_redeemscript: ScriptBuf = Readable::read(reader)?;
        let channel_value_satoshis = Readable::read(reader)?;
        // A zero index marks "no points yet"; a second point of all-zeroes marks "only one
        // point known".
        let their_cur_per_commitment_points = {
            let first_idx = <U48 as Readable>::read(reader)?.0;
            if first_idx == 0 {
                None
            } else {
                let first_point = Readable::read(reader)?;
                let second_point_slice: [u8; 33] = Readable::read(reader)?;
                if second_point_slice[0..32] == [0; 32] && second_point_slice[32] == 0 {
                    Some((first_idx, first_point, None))
                } else {
                    Some((first_idx, first_point, Some(unwrap_obj!(PublicKey::from_slice(&second_point_slice)))))
                }
            }
        };
        let on_holder_tx_csv: u16 = Readable::read(reader)?;
        let commitment_secrets = Readable::read(reader)?;
        // Reads one `HTLCOutputInCommitment` field-by-field from `reader`.
        macro_rules! read_htlc_in_commitment {
            () => {
                {
                    let offered: bool = Readable::read(reader)?;
                    let amount_msat: u64 = Readable::read(reader)?;
                    let cltv_expiry: u32 = Readable::read(reader)?;
                    let payment_hash: PaymentHash = Readable::read(reader)?;
                    let transaction_output_index: Option<u32> = Readable::read(reader)?;
                    HTLCOutputInCommitment {
                        offered, amount_msat, cltv_expiry, payment_hash, transaction_output_index
                    }
                }
            }
        }
        // Per-counterparty-commitment-txid HTLC lists. Capacities are clamped by
        // MAX_ALLOC_SIZE to bound allocation from bogus lengths; duplicate keys are
        // rejected as corruption.
        let counterparty_claimable_outpoints_len: u64 = Readable::read(reader)?;
        let mut counterparty_claimable_outpoints = hash_map_with_capacity(cmp::min(counterparty_claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 64));
        for _ in 0..counterparty_claimable_outpoints_len {
            let txid: Txid = Readable::read(reader)?;
            let htlcs_count: u64 = Readable::read(reader)?;
            let mut htlcs = Vec::with_capacity(cmp::min(htlcs_count as usize, MAX_ALLOC_SIZE / 32));
            for _ in 0..htlcs_count {
                htlcs.push((read_htlc_in_commitment!(), <Option<HTLCSource> as Readable>::read(reader)?.map(|o: HTLCSource| Box::new(o))));
            }
            if counterparty_claimable_outpoints.insert(txid, htlcs).is_some() {
                return Err(DecodeError::InvalidValue);
            }
        }
        // Map of counterparty commitment txids we've seen on chain to commitment numbers.
        let counterparty_commitment_txn_on_chain_len: u64 = Readable::read(reader)?;
        let mut counterparty_commitment_txn_on_chain = hash_map_with_capacity(cmp::min(counterparty_commitment_txn_on_chain_len as usize, MAX_ALLOC_SIZE / 32));
        for _ in 0..counterparty_commitment_txn_on_chain_len {
            let txid: Txid = Readable::read(reader)?;
            let commitment_number = <U48 as Readable>::read(reader)?.0;
            if counterparty_commitment_txn_on_chain.insert(txid, commitment_number).is_some() {
                return Err(DecodeError::InvalidValue);
            }
        }
        // Map of payment hashes to the commitment number of the commitment they appeared in.
        let counterparty_hash_commitment_number_len: u64 = Readable::read(reader)?;
        let mut counterparty_hash_commitment_number = hash_map_with_capacity(cmp::min(counterparty_hash_commitment_number_len as usize, MAX_ALLOC_SIZE / 32));
        for _ in 0..counterparty_hash_commitment_number_len {
            let payment_hash: PaymentHash = Readable::read(reader)?;
            let commitment_number = <U48 as Readable>::read(reader)?.0;
            if counterparty_hash_commitment_number.insert(payment_hash, commitment_number).is_some() {
                return Err(DecodeError::InvalidValue);
            }
        }
        // Legacy holder commitment representations; validated against the modern
        // `OnchainTxHandler` state further below.
        let prev_holder_signed_tx: Option<HolderSignedTx> =
            match <u8 as Readable>::read(reader)? {
                0 => None,
                1 => Some(Readable::read(reader)?),
                _ => return Err(DecodeError::InvalidValue),
            };
        let current_holder_signed_tx: HolderSignedTx = Readable::read(reader)?;
        let current_counterparty_commitment_number = <U48 as Readable>::read(reader)?.0;
        let current_holder_commitment_number = <U48 as Readable>::read(reader)?.0;
        // Preimages are stored bare here; hashes are rebuilt by hashing each preimage.
        // Payment info (if any) arrives later via the `payment_preimages_with_info` TLV.
        let payment_preimages_len: u64 = Readable::read(reader)?;
        let mut payment_preimages = hash_map_with_capacity(cmp::min(payment_preimages_len as usize, MAX_ALLOC_SIZE / 32));
        for _ in 0..payment_preimages_len {
            let preimage: PaymentPreimage = Readable::read(reader)?;
            let hash = PaymentHash(Sha256::hash(&preimage.0[..]).to_byte_array());
            if payment_preimages.insert(hash, (preimage, Vec::new())).is_some() {
                return Err(DecodeError::InvalidValue);
            }
        }
        // Wrapped in `Some` so the `optional_vec` TLV below can replace it wholesale.
        let pending_monitor_events_len: u64 = Readable::read(reader)?;
        let mut pending_monitor_events = Some(
            Vec::with_capacity(cmp::min(pending_monitor_events_len as usize, MAX_ALLOC_SIZE / (32 + 8*3))));
        for _ in 0..pending_monitor_events_len {
            let ev = match <u8 as Readable>::read(reader)? {
                0 => MonitorEvent::HTLCEvent(Readable::read(reader)?),
                1 => MonitorEvent::HolderForceClosed(outpoint),
                _ => return Err(DecodeError::InvalidValue)
            };
            pending_monitor_events.as_mut().unwrap().push(ev);
        }
        // `MaybeReadable` events which decode to `None` (unknown-but-skippable) are dropped.
        let pending_events_len: u64 = Readable::read(reader)?;
        let mut pending_events = Vec::with_capacity(cmp::min(pending_events_len as usize, MAX_ALLOC_SIZE / mem::size_of::<Event>()));
        for _ in 0..pending_events_len {
            if let Some(event) = MaybeReadable::read(reader)? {
                pending_events.push(event);
            }
        }
        let best_block = BestBlock::new(Readable::read(reader)?, Readable::read(reader)?);
        let waiting_threshold_conf_len: u64 = Readable::read(reader)?;
        let mut onchain_events_awaiting_threshold_conf = Vec::with_capacity(cmp::min(waiting_threshold_conf_len as usize, MAX_ALLOC_SIZE / 128));
        for _ in 0..waiting_threshold_conf_len {
            if let Some(val) = MaybeReadable::read(reader)? {
                onchain_events_awaiting_threshold_conf.push(val);
            }
        }
        let outputs_to_watch_len: u64 = Readable::read(reader)?;
        let mut outputs_to_watch = hash_map_with_capacity(cmp::min(outputs_to_watch_len as usize, MAX_ALLOC_SIZE / (mem::size_of::<Txid>() + mem::size_of::<u32>() + mem::size_of::<Vec<ScriptBuf>>())));
        for _ in 0..outputs_to_watch_len {
            let txid = Readable::read(reader)?;
            let outputs_len: u64 = Readable::read(reader)?;
            let mut outputs = Vec::with_capacity(cmp::min(outputs_len as usize, MAX_ALLOC_SIZE / (mem::size_of::<u32>() + mem::size_of::<ScriptBuf>())));
            for _ in 0..outputs_len {
                outputs.push((Readable::read(reader)?, Readable::read(reader)?));
            }
            if outputs_to_watch.insert(txid, outputs).is_some() {
                return Err(DecodeError::InvalidValue);
            }
        }
        let onchain_tx_handler: OnchainTxHandler<SP::EcdsaSigner> = ReadableArgs::read(
            reader, (entropy_source, signer_provider, channel_value_satoshis, channel_keys_id)
        )?;
        let lockdown_from_offchain = Readable::read(reader)?;
        let holder_tx_signed = Readable::read(reader)?;
        // Defaults for TLV-carried fields added in later versions. Fields wrapped in `Some`
        // here are `optional_vec`/`option` TLVs whose absence keeps the default.
        let mut funding_spend_confirmed = None;
        let mut htlcs_resolved_on_chain = Some(Vec::new());
        let mut htlcs_resolved_to_user = Some(new_hash_set());
        let mut funding_spend_seen = Some(false);
        let mut counterparty_node_id = None;
        let mut confirmed_commitment_tx_counterparty_output = None;
        let mut spendable_txids_confirmed = Some(Vec::new());
        let mut counterparty_fulfilled_htlcs = Some(new_hash_map());
        let mut initial_counterparty_commitment_info = None;
        let mut initial_counterparty_commitment_tx = None;
        let mut balances_empty_height = None;
        let mut channel_id = None;
        let mut holder_pays_commitment_tx_fee = None;
        let mut payment_preimages_with_info: Option<HashMap<_, _>> = None;
        let mut first_negotiated_funding_txo = RequiredWrapper(None);
        let mut channel_parameters = None;
        let mut pending_funding = None;
        let mut alternative_funding_confirmed = None;
        let mut is_manual_broadcast = RequiredWrapper(None);
        let mut funding_seen_onchain = RequiredWrapper(None);
        read_tlv_fields!(reader, {
            (1, funding_spend_confirmed, option),
            (3, htlcs_resolved_on_chain, optional_vec),
            (5, pending_monitor_events, optional_vec),
            (7, funding_spend_seen, option),
            (9, counterparty_node_id, option),
            (11, confirmed_commitment_tx_counterparty_output, option),
            (13, spendable_txids_confirmed, optional_vec),
            (15, counterparty_fulfilled_htlcs, option),
            (17, initial_counterparty_commitment_info, option),
            (19, channel_id, option),
            (21, balances_empty_height, option),
            (23, holder_pays_commitment_tx_fee, option),
            (25, payment_preimages_with_info, option),
            (27, first_negotiated_funding_txo, (default_value, outpoint)),
            (29, initial_counterparty_commitment_tx, option),
            (31, channel_parameters, (option: ReadableArgs, None)),
            (32, pending_funding, optional_vec),
            (33, htlcs_resolved_to_user, option),
            (34, alternative_funding_confirmed, option),
            (35, is_manual_broadcast, (default_value, false)),
            (37, funding_seen_onchain, (default_value, true)),
        });
        // Note that `payment_preimages_with_info` was added (and is always written) in LDK 0.1, so
        // we can use it to determine if this monitor was last written by LDK 0.1 or later.
        let written_by_0_1_or_later = payment_preimages_with_info.is_some();
        if let Some(payment_preimages_with_info) = payment_preimages_with_info {
            // The TLV map must carry exactly the same hash -> preimage pairs as the legacy
            // list read above; any mismatch indicates corruption.
            if payment_preimages_with_info.len() != payment_preimages.len() {
                return Err(DecodeError::InvalidValue);
            }
            for (payment_hash, (payment_preimage, _)) in payment_preimages.iter() {
                // Note that because `payment_preimages` is built back from preimages directly,
                // checking that the two maps have the same hash -> preimage pairs also checks that
                // the payment hashes in `payment_preimages_with_info`'s preimages match its
                // hashes.
                let new_preimage = payment_preimages_with_info.get(payment_hash).map(|(p, _)| p);
                if new_preimage != Some(payment_preimage) {
                    return Err(DecodeError::InvalidValue);
                }
            }
            payment_preimages = payment_preimages_with_info;
        }
        // `HolderForceClosedWithInfo` replaced `HolderForceClosed` in v0.0.122. If we have both
        // events, we can remove the `HolderForceClosed` event and just keep the `HolderForceClosedWithInfo`.
        if let Some(ref mut pending_monitor_events) = pending_monitor_events {
            if pending_monitor_events.iter().any(|e| matches!(e, MonitorEvent::HolderForceClosed(_))) &&
                pending_monitor_events.iter().any(|e| matches!(e, MonitorEvent::HolderForceClosedWithInfo { .. }))
            {
                pending_monitor_events.retain(|e| !matches!(e, MonitorEvent::HolderForceClosed(_)));
            }
        }
        // Older serializations carried the channel parameters only inside the onchain tx
        // handler; fall back to those when the TLV is absent.
        let channel_parameters = channel_parameters.unwrap_or_else(|| {
            onchain_tx_handler.channel_parameters().clone()
        });
        // Monitors for anchor outputs channels opened in v0.0.116 suffered from a bug in which the
        // wrong `counterparty_payment_script` was being tracked. Fix it now on deserialization to
        // give them a chance to recognize the spendable output.
        if channel_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() &&
            counterparty_payment_script.is_p2wpkh()
        {
            let payment_point = channel_parameters.holder_pubkeys.payment_point;
            counterparty_payment_script =
                chan_utils::get_to_countersigner_keyed_anchor_redeemscript(&payment_point).to_p2wsh();
        }
        let channel_id = channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(outpoint));
        // Rebuild modern holder commitment data from the legacy `HolderSignedTx`; in debug
        // builds, also round-trip the legacy encoding to assert the conversion is lossless.
        let (current_holder_commitment_tx, current_holder_htlc_data) = {
            let holder_commitment_tx = onchain_tx_handler.current_holder_commitment_tx();
            #[cfg(debug_assertions)]
            let holder_signed_tx_copy = current_holder_signed_tx.clone();
            let holder_commitment_htlc_data = CommitmentHTLCData::try_from(current_holder_signed_tx)
                .map_err(|_| DecodeError::InvalidValue)?;
            #[cfg(debug_assertions)] {
                let mut stream = crate::util::ser::VecWriter(Vec::new());
                write_legacy_holder_commitment_data(
                    &mut stream, &holder_commitment_tx, &holder_commitment_htlc_data
                ).map_err(|_| DecodeError::InvalidValue)?;
                let mut cursor = crate::io::Cursor::new(stream.0);
                if holder_signed_tx_copy != <HolderSignedTx as Readable>::read(&mut cursor)? {
                    return Err(DecodeError::InvalidValue);
                }
            }
            (holder_commitment_tx.clone(), holder_commitment_htlc_data)
        };
        // Same reconstruction (and debug round-trip check) for the previous holder commitment,
        // which must be present in the onchain handler whenever the legacy field was written.
        let (prev_holder_commitment_tx, prev_holder_htlc_data) =
            if let Some(prev_holder_signed_tx) = prev_holder_signed_tx {
                let holder_commitment_tx = onchain_tx_handler.prev_holder_commitment_tx();
                if holder_commitment_tx.is_none() {
                    return Err(DecodeError::InvalidValue);
                }
                #[cfg(debug_assertions)]
                let holder_signed_tx_copy = prev_holder_signed_tx.clone();
                let holder_commitment_htlc_data = CommitmentHTLCData::try_from(prev_holder_signed_tx)
                    .map_err(|_| DecodeError::InvalidValue)?;
                #[cfg(debug_assertions)] {
                    let mut stream = crate::util::ser::VecWriter(Vec::new());
                    write_legacy_holder_commitment_data(
                        &mut stream, &holder_commitment_tx.unwrap(), &holder_commitment_htlc_data
                    ).map_err(|_| DecodeError::InvalidValue)?;
                    let mut cursor = crate::io::Cursor::new(stream.0);
                    if holder_signed_tx_copy != <HolderSignedTx as Readable>::read(&mut cursor)? {
                        return Err(DecodeError::InvalidValue);
                    }
                }
                (holder_commitment_tx.cloned(), Some(holder_commitment_htlc_data))
            } else {
                (None, None)
            };
        // Placeholder used only for pre-v0.0.110 monitors missing a counterparty node id;
        // such monitors are rejected (or archived) below before the placeholder matters.
        let dummy_node_id = PublicKey::from_slice(&[2; 33]).unwrap();
        let monitor = ChannelMonitor::from_impl(ChannelMonitorImpl {
            funding: FundingScope {
                channel_parameters,
                current_counterparty_commitment_txid,
                prev_counterparty_commitment_txid,
                counterparty_claimable_outpoints,
                current_holder_commitment_tx,
                prev_holder_commitment_tx,
            },
            pending_funding: pending_funding.unwrap_or(vec![]),
            is_manual_broadcast: is_manual_broadcast.0.unwrap(),
            // Older monitors prior to LDK 0.2 assume this is `true` when absent
            // during upgrade so holder broadcasts aren't gated unexpectedly.
            funding_seen_onchain: funding_seen_onchain.0.unwrap(),
            latest_update_id,
            commitment_transaction_number_obscure_factor,
            destination_script,
            broadcasted_holder_revokable_script,
            counterparty_payment_script,
            shutdown_script,
            channel_keys_id,
            holder_revocation_basepoint,
            channel_id,
            first_negotiated_funding_txo: first_negotiated_funding_txo.0.unwrap(),
            counterparty_commitment_params,
            their_cur_per_commitment_points,
            on_holder_tx_csv,
            commitment_secrets,
            counterparty_commitment_txn_on_chain,
            counterparty_hash_commitment_number,
            counterparty_fulfilled_htlcs: counterparty_fulfilled_htlcs.unwrap(),
            current_counterparty_commitment_number,
            current_holder_commitment_number,
            payment_preimages,
            pending_monitor_events: pending_monitor_events.unwrap(),
            pending_events,
            is_processing_pending_events: false,
            onchain_events_awaiting_threshold_conf,
            outputs_to_watch,
            onchain_tx_handler,
            lockdown_from_offchain,
            holder_tx_signed,
            holder_pays_commitment_tx_fee,
            funding_spend_seen: funding_spend_seen.unwrap(),
            funding_spend_confirmed,
            confirmed_commitment_tx_counterparty_output,
            htlcs_resolved_on_chain: htlcs_resolved_on_chain.unwrap(),
            htlcs_resolved_to_user: htlcs_resolved_to_user.unwrap(),
            spendable_txids_confirmed: spendable_txids_confirmed.unwrap(),
            best_block,
            counterparty_node_id: counterparty_node_id.unwrap_or(dummy_node_id),
            initial_counterparty_commitment_info,
            initial_counterparty_commitment_tx,
            balances_empty_height,
            failed_back_htlc_ids: new_hash_set(),
            current_holder_htlc_data,
            prev_holder_htlc_data,
            alternative_funding_confirmed,
            written_by_0_1_or_later,
        });
        // Monitors without a counterparty node id were last written before v0.0.118 and are
        // no longer supported.
        if counterparty_node_id.is_none() {
            if (holder_tx_signed || lockdown_from_offchain) && monitor.get_claimable_balances().is_empty() {
                // If the monitor is no longer readable, but it is a candidate for archiving,
                // return Ok(None) to allow it to be skipped and not loaded.
                return Ok(None);
            } else {
                panic!("Found monitor for channel {channel_id} with no updates since v0.0.118. \
These monitors are no longer supported. \
To continue, run a v0.1 release, send/route a payment over the channel or close it.");
            }
        }
        Ok(Some((best_block.block_hash, monitor)))
    }
}
#[cfg(test)]
mod tests {
use bitcoin::amount::Amount;
use bitcoin::hash_types::{BlockHash, Txid};
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::Hash;
use bitcoin::hex::FromHex;
use bitcoin::locktime::absolute::LockTime;
use bitcoin::network::Network;
use bitcoin::opcodes;
use bitcoin::script::{Builder, ScriptBuf};
use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1::{PublicKey, SecretKey};
use bitcoin::sighash;
use bitcoin::sighash::EcdsaSighashType;
use bitcoin::transaction::OutPoint as BitcoinOutPoint;
use bitcoin::transaction::{Transaction, TxIn, TxOut, Version};
use bitcoin::{Sequence, Witness};
use crate::chain::chaininterface::LowerBoundedFeeEstimator;
use crate::events::ClosureReason;
use super::ChannelMonitorUpdateStep;
use crate::chain::channelmonitor::{ChannelMonitor, WithChannelMonitor};
use crate::chain::package::{
weight_offered_htlc, weight_received_htlc, weight_revoked_offered_htlc,
weight_revoked_received_htlc, WEIGHT_REVOKED_OUTPUT,
};
use crate::chain::transaction::OutPoint;
use crate::chain::{BestBlock, Confirm};
use crate::io;
use crate::ln::chan_utils::{
self, ChannelPublicKeys, ChannelTransactionParameters,
CounterpartyChannelTransactionParameters, HTLCOutputInCommitment,
HolderCommitmentTransaction,
};
use crate::ln::channel_keys::{
DelayedPaymentBasepoint, DelayedPaymentKey, HtlcBasepoint, RevocationBasepoint,
RevocationKey,
};
use crate::ln::channelmanager::{HTLCSource, PaymentId, RecipientOnionFields};
use crate::ln::functional_test_utils::*;
use crate::ln::script::ShutdownScript;
use crate::ln::types::ChannelId;
use crate::sign::{ChannelSigner, InMemorySigner};
use crate::sync::Arc;
use crate::types::features::ChannelTypeFeatures;
use crate::types::payment::{PaymentHash, PaymentPreimage};
use crate::util::logger::Logger;
use crate::util::ser::{ReadableArgs, Writeable};
use crate::util::test_utils::{TestBroadcaster, TestFeeEstimator, TestLogger};
use crate::{
check_added_monitors, check_spends, get_local_commitment_txn, get_monitor,
get_route_and_payment_hash,
};
#[allow(unused_imports)]
use crate::prelude::*;
use std::str::FromStr;
// Tests that once a funding-spend transaction confirms, the ChannelMonitor refuses further
// commitment updates while still applying subsequent preimage updates in the same
// ChannelMonitorUpdate. `use_local_txn` selects whether the broadcast commitment is our own
// (holder) or the counterparty's.
#[rustfmt::skip]
fn do_test_funding_spend_refuses_updates(use_local_txn: bool) {
    // Previously, monitor updates were allowed freely even after a funding-spend transaction
    // confirmed. This would allow a race condition where we could receive a payment (including
    // the counterparty revoking their broadcasted state!) and accept it without recourse as
    // long as the ChannelMonitor receives the block first, the full commitment update dance
    // occurs after the block is connected, and before the ChannelManager receives the block.
    // Obviously this is an incredibly contrived race given the counterparty would be risking
    // their full channel balance for it, but its worth fixing nonetheless as it makes the
    // potential ChannelMonitor states simpler to reason about.
    //
    // This test checks said behavior, as well as ensuring a ChannelMonitorUpdate with multiple
    // updates is handled correctly in such conditions.
    let chanmon_cfgs = create_chanmon_cfgs(3);
    let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
    let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
    let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
    let channel = create_announced_chan_between_nodes(&nodes, 0, 1);
    create_announced_chan_between_nodes(&nodes, 1, 2);
    // Rebalance somewhat
    send_payment(&nodes[0], &[&nodes[1]], 10_000_000);
    // First route two payments for testing at the end
    let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000).0;
    let payment_preimage_2 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000).0;
    let local_txn = get_local_commitment_txn!(nodes[1], channel.2);
    assert_eq!(local_txn.len(), 1);
    let remote_txn = get_local_commitment_txn!(nodes[0], channel.2);
    assert_eq!(remote_txn.len(), 3); // Commitment and two HTLC-Timeouts
    check_spends!(remote_txn[1], remote_txn[0]);
    check_spends!(remote_txn[2], remote_txn[0]);
    let broadcast_tx = if use_local_txn { &local_txn[0] } else { &remote_txn[0] };
    // Connect a commitment transaction, but only to the ChainMonitor/ChannelMonitor. The
    // channel is now closed, but the ChannelManager doesn't know that yet.
    let new_header = create_dummy_header(nodes[0].best_block_info().0, 0);
    let conf_height = nodes[0].best_block_info().1 + 1;
    nodes[1].chain_monitor.chain_monitor.transactions_confirmed(&new_header,
        &[(0, broadcast_tx)], conf_height);
    // Snapshot the monitor (via a serialization round-trip) as it was when the funding spend
    // confirmed, so the update below can be replayed against that exact state.
    let (_, pre_update_monitor) = <(BlockHash, ChannelMonitor<_>)>::read(
        &mut io::Cursor::new(&get_monitor!(nodes[1], channel.2).encode()),
        (&nodes[1].keys_manager.backing, &nodes[1].keys_manager.backing)).unwrap();
    // If the ChannelManager tries to update the channel, however, the ChainMonitor will pass
    // the update through to the ChannelMonitor which will refuse it (as the channel is closed).
    let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 100_000);
    nodes[1].node.send_payment_with_route(route, payment_hash,
        RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
    ).unwrap();
    check_added_monitors!(nodes[1], 1);
    // Build a new ChannelMonitorUpdate which contains both the failing commitment tx update
    // and provides the claim preimages for the two pending HTLCs. The first update generates
    // an error, but the point of this test is to ensure the later updates are still applied.
    let replay_update = {
        let monitor_updates = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
        let mut replay_update = monitor_updates.get(&channel.2).unwrap().iter().next_back().unwrap().clone();
        assert_eq!(replay_update.updates.len(), 1);
        if let ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { .. } = replay_update.updates[0] {
        } else { panic!(); }
        replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage {
            payment_preimage: payment_preimage_1, payment_info: None,
        });
        replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage {
            payment_preimage: payment_preimage_2, payment_info: None,
        });
        replay_update
    };
    let broadcaster = TestBroadcaster::with_blocks(Arc::clone(&nodes[1].blocks));
    // The commitment update must be rejected now that the funding has been spent...
    assert!(
        pre_update_monitor.update_monitor(&replay_update, &&broadcaster, &&chanmon_cfgs[1].fee_estimator, &nodes[1].logger)
        .is_err());
    // Even though we error'd on the first update, we should still have generated an HTLC claim
    // transaction
    let txn_broadcasted = broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
    assert!(txn_broadcasted.len() >= 2);
    let htlc_txn = txn_broadcasted.iter().filter(|tx| {
        assert_eq!(tx.input.len(), 1);
        tx.input[0].previous_output.txid == broadcast_tx.compute_txid()
    }).collect::<Vec<_>>();
    assert_eq!(htlc_txn.len(), 2);
    check_spends!(htlc_txn[0], broadcast_tx);
    check_spends!(htlc_txn[1], broadcast_tx);
    check_closed_broadcast(&nodes[1], 1, true);
    check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
    check_added_monitors(&nodes[1], 1);
}
#[test]
fn test_funding_spend_refuses_updates() {
    // Exercise both the holder-commitment and counterparty-commitment broadcast paths.
    for use_local_txn in [true, false] {
        do_test_funding_spend_refuses_updates(use_local_txn);
    }
}
#[test]
#[rustfmt::skip]
fn test_prune_preimages() {
	// Tests that payment preimages held by a `ChannelMonitor` are pruned once neither any
	// unrevoked counterparty commitment transaction nor the current/previous holder
	// commitment transaction contains an HTLC for the corresponding payment hash, i.e.
	// once the preimage can no longer be needed to claim anything on chain.
	let secp_ctx = Secp256k1::new();
	let logger = Arc::new(TestLogger::new());
	let broadcaster = Arc::new(TestBroadcaster::new(Network::Testnet));
	let fee_estimator = TestFeeEstimator::new(253);

	let dummy_key = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());

	// Build 20 (preimage, hash) pairs; slices of this list are installed as the HTLCs of
	// the various commitment transactions below, so that different reveal/update steps
	// overlap on different subsets of preimages.
	let mut preimages = Vec::new();
	{
		for i in 0..20 {
			let preimage = PaymentPreimage([i; 32]);
			let hash = PaymentHash(Sha256::hash(&preimage.0[..]).to_byte_array());
			preimages.push((preimage, hash));
		}
	}

	let dummy_source = HTLCSource::dummy();

	// Turns a slice of (preimage, hash) pairs into offered HTLCs with sequential
	// transaction output indices (values/expiries are irrelevant for pruning).
	macro_rules! preimages_slice_to_htlcs {
		($preimages_slice: expr) => {
			{
				let mut res = Vec::new();
				for (idx, preimage) in $preimages_slice.iter().enumerate() {
					res.push(HTLCOutputInCommitment {
						offered: true,
						amount_msat: 0,
						cltv_expiry: 0,
						payment_hash: preimage.1.clone(),
						transaction_output_index: Some(idx as u32),
					});
				}
				res
			}
		}
	}
	// As above, but paired with `None` HTLC sources, the shape expected by
	// `provide_latest_counterparty_commitment_tx`.
	macro_rules! preimages_slice_to_htlc_outputs {
		($preimages_slice: expr) => {
			preimages_slice_to_htlcs!($preimages_slice).into_iter().map(|htlc| (htlc, None)).collect()
		}
	}
	// A syntactically-valid signature over a fixed message; its contents are never
	// verified in this test.
	let dummy_sig = crate::crypto::utils::sign(&secp_ctx,
		&bitcoin::secp256k1::Message::from_digest([42; 32]),
		&SecretKey::from_slice(&[42; 32]).unwrap());

	// Asserts that every hash in the given slice is still tracked in the monitor's
	// `payment_preimages` map.
	macro_rules! test_preimages_exist {
		($preimages_slice: expr, $monitor: expr) => {
			for preimage in $preimages_slice {
				assert!($monitor.inner.lock().unwrap().payment_preimages.contains_key(&preimage.1));
			}
		}
	}

	// Throwaway signer/keys — only needed to construct a monitor, never validated here.
	let keys = InMemorySigner::new(
		SecretKey::from_slice(&[41; 32]).unwrap(),
		SecretKey::from_slice(&[41; 32]).unwrap(),
		SecretKey::from_slice(&[41; 32]).unwrap(),
		SecretKey::from_slice(&[41; 32]).unwrap(),
		true,
		SecretKey::from_slice(&[41; 32]).unwrap(),
		SecretKey::from_slice(&[41; 32]).unwrap(),
		[41; 32],
		[0; 32],
		[0; 32],
	);

	let counterparty_pubkeys = ChannelPublicKeys {
		funding_pubkey: PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[44; 32]).unwrap()),
		revocation_basepoint: RevocationBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap())),
		payment_point: PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[46; 32]).unwrap()),
		delayed_payment_basepoint: DelayedPaymentBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[47; 32]).unwrap())),
		htlc_basepoint: HtlcBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[48; 32]).unwrap()))
	};
	let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::MAX };
	let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
	let channel_parameters = ChannelTransactionParameters {
		holder_pubkeys: keys.pubkeys(&secp_ctx),
		holder_selected_contest_delay: 66,
		is_outbound_from_holder: true,
		counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
			pubkeys: counterparty_pubkeys,
			selected_contest_delay: 67,
		}),
		funding_outpoint: Some(funding_outpoint),
		splice_parent_funding_txid: None,
		channel_type_features: ChannelTypeFeatures::only_static_remote_key(),
		channel_value_satoshis: 0,
	};
	// Prune with one old state and a holder commitment tx holding a few overlaps with the
	// old state.
	let shutdown_pubkey = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let shutdown_script = ShutdownScript::new_p2wpkh_from_pubkey(shutdown_pubkey);
	let best_block = BestBlock::from_network(Network::Testnet);
	let monitor = ChannelMonitor::new(
		Secp256k1::new(), keys, Some(shutdown_script.into_inner()), 0, &ScriptBuf::new(),
		&channel_parameters, true, 0, HolderCommitmentTransaction::dummy(0, funding_outpoint, Vec::new()),
		best_block, dummy_key, channel_id, false,
	);

	// Holder commitment holds HTLCs for preimages 0..10.
	let nondust_htlcs = preimages_slice_to_htlcs!(preimages[0..10]);
	let dummy_commitment_tx = HolderCommitmentTransaction::dummy(0, funding_outpoint, nondust_htlcs);
	// These HTLCs now have their output indices assigned
	let nondust_htlcs = dummy_commitment_tx.nondust_htlcs();

	// Counterparty commitment numbers count down from 2^48 - 1 (281474976710655);
	// the two counterparty states cover preimages 5..15 and 15..20 respectively.
	monitor.provide_latest_holder_commitment_tx(dummy_commitment_tx.clone(),
		&nondust_htlcs.iter().map(|htlc| (htlc.clone(), Some(dummy_sig), Some(dummy_source.clone()))).collect::<Vec<_>>());
	monitor.provide_latest_counterparty_commitment_tx(Txid::from_byte_array(Sha256::hash(b"1").to_byte_array()),
		preimages_slice_to_htlc_outputs!(preimages[5..15]), 281474976710655, dummy_key);
	monitor.provide_latest_counterparty_commitment_tx(Txid::from_byte_array(Sha256::hash(b"2").to_byte_array()),
		preimages_slice_to_htlc_outputs!(preimages[15..20]), 281474976710654, dummy_key);
	// Hand all 20 preimages to the monitor; pruning is then driven by revocation below.
	for &(ref preimage, ref hash) in preimages.iter() {
		let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_estimator);
		monitor.provide_payment_preimage_unsafe_legacy(
			hash, preimage, &broadcaster, &bounded_fee_estimator, &logger
		);
	}

	// Now provide a secret, pruning preimages 10-15
	let mut secret = [0; 32];
	secret[0..32].clone_from_slice(&<Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
	monitor.provide_secret(281474976710655, secret.clone()).unwrap();
	assert_eq!(monitor.inner.lock().unwrap().payment_preimages.len(), 15);
	test_preimages_exist!(&preimages[0..10], monitor);
	test_preimages_exist!(&preimages[15..20], monitor);

	monitor.provide_latest_counterparty_commitment_tx(Txid::from_byte_array(Sha256::hash(b"3").to_byte_array()),
		preimages_slice_to_htlc_outputs!(preimages[17..20]), 281474976710653, dummy_key);

	// Now provide a further secret, pruning preimages 15-17
	secret[0..32].clone_from_slice(&<Vec<u8>>::from_hex("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
	monitor.provide_secret(281474976710654, secret.clone()).unwrap();
	assert_eq!(monitor.inner.lock().unwrap().payment_preimages.len(), 13);
	test_preimages_exist!(&preimages[0..10], monitor);
	test_preimages_exist!(&preimages[17..20], monitor);

	monitor.provide_latest_counterparty_commitment_tx(Txid::from_byte_array(Sha256::hash(b"4").to_byte_array()),
		preimages_slice_to_htlc_outputs!(preimages[18..20]), 281474976710652, dummy_key);

	// Now update holder commitment tx info, pruning only element 18 as we still care about the
	// previous commitment tx's preimages too
	let nondust_htlcs = preimages_slice_to_htlcs!(preimages[0..5]);
	let dummy_commitment_tx = HolderCommitmentTransaction::dummy(0, funding_outpoint, nondust_htlcs);
	// These HTLCs now have their output indices assigned
	let nondust_htlcs = dummy_commitment_tx.nondust_htlcs();
	monitor.provide_latest_holder_commitment_tx(dummy_commitment_tx.clone(),
		&nondust_htlcs.iter().map(|htlc| (htlc.clone(), Some(dummy_sig), Some(dummy_source.clone()))).collect::<Vec<_>>());
	secret[0..32].clone_from_slice(&<Vec<u8>>::from_hex("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
	monitor.provide_secret(281474976710653, secret.clone()).unwrap();
	assert_eq!(monitor.inner.lock().unwrap().payment_preimages.len(), 12);
	test_preimages_exist!(&preimages[0..10], monitor);
	test_preimages_exist!(&preimages[18..20], monitor);

	// But if we do it again, we'll prune 5-10
	let nondust_htlcs = preimages_slice_to_htlcs!(preimages[0..3]);
	let dummy_commitment_tx = HolderCommitmentTransaction::dummy(0, funding_outpoint, nondust_htlcs);
	// These HTLCs now have their output indices assigned
	let nondust_htlcs = dummy_commitment_tx.nondust_htlcs();
	monitor.provide_latest_holder_commitment_tx(dummy_commitment_tx.clone(),
		&nondust_htlcs.iter().map(|htlc| (htlc.clone(), Some(dummy_sig), Some(dummy_source.clone()))).collect::<Vec<_>>());
	secret[0..32].clone_from_slice(&<Vec<u8>>::from_hex("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
	monitor.provide_secret(281474976710652, secret.clone()).unwrap();
	assert_eq!(monitor.inner.lock().unwrap().payment_preimages.len(), 5);
	test_preimages_exist!(&preimages[0..5], monitor);
}
#[test]
#[rustfmt::skip]
fn test_claim_txn_weight_computation() {
	// We test claim txn weight, knowing that we want expected weight and
	// not actual case to avoid sigs and time-lock delays hell variances.
	//
	// For each claim type we build a transaction, sign its inputs with realistic witness
	// shapes, and assert that the precomputed weight constants match the real serialized
	// weight after normalizing actual DER signature lengths to the worst-case 73 bytes.
	let secp_ctx = Secp256k1::new();
	let privkey = SecretKey::from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
	let pubkey = PublicKey::from_secret_key(&secp_ctx, &privkey);

	use crate::ln::channel_keys::{HtlcKey, HtlcBasepoint};

	// Signs input $idx of the transaction in $sighash_parts and pushes a witness whose
	// element shapes match the claim type implied by the expected $weight constant.
	macro_rules! sign_input {
		($sighash_parts: expr, $idx: expr, $amount: expr, $weight: expr, $sum_actual_sigs: expr, $opt_anchors: expr) => {
			let htlc = HTLCOutputInCommitment {
				// Offered iff the expected weight is one of the offered-HTLC weights.
				offered: *$weight == weight_revoked_offered_htlc($opt_anchors) || *$weight == weight_offered_htlc($opt_anchors),
				amount_msat: 0,
				cltv_expiry: 2 << 16,
				payment_hash: PaymentHash([1; 32]),
				transaction_output_index: Some($idx as u32),
			};
			let redeem_script = if *$weight == WEIGHT_REVOKED_OUTPUT { chan_utils::get_revokeable_redeemscript(&RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(pubkey), &pubkey), 256, &DelayedPaymentKey::from_basepoint(&secp_ctx, &DelayedPaymentBasepoint::from(pubkey), &pubkey)) } else { chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, $opt_anchors, &HtlcKey::from_basepoint(&secp_ctx, &HtlcBasepoint::from(pubkey), &pubkey), &HtlcKey::from_basepoint(&secp_ctx, &HtlcBasepoint::from(pubkey), &pubkey), &RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(pubkey), &pubkey)) };
			let sighash = hash_to_message!(&$sighash_parts.p2wsh_signature_hash($idx, &redeem_script, $amount, EcdsaSighashType::All).unwrap()[..]);
			let sig = secp_ctx.sign_ecdsa(&sighash, &privkey);
			let mut ser_sig = sig.serialize_der().to_vec();
			ser_sig.push(EcdsaSighashType::All as u8);
			// Track the actual (variable-length) signature size so the final assertion can
			// normalize to the worst-case 73-byte signature assumed by the weight constants.
			$sum_actual_sigs += ser_sig.len() as u64;
			let witness = $sighash_parts.witness_mut($idx).unwrap();
			witness.push(ser_sig);
			if *$weight == WEIGHT_REVOKED_OUTPUT {
				witness.push(vec![1]);
			} else if *$weight == weight_revoked_offered_htlc($opt_anchors) || *$weight == weight_revoked_received_htlc($opt_anchors) {
				// `serialize()` borrows the key; no clone needed.
				witness.push(pubkey.serialize().to_vec());
			} else if *$weight == weight_received_htlc($opt_anchors) {
				witness.push(vec![0]);
			} else {
				witness.push(PaymentPreimage([1; 32]).0.to_vec());
			}
			witness.push(redeem_script.into_bytes());
			let witness = witness.to_vec();
			println!("witness[0] {}", witness[0].len());
			println!("witness[1] {}", witness[1].len());
			println!("witness[2] {}", witness[2].len());
		}
	}

	let script_pubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script();
	let txid = Txid::from_str("56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d").unwrap();

	// Justice tx with 1 to_holder, 2 revoked offered HTLCs, 1 revoked received HTLCs
	for channel_type_features in [ChannelTypeFeatures::only_static_remote_key(), ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies()].iter() {
		let mut claim_tx = Transaction { version: Version(0), lock_time: LockTime::ZERO, input: Vec::new(), output: Vec::new() };
		let mut sum_actual_sigs = 0;
		for i in 0..4 {
			claim_tx.input.push(TxIn {
				previous_output: BitcoinOutPoint {
					txid,
					vout: i,
				},
				script_sig: ScriptBuf::new(),
				sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
				witness: Witness::new(),
			});
		}
		claim_tx.output.push(TxOut {
			script_pubkey: script_pubkey.clone(),
			value: Amount::ZERO,
		});
		let base_weight = claim_tx.weight().to_wu();
		let inputs_weight = [WEIGHT_REVOKED_OUTPUT, weight_revoked_offered_htlc(channel_type_features), weight_revoked_offered_htlc(channel_type_features), weight_revoked_received_htlc(channel_type_features)];
		let mut inputs_total_weight = 2; // count segwit flags
		{
			let mut sighash_parts = sighash::SighashCache::new(&mut claim_tx);
			for (idx, inp) in inputs_weight.iter().enumerate() {
				sign_input!(sighash_parts, idx, Amount::ZERO, inp, sum_actual_sigs, channel_type_features);
				inputs_total_weight += inp;
			}
		}
		assert_eq!(base_weight + inputs_total_weight, claim_tx.weight().to_wu() + /* max_length_sig */ (73 * inputs_weight.len() as u64 - sum_actual_sigs));
	}

	// Claim tx with 1 offered HTLCs, 3 received HTLCs
	for channel_type_features in [ChannelTypeFeatures::only_static_remote_key(), ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies()].iter() {
		let mut claim_tx = Transaction { version: Version(0), lock_time: LockTime::ZERO, input: Vec::new(), output: Vec::new() };
		let mut sum_actual_sigs = 0;
		for i in 0..4 {
			claim_tx.input.push(TxIn {
				previous_output: BitcoinOutPoint {
					txid,
					vout: i,
				},
				script_sig: ScriptBuf::new(),
				sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
				witness: Witness::new(),
			});
		}
		claim_tx.output.push(TxOut {
			script_pubkey: script_pubkey.clone(),
			value: Amount::ZERO,
		});
		let base_weight = claim_tx.weight().to_wu();
		let inputs_weight = [weight_offered_htlc(channel_type_features), weight_received_htlc(channel_type_features), weight_received_htlc(channel_type_features), weight_received_htlc(channel_type_features)];
		let mut inputs_total_weight = 2; // count segwit flags
		{
			let mut sighash_parts = sighash::SighashCache::new(&mut claim_tx);
			for (idx, inp) in inputs_weight.iter().enumerate() {
				sign_input!(sighash_parts, idx, Amount::ZERO, inp, sum_actual_sigs, channel_type_features);
				inputs_total_weight += inp;
			}
		}
		assert_eq!(base_weight + inputs_total_weight, claim_tx.weight().to_wu() + /* max_length_sig */ (73 * inputs_weight.len() as u64 - sum_actual_sigs));
	}

	// Justice tx with 1 revoked HTLC-Success tx output
	for channel_type_features in [ChannelTypeFeatures::only_static_remote_key(), ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies()].iter() {
		let mut claim_tx = Transaction { version: Version(0), lock_time: LockTime::ZERO, input: Vec::new(), output: Vec::new() };
		let mut sum_actual_sigs = 0;
		claim_tx.input.push(TxIn {
			previous_output: BitcoinOutPoint {
				txid,
				vout: 0,
			},
			script_sig: ScriptBuf::new(),
			sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
			witness: Witness::new(),
		});
		claim_tx.output.push(TxOut {
			script_pubkey: script_pubkey.clone(),
			value: Amount::ZERO,
		});
		let base_weight = claim_tx.weight().to_wu();
		let inputs_weight = [WEIGHT_REVOKED_OUTPUT];
		let mut inputs_total_weight = 2; // count segwit flags
		{
			let mut sighash_parts = sighash::SighashCache::new(&mut claim_tx);
			for (idx, inp) in inputs_weight.iter().enumerate() {
				sign_input!(sighash_parts, idx, Amount::ZERO, inp, sum_actual_sigs, channel_type_features);
				inputs_total_weight += inp;
			}
		}
		assert_eq!(base_weight + inputs_total_weight, claim_tx.weight().to_wu() + /* max_length_sig */ (73 * inputs_weight.len() as u64 - sum_actual_sigs));
	}
}
#[test]
#[rustfmt::skip]
fn test_with_channel_monitor_impl_logger() {
	// Tests that `WithChannelMonitor` attaches the expected logging context (peer id and
	// channel id) at every log level. The `TestLogger` assertion below checks the context
	// of 6 records, one per level logged here.
	let secp_ctx = Secp256k1::new();
	let logger = Arc::new(TestLogger::new());

	let dummy_key = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());

	// Throwaway signer/keys — only needed to construct a monitor, never validated here.
	let keys = InMemorySigner::new(
		SecretKey::from_slice(&[41; 32]).unwrap(),
		SecretKey::from_slice(&[41; 32]).unwrap(),
		SecretKey::from_slice(&[41; 32]).unwrap(),
		SecretKey::from_slice(&[41; 32]).unwrap(),
		true,
		SecretKey::from_slice(&[41; 32]).unwrap(),
		SecretKey::from_slice(&[41; 32]).unwrap(),
		[41; 32],
		[0; 32],
		[0; 32],
	);

	let counterparty_pubkeys = ChannelPublicKeys {
		funding_pubkey: PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[44; 32]).unwrap()),
		revocation_basepoint: RevocationBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap())),
		payment_point: PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[46; 32]).unwrap()),
		delayed_payment_basepoint: DelayedPaymentBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[47; 32]).unwrap())),
		htlc_basepoint: HtlcBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[48; 32]).unwrap())),
	};
	let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::MAX };
	let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
	let channel_parameters = ChannelTransactionParameters {
		holder_pubkeys: keys.pubkeys(&secp_ctx),
		holder_selected_contest_delay: 66,
		is_outbound_from_holder: true,
		counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
			pubkeys: counterparty_pubkeys,
			selected_contest_delay: 67,
		}),
		funding_outpoint: Some(funding_outpoint),
		splice_parent_funding_txid: None,
		channel_type_features: ChannelTypeFeatures::only_static_remote_key(),
		channel_value_satoshis: 0,
	};
	let shutdown_pubkey = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let shutdown_script = ShutdownScript::new_p2wpkh_from_pubkey(shutdown_pubkey);

	let best_block = BestBlock::from_network(Network::Testnet);
	let monitor = ChannelMonitor::new(
		Secp256k1::new(), keys, Some(shutdown_script.into_inner()), 0, &ScriptBuf::new(),
		&channel_parameters, true, 0, HolderCommitmentTransaction::dummy(0, funding_outpoint, Vec::new()),
		best_block, dummy_key, channel_id, false,
	);

	let chan_id = monitor.inner.lock().unwrap().channel_id();
	let payment_hash = PaymentHash([1; 32]);
	let context_logger = WithChannelMonitor::from(&logger, &monitor, Some(payment_hash));
	// One record per level; messages match the level they are logged at (only the context
	// and count are asserted below).
	log_error!(context_logger, "This is an error");
	log_warn!(context_logger, "This is a warning");
	log_debug!(context_logger, "This is a debug message");
	log_trace!(context_logger, "This is a trace message");
	log_gossip!(context_logger, "This is a gossip message");
	log_info!(context_logger, "This is an info message");
	logger.assert_log_context_contains("lightning::chain::channelmonitor::tests", Some(dummy_key), Some(chan_id), 6);
}
// Further testing is done in the ChannelManager integration tests.
}