lightning/ln/
channelmanager.rs

1// This file is Copyright its original authors, visible in version control
2// history.
3//
4// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7// You may not use this file except in accordance with one or both of these
8// licenses.
9
10//! The top-level channel management and payment tracking stuff lives here.
11//!
12//! The [`ChannelManager`] is the main chunk of logic implementing the lightning protocol and is
13//! responsible for tracking which channels are open, HTLCs are in flight and reestablishing those
14//! upon reconnect to the relevant peer(s).
15//!
16//! It does not manage routing logic (see [`Router`] for that) nor does it manage constructing
17//! on-chain transactions (it only monitors the chain to watch for any force-closes that might
18//! imply it needs to fail HTLCs/payments/channels it manages).
19
20use bitcoin::block::Header;
21use bitcoin::transaction::{Transaction, TxIn};
22use bitcoin::constants::ChainHash;
23use bitcoin::key::constants::SECRET_KEY_SIZE;
24use bitcoin::network::Network;
25
26use bitcoin::hashes::{Hash, HashEngine, HmacEngine};
27use bitcoin::hashes::hmac::Hmac;
28use bitcoin::hashes::sha256::Hash as Sha256;
29use bitcoin::hash_types::{BlockHash, Txid};
30
31use bitcoin::secp256k1::{SecretKey,PublicKey};
32use bitcoin::secp256k1::Secp256k1;
33use bitcoin::{secp256k1, Sequence, Weight};
34
35use crate::events::FundingInfo;
36use crate::blinded_path::message::{AsyncPaymentsContext, MessageContext, OffersContext};
37use crate::blinded_path::NodeIdLookUp;
38use crate::blinded_path::message::{BlindedMessagePath, MessageForwardNode};
39use crate::blinded_path::payment::{BlindedPaymentPath, Bolt12OfferContext, Bolt12RefundContext, PaymentConstraints, PaymentContext, UnauthenticatedReceiveTlvs};
40use crate::chain;
41use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
42use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
43use crate::chain::channelmonitor::{Balance, ChannelMonitor, ChannelMonitorUpdate, WithChannelMonitor, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent};
44use crate::chain::transaction::{OutPoint, TransactionData};
45use crate::events::{self, Event, EventHandler, EventsProvider, InboundChannelFunds, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination, PaymentFailureReason, ReplayEvent};
46// Since this struct is returned in `list_channels` methods, expose it here in case users want to
47// construct one themselves.
48use crate::ln::inbound_payment;
49use crate::ln::types::ChannelId;
50use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret};
51use crate::ln::channel::{self, Channel, ChannelPhase, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext, InteractivelyFunded as _};
52#[cfg(any(dual_funding, splicing))]
53use crate::ln::channel::InboundV2Channel;
54use crate::ln::channel_state::ChannelDetails;
55use crate::types::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
56#[cfg(any(feature = "_test_utils", test))]
57use crate::types::features::Bolt11InvoiceFeatures;
58use crate::routing::router::{BlindedTail, FixedRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
59use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundHTLCErr, NextPacketDetails};
60use crate::ln::msgs;
61use crate::ln::onion_utils;
62use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING};
63use crate::ln::msgs::{ChannelMessageHandler, CommitmentUpdate, DecodeError, LightningError};
64#[cfg(test)]
65use crate::ln::outbound_payment;
66use crate::ln::outbound_payment::{OutboundPayments, PendingOutboundPayment, RetryableInvoiceRequest, SendAlongPathArgs, StaleExpiration};
67use crate::offers::invoice::{Bolt12Invoice, DEFAULT_RELATIVE_EXPIRY, DerivedSigningPubkey, ExplicitSigningPubkey, InvoiceBuilder, UnsignedBolt12Invoice};
68use crate::offers::invoice_error::InvoiceError;
69use crate::offers::invoice_request::{InvoiceRequest, InvoiceRequestBuilder};
70use crate::offers::nonce::Nonce;
71use crate::offers::offer::{Offer, OfferBuilder};
72use crate::offers::parse::Bolt12SemanticError;
73use crate::offers::refund::{Refund, RefundBuilder};
74use crate::offers::signer;
75#[cfg(async_payments)]
76use crate::offers::static_invoice::StaticInvoice;
77use crate::onion_message::async_payments::{AsyncPaymentsMessage, HeldHtlcAvailable, ReleaseHeldHtlc, AsyncPaymentsMessageHandler};
78use crate::onion_message::dns_resolution::HumanReadableName;
79use crate::onion_message::messenger::{Destination, MessageRouter, Responder, ResponseInstruction, MessageSendInstructions};
80use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
81use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
82use crate::sign::ecdsa::EcdsaChannelSigner;
83use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
84use crate::util::wakers::{Future, Notifier};
85use crate::util::scid_utils::fake_scid;
86use crate::util::string::UntrustedString;
87use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
88use crate::util::ser::TransactionU16LenLimited;
89use crate::util::logger::{Level, Logger, WithContext};
90use crate::util::errors::APIError;
91
92#[cfg(feature = "dnssec")]
93use crate::blinded_path::message::DNSResolverContext;
94#[cfg(feature = "dnssec")]
95use crate::onion_message::dns_resolution::{DNSResolverMessage, DNSResolverMessageHandler, DNSSECQuery, DNSSECProof, OMNameResolver};
96
97#[cfg(not(c_bindings))]
98use {
99	crate::offers::offer::DerivedMetadata,
100	crate::onion_message::messenger::DefaultMessageRouter,
101	crate::routing::router::DefaultRouter,
102	crate::routing::gossip::NetworkGraph,
103	crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters},
104	crate::sign::KeysManager,
105};
106#[cfg(c_bindings)]
107use {
108	crate::offers::offer::OfferWithDerivedMetadataBuilder,
109	crate::offers::refund::RefundMaybeWithDerivedMetadataBuilder,
110};
111
112use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, CreationError, Currency, Description, InvoiceBuilder as Bolt11InvoiceBuilder, SignOrCreationError, DEFAULT_EXPIRY_TIME};
113
114use alloc::collections::{btree_map, BTreeMap};
115
116use crate::io;
117use crate::prelude::*;
118use core::{cmp, mem};
119use core::borrow::Borrow;
120use core::cell::RefCell;
121use crate::io::Read;
122use crate::sync::{Arc, Mutex, RwLock, RwLockReadGuard, FairRwLock, LockTestExt, LockHeldState};
123use core::sync::atomic::{AtomicUsize, AtomicBool, Ordering};
124use core::time::Duration;
125use core::ops::Deref;
126use bitcoin::hex::impl_fmt_traits;
127// Re-export this for use in the public API.
128pub use crate::ln::outbound_payment::{Bolt12PaymentError, ProbeSendFailure, Retry, RetryableSendFailure, RecipientOnionFields};
129#[cfg(test)]
130pub(crate) use crate::ln::outbound_payment::PaymentSendFailure;
131use crate::ln::script::ShutdownScript;
132
133// We hold various information about HTLC relay in the HTLC objects in Channel itself:
134//
135// Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should
136// forward the HTLC with information it will give back to us when it does so, or if it should Fail
137// the HTLC with the relevant message for the Channel to handle giving to the remote peer.
138//
139// Once said HTLC is committed in the Channel, if the PendingHTLCStatus indicated Forward, the
140// Channel will return the PendingHTLCInfo back to us, and we will create an HTLCForwardInfo
141// with it to track where it came from (in case of onwards-forward error), waiting a random delay
142// before we forward it.
143//
144// We will then use HTLCForwardInfo's PendingHTLCInfo to construct an outbound HTLC, with a
145// relevant HTLCSource::PreviousHopData filled in to indicate where it came from (which we can use
146// to either fail-backwards or fulfill the HTLC backwards along the relevant path).
147// Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is
148// our payment, which we can use to decode errors or inform the user that the payment was sent.
149
150/// Information about where a received HTLC('s onion) has indicated the HTLC should go.
151#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
152#[cfg_attr(test, derive(Debug, PartialEq))]
153pub enum PendingHTLCRouting {
154	/// An HTLC which should be forwarded on to another node.
155	Forward {
156		/// The onion which should be included in the forwarded HTLC, telling the next hop what to
157		/// do with the HTLC.
158		onion_packet: msgs::OnionPacket,
159		/// The short channel ID of the channel which we were instructed to forward this HTLC to.
160		///
161		/// This could be a real on-chain SCID, an SCID alias, or some other SCID which has meaning
162		/// to the receiving node, such as one returned from
163		/// [`ChannelManager::get_intercept_scid`] or [`ChannelManager::get_phantom_scid`].
164		short_channel_id: u64, // This should be NonZero<u64> eventually when we bump MSRV
165		/// Set if this HTLC is being forwarded within a blinded path.
166		blinded: Option<BlindedForward>,
167		/// The absolute CLTV of the inbound HTLC
168		incoming_cltv_expiry: Option<u32>,
169	},
170	/// The onion indicates that this is a payment for an invoice (supposedly) generated by us.
171	///
172	/// Note that at this point, we have not checked that the invoice being paid was actually
173	/// generated by us, but rather it's claiming to pay an invoice of ours.
174	Receive {
175		/// Information about the amount the sender intended to pay and (potential) proof that this
176		/// is a payment for an invoice we generated. This proof of payment is is also used for
177		/// linking MPP parts of a larger payment.
178		payment_data: msgs::FinalOnionHopData,
179		/// Additional data which we (allegedly) instructed the sender to include in the onion.
180		///
181		/// For HTLCs received by LDK, this will ultimately be exposed in
182		/// [`Event::PaymentClaimable::onion_fields`] as
183		/// [`RecipientOnionFields::payment_metadata`].
184		payment_metadata: Option<Vec<u8>>,
185		/// The context of the payment included by the recipient in a blinded path, or `None` if a
186		/// blinded path was not used.
187		///
188		/// Used in part to determine the [`events::PaymentPurpose`].
189		payment_context: Option<PaymentContext>,
190		/// CLTV expiry of the received HTLC.
191		///
192		/// Used to track when we should expire pending HTLCs that go unclaimed.
193		incoming_cltv_expiry: u32,
194		/// If the onion had forwarding instructions to one of our phantom node SCIDs, this will
195		/// provide the onion shared secret used to decrypt the next level of forwarding
196		/// instructions.
197		phantom_shared_secret: Option<[u8; 32]>,
198		/// Custom TLVs which were set by the sender.
199		///
200		/// For HTLCs received by LDK, this will ultimately be exposed in
201		/// [`Event::PaymentClaimable::onion_fields`] as
202		/// [`RecipientOnionFields::custom_tlvs`].
203		custom_tlvs: Vec<(u64, Vec<u8>)>,
204		/// Set if this HTLC is the final hop in a multi-hop blinded path.
205		requires_blinded_error: bool,
206	},
207	/// The onion indicates that this is for payment to us but which contains the preimage for
208	/// claiming included, and is unrelated to any invoice we'd previously generated (aka a
209	/// "keysend" or "spontaneous" payment).
210	ReceiveKeysend {
211		/// Information about the amount the sender intended to pay and possibly a token to
212		/// associate MPP parts of a larger payment.
213		///
214		/// This will only be filled in if receiving MPP keysend payments is enabled, and it being
215		/// present will cause deserialization to fail on versions of LDK prior to 0.0.116.
216		payment_data: Option<msgs::FinalOnionHopData>,
217		/// Preimage for this onion payment. This preimage is provided by the sender and will be
218		/// used to settle the spontaneous payment.
219		payment_preimage: PaymentPreimage,
220		/// Additional data which we (allegedly) instructed the sender to include in the onion.
221		///
222		/// For HTLCs received by LDK, this will ultimately bubble back up as
223		/// [`RecipientOnionFields::payment_metadata`].
224		payment_metadata: Option<Vec<u8>>,
225		/// CLTV expiry of the received HTLC.
226		///
227		/// Used to track when we should expire pending HTLCs that go unclaimed.
228		incoming_cltv_expiry: u32,
229		/// Custom TLVs which were set by the sender.
230		///
231		/// For HTLCs received by LDK, these will ultimately bubble back up as
232		/// [`RecipientOnionFields::custom_tlvs`].
233		custom_tlvs: Vec<(u64, Vec<u8>)>,
234		/// Set if this HTLC is the final hop in a multi-hop blinded path.
235		requires_blinded_error: bool,
236		/// Set if we are receiving a keysend to a blinded path, meaning we created the
237		/// [`PaymentSecret`] and should verify it using our
238		/// [`NodeSigner::get_inbound_payment_key`].
239		has_recipient_created_payment_secret: bool,
240	},
241}
242
243/// Information used to forward or fail this HTLC that is being forwarded within a blinded path.
244#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
245pub struct BlindedForward {
246	/// The `blinding_point` that was set in the inbound [`msgs::UpdateAddHTLC`], or in the inbound
247	/// onion payload if we're the introduction node. Useful for calculating the next hop's
248	/// [`msgs::UpdateAddHTLC::blinding_point`].
249	pub inbound_blinding_point: PublicKey,
250	/// If needed, this determines how this HTLC should be failed backwards, based on whether we are
251	/// the introduction node.
252	pub failure: BlindedFailure,
253	/// Overrides the next hop's [`msgs::UpdateAddHTLC::blinding_point`]. Set if this HTLC is being
254	/// forwarded within a [`BlindedPaymentPath`] that was concatenated to another blinded path that
255	/// starts at the next hop.
256	pub next_blinding_override: Option<PublicKey>,
257}
258
259impl PendingHTLCRouting {
260	// Used to override the onion failure code and data if the HTLC is blinded.
261	fn blinded_failure(&self) -> Option<BlindedFailure> {
262		match self {
263			Self::Forward { blinded: Some(BlindedForward { failure, .. }), .. } => Some(*failure),
264			Self::Receive { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
265			Self::ReceiveKeysend { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
266			_ => None,
267		}
268	}
269
270	fn incoming_cltv_expiry(&self) -> Option<u32> {
271		match self {
272			Self::Forward { incoming_cltv_expiry, .. } => *incoming_cltv_expiry,
273			Self::Receive { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry),
274			Self::ReceiveKeysend { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry),
275		}
276	}
277}
278
279/// Information about an incoming HTLC, including the [`PendingHTLCRouting`] describing where it
280/// should go next.
281#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
282#[cfg_attr(test, derive(Debug, PartialEq))]
283pub struct PendingHTLCInfo {
284	/// Further routing details based on whether the HTLC is being forwarded or received.
285	pub routing: PendingHTLCRouting,
286	/// The onion shared secret we build with the sender used to decrypt the onion.
287	///
288	/// This is later used to encrypt failure packets in the event that the HTLC is failed.
289	pub incoming_shared_secret: [u8; 32],
290	/// Hash of the payment preimage, to lock the payment until the receiver releases the preimage.
291	pub payment_hash: PaymentHash,
292	/// Amount received in the incoming HTLC.
293	///
294	/// This field was added in LDK 0.0.113 and will be `None` for objects written by prior
295	/// versions.
296	pub incoming_amt_msat: Option<u64>,
297	/// The amount the sender indicated should be forwarded on to the next hop or amount the sender
298	/// intended for us to receive for received payments.
299	///
300	/// If the received amount is less than this for received payments, an intermediary hop has
301	/// attempted to steal some of our funds and we should fail the HTLC (the sender should retry
302	/// it along another path).
303	///
304	/// Because nodes can take less than their required fees, and because senders may wish to
305	/// improve their own privacy, this amount may be less than [`Self::incoming_amt_msat`] for
306	/// received payments. In such cases, recipients must handle this HTLC as if it had received
307	/// [`Self::outgoing_amt_msat`].
308	pub outgoing_amt_msat: u64,
309	/// The CLTV the sender has indicated we should set on the forwarded HTLC (or has indicated
310	/// should have been set on the received HTLC for received payments).
311	pub outgoing_cltv_value: u32,
312	/// The fee taken for this HTLC in addition to the standard protocol HTLC fees.
313	///
314	/// If this is a payment for forwarding, this is the fee we are taking before forwarding the
315	/// HTLC.
316	///
317	/// If this is a received payment, this is the fee that our counterparty took.
318	///
319	/// This is used to allow LSPs to take fees as a part of payments, without the sender having to
320	/// shoulder them.
321	pub skimmed_fee_msat: Option<u64>,
322}
323
324#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
325pub(super) enum HTLCFailureMsg {
326	Relay(msgs::UpdateFailHTLC),
327	Malformed(msgs::UpdateFailMalformedHTLC),
328}
329
330/// Stores whether we can't forward an HTLC or relevant forwarding info
331#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
332pub(super) enum PendingHTLCStatus {
333	Forward(PendingHTLCInfo),
334	Fail(HTLCFailureMsg),
335}
336
337#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
338pub(super) struct PendingAddHTLCInfo {
339	pub(super) forward_info: PendingHTLCInfo,
340
341	// These fields are produced in `forward_htlcs()` and consumed in
342	// `process_pending_htlc_forwards()` for constructing the
343	// `HTLCSource::PreviousHopData` for failed and forwarded
344	// HTLCs.
345	//
346	// Note that this may be an outbound SCID alias for the associated channel.
347	prev_short_channel_id: u64,
348	prev_htlc_id: u64,
349	prev_counterparty_node_id: Option<PublicKey>,
350	prev_channel_id: ChannelId,
351	prev_funding_outpoint: OutPoint,
352	prev_user_channel_id: u128,
353}
354
355#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
356pub(super) enum HTLCForwardInfo {
357	AddHTLC(PendingAddHTLCInfo),
358	FailHTLC {
359		htlc_id: u64,
360		err_packet: msgs::OnionErrorPacket,
361	},
362	FailMalformedHTLC {
363		htlc_id: u64,
364		failure_code: u16,
365		sha256_of_onion: [u8; 32],
366	},
367}
368
369/// Whether this blinded HTLC is being failed backwards by the introduction node or a blinded node,
370/// which determines the failure message that should be used.
371#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
372pub enum BlindedFailure {
373	/// This HTLC is being failed backwards by the introduction node, and thus should be failed with
374	/// [`msgs::UpdateFailHTLC`] and error code `0x8000|0x4000|24`.
375	FromIntroductionNode,
376	/// This HTLC is being failed backwards by a blinded node within the path, and thus should be
377	/// failed with [`msgs::UpdateFailMalformedHTLC`] and error code `0x8000|0x4000|24`.
378	FromBlindedNode,
379}
380
381/// Tracks the inbound corresponding to an outbound HTLC
382#[derive(Clone, Debug, Hash, PartialEq, Eq)]
383pub(crate) struct HTLCPreviousHopData {
384	// Note that this may be an outbound SCID alias for the associated channel.
385	short_channel_id: u64,
386	user_channel_id: Option<u128>,
387	htlc_id: u64,
388	incoming_packet_shared_secret: [u8; 32],
389	phantom_shared_secret: Option<[u8; 32]>,
390	blinded_failure: Option<BlindedFailure>,
391	channel_id: ChannelId,
392
393	// These fields are consumed by `claim_funds_from_hop()` when updating a force-closed backwards
394	// channel with a preimage provided by the forward channel.
395	outpoint: OutPoint,
396	counterparty_node_id: Option<PublicKey>,
397	/// Used to preserve our backwards channel by failing back in case an HTLC claim in the forward
398	/// channel remains unconfirmed for too long.
399	cltv_expiry: Option<u32>,
400}
401
402#[derive(PartialEq, Eq)]
403enum OnionPayload {
404	/// Indicates this incoming onion payload is for the purpose of paying an invoice.
405	Invoice {
406		/// This is only here for backwards-compatibility in serialization, in the future it can be
407		/// removed, breaking clients running 0.0.106 and earlier.
408		_legacy_hop_data: Option<msgs::FinalOnionHopData>,
409	},
410	/// Contains the payer-provided preimage.
411	Spontaneous(PaymentPreimage),
412}
413
414/// HTLCs that are to us and can be failed/claimed by the user
415#[derive(PartialEq, Eq)]
416struct ClaimableHTLC {
417	prev_hop: HTLCPreviousHopData,
418	cltv_expiry: u32,
419	/// The amount (in msats) of this MPP part
420	value: u64,
421	/// The amount (in msats) that the sender intended to be sent in this MPP
422	/// part (used for validating total MPP amount)
423	sender_intended_value: u64,
424	onion_payload: OnionPayload,
425	timer_ticks: u8,
426	/// The total value received for a payment (sum of all MPP parts if the payment is a MPP).
427	/// Gets set to the amount reported when pushing [`Event::PaymentClaimable`].
428	total_value_received: Option<u64>,
429	/// The sender intended sum total of all MPP parts specified in the onion
430	total_msat: u64,
431	/// The extra fee our counterparty skimmed off the top of this HTLC.
432	counterparty_skimmed_fee_msat: Option<u64>,
433}
434
435impl From<&ClaimableHTLC> for events::ClaimedHTLC {
436	fn from(val: &ClaimableHTLC) -> Self {
437		events::ClaimedHTLC {
438			channel_id: val.prev_hop.channel_id,
439			user_channel_id: val.prev_hop.user_channel_id.unwrap_or(0),
440			cltv_expiry: val.cltv_expiry,
441			value_msat: val.value,
442			counterparty_skimmed_fee_msat: val.counterparty_skimmed_fee_msat.unwrap_or(0),
443		}
444	}
445}
446
447impl PartialOrd for ClaimableHTLC {
448	fn partial_cmp(&self, other: &ClaimableHTLC) -> Option<cmp::Ordering> {
449		Some(self.cmp(other))
450	}
451}
452impl Ord for ClaimableHTLC {
453	fn cmp(&self, other: &ClaimableHTLC) -> cmp::Ordering {
454		let res = (self.prev_hop.channel_id, self.prev_hop.htlc_id).cmp(
455			&(other.prev_hop.channel_id, other.prev_hop.htlc_id)
456		);
457		if res.is_eq() {
458			debug_assert!(self == other, "ClaimableHTLCs from the same source should be identical");
459		}
460		res
461	}
462}
463
464/// A trait defining behavior for creating and verifing the HMAC for authenticating a given data.
465pub trait Verification {
466	/// Constructs an HMAC to include in [`OffersContext`] for the data along with the given
467	/// [`Nonce`].
468	fn hmac_for_offer_payment(
469		&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
470	) -> Hmac<Sha256>;
471
472	/// Authenticates the data using an HMAC and a [`Nonce`] taken from an [`OffersContext`].
473	fn verify_for_offer_payment(
474		&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
475	) -> Result<(), ()>;
476}
477
478impl Verification for PaymentHash {
479	/// Constructs an HMAC to include in [`OffersContext::InboundPayment`] for the payment hash
480	/// along with the given [`Nonce`].
481	fn hmac_for_offer_payment(
482		&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
483	) -> Hmac<Sha256> {
484		signer::hmac_for_payment_hash(*self, nonce, expanded_key)
485	}
486
487	/// Authenticates the payment id using an HMAC and a [`Nonce`] taken from an
488	/// [`OffersContext::InboundPayment`].
489	fn verify_for_offer_payment(
490		&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
491	) -> Result<(), ()> {
492		signer::verify_payment_hash(*self, hmac, nonce, expanded_key)
493	}
494}
495
496impl Verification for UnauthenticatedReceiveTlvs {
497	fn hmac_for_offer_payment(
498		&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
499	) -> Hmac<Sha256> {
500		signer::hmac_for_payment_tlvs(self, nonce, expanded_key)
501	}
502
503	fn verify_for_offer_payment(
504		&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
505	) -> Result<(), ()> {
506		signer::verify_payment_tlvs(self, hmac, nonce, expanded_key)
507	}
508}
509
510/// A user-provided identifier in [`ChannelManager::send_payment`] used to uniquely identify
511/// a payment and ensure idempotency in LDK.
512///
513/// This is not exported to bindings users as we just use [u8; 32] directly
514#[derive(Hash, Copy, Clone, PartialEq, Eq)]
515pub struct PaymentId(pub [u8; Self::LENGTH]);
516
517impl PaymentId {
518	/// Number of bytes in the id.
519	pub const LENGTH: usize = 32;
520
521	/// Constructs an HMAC to include in [`AsyncPaymentsContext::OutboundPayment`] for the payment id
522	/// along with the given [`Nonce`].
523	#[cfg(async_payments)]
524	pub fn hmac_for_async_payment(
525		&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
526	) -> Hmac<Sha256> {
527		signer::hmac_for_async_payment_id(*self, nonce, expanded_key)
528	}
529
530	/// Authenticates the payment id using an HMAC and a [`Nonce`] taken from an
531	/// [`AsyncPaymentsContext::OutboundPayment`].
532	#[cfg(async_payments)]
533	pub fn verify_for_async_payment(
534		&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
535	) -> Result<(), ()> {
536		signer::verify_async_payment_id(*self, hmac, nonce, expanded_key)
537	}
538}
539
540impl Verification for PaymentId {
541	/// Constructs an HMAC to include in [`OffersContext::OutboundPayment`] for the payment id
542	/// along with the given [`Nonce`].
543	fn hmac_for_offer_payment(
544		&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
545	) -> Hmac<Sha256> {
546		signer::hmac_for_offer_payment_id(*self, nonce, expanded_key)
547	}
548
549	/// Authenticates the payment id using an HMAC and a [`Nonce`] taken from an
550	/// [`OffersContext::OutboundPayment`].
551	fn verify_for_offer_payment(
552		&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
553	) -> Result<(), ()> {
554		signer::verify_offer_payment_id(*self, hmac, nonce, expanded_key)
555	}
556}
557
558impl PaymentId {
559	fn for_inbound_from_htlcs<I: Iterator<Item=(ChannelId, u64)>>(key: &[u8; 32], htlcs: I) -> PaymentId {
560		let mut prev_pair = None;
561		let mut hasher = HmacEngine::new(key);
562		for (channel_id, htlc_id) in htlcs {
563			hasher.input(&channel_id.0);
564			hasher.input(&htlc_id.to_le_bytes());
565			if let Some(prev) = prev_pair {
566				debug_assert!(prev < (channel_id, htlc_id), "HTLCs should be sorted");
567			}
568			prev_pair = Some((channel_id, htlc_id));
569		}
570		PaymentId(Hmac::<Sha256>::from_engine(hasher).to_byte_array())
571	}
572}
573
574impl Borrow<[u8]> for PaymentId {
575	fn borrow(&self) -> &[u8] {
576		&self.0[..]
577	}
578}
579
580impl_fmt_traits! {
581	impl fmt_traits for PaymentId {
582		const LENGTH: usize = 32;
583	}
584}
585
586impl Writeable for PaymentId {
587	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
588		self.0.write(w)
589	}
590}
591
592impl Readable for PaymentId {
593	fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
594		let buf: [u8; 32] = Readable::read(r)?;
595		Ok(PaymentId(buf))
596	}
597}
598
599/// An identifier used to uniquely identify an intercepted HTLC to LDK.
600///
601/// This is not exported to bindings users as we just use [u8; 32] directly
602#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
603pub struct InterceptId(pub [u8; 32]);
604
605impl Writeable for InterceptId {
606	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
607		self.0.write(w)
608	}
609}
610
611impl Readable for InterceptId {
612	fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
613		let buf: [u8; 32] = Readable::read(r)?;
614		Ok(InterceptId(buf))
615	}
616}
617
618#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
619/// Uniquely describes an HTLC by its source. Just the guaranteed-unique subset of [`HTLCSource`].
620pub(crate) enum SentHTLCId {
621	PreviousHopData { short_channel_id: u64, htlc_id: u64 },
622	OutboundRoute { session_priv: [u8; SECRET_KEY_SIZE] },
623}
624impl SentHTLCId {
625	pub(crate) fn from_source(source: &HTLCSource) -> Self {
626		match source {
627			HTLCSource::PreviousHopData(hop_data) => Self::PreviousHopData {
628				short_channel_id: hop_data.short_channel_id,
629				htlc_id: hop_data.htlc_id,
630			},
631			HTLCSource::OutboundRoute { session_priv, .. } =>
632				Self::OutboundRoute { session_priv: session_priv.secret_bytes() },
633		}
634	}
635}
636impl_writeable_tlv_based_enum!(SentHTLCId,
637	(0, PreviousHopData) => {
638		(0, short_channel_id, required),
639		(2, htlc_id, required),
640	},
641	(2, OutboundRoute) => {
642		(0, session_priv, required),
643	},
644);
645
646
647/// Tracks the inbound corresponding to an outbound HTLC
648#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
649#[derive(Clone, Debug, PartialEq, Eq)]
650pub(crate) enum HTLCSource {
651	PreviousHopData(HTLCPreviousHopData),
652	OutboundRoute {
653		path: Path,
654		session_priv: SecretKey,
655		/// Technically we can recalculate this from the route, but we cache it here to avoid
656		/// doing a double-pass on route when we get a failure back
657		first_hop_htlc_msat: u64,
658		payment_id: PaymentId,
659	},
660}
661#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
662impl core::hash::Hash for HTLCSource {
663	fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
664		match self {
665			HTLCSource::PreviousHopData(prev_hop_data) => {
666				0u8.hash(hasher);
667				prev_hop_data.hash(hasher);
668			},
669			HTLCSource::OutboundRoute { path, session_priv, payment_id, first_hop_htlc_msat } => {
670				1u8.hash(hasher);
671				path.hash(hasher);
672				session_priv[..].hash(hasher);
673				payment_id.hash(hasher);
674				first_hop_htlc_msat.hash(hasher);
675			},
676		}
677	}
678}
679impl HTLCSource {
680	#[cfg(all(ldk_test_vectors, test))]
681	pub fn dummy() -> Self {
682		assert!(cfg!(not(feature = "grind_signatures")));
683		HTLCSource::OutboundRoute {
684			path: Path { hops: Vec::new(), blinded_tail: None },
685			session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
686			first_hop_htlc_msat: 0,
687			payment_id: PaymentId([2; 32]),
688		}
689	}
690
691	#[cfg(debug_assertions)]
692	/// Checks whether this HTLCSource could possibly match the given HTLC output in a commitment
693	/// transaction. Useful to ensure different datastructures match up.
694	pub(crate) fn possibly_matches_output(&self, htlc: &super::chan_utils::HTLCOutputInCommitment) -> bool {
695		if let HTLCSource::OutboundRoute { first_hop_htlc_msat, .. } = self {
696			*first_hop_htlc_msat == htlc.amount_msat
697		} else {
698			// There's nothing we can check for forwarded HTLCs
699			true
700		}
701	}
702
703	/// Returns the CLTV expiry of the inbound HTLC (i.e. the source referred to by this object),
704	/// if the source was a forwarded HTLC and the HTLC was first forwarded on LDK 0.1.1 or later.
705	pub(crate) fn inbound_htlc_expiry(&self) -> Option<u32> {
706		match self {
707			Self::PreviousHopData(HTLCPreviousHopData { cltv_expiry, .. }) => *cltv_expiry,
708			_ => None,
709		}
710	}
711}
712
713/// This enum is used to specify which error data to send to peers when failing back an HTLC
714/// using [`ChannelManager::fail_htlc_backwards_with_reason`].
715///
716/// For more info on failure codes, see <https://github.com/lightning/bolts/blob/master/04-onion-routing.md#failure-messages>.
717#[derive(Clone, Copy)]
718pub enum FailureCode {
719	/// We had a temporary error processing the payment. Useful if no other error codes fit
720	/// and you want to indicate that the payer may want to retry.
721	TemporaryNodeFailure,
722	/// We have a required feature which was not in this onion. For example, you may require
723	/// some additional metadata that was not provided with this payment.
724	RequiredNodeFeatureMissing,
725	/// You may wish to use this when a `payment_preimage` is unknown, or the CLTV expiry of
726	/// the HTLC is too close to the current block height for safe handling.
727	/// Using this failure code in [`ChannelManager::fail_htlc_backwards_with_reason`] is
728	/// equivalent to calling [`ChannelManager::fail_htlc_backwards`].
729	IncorrectOrUnknownPaymentDetails,
730	/// We failed to process the payload after the onion was decrypted. You may wish to
731	/// use this when receiving custom HTLC TLVs with even type numbers that you don't recognize.
732	///
733	/// If available, the tuple data may include the type number and byte offset in the
734	/// decrypted byte stream where the failure occurred.
735	InvalidOnionPayload(Option<(u64, u16)>),
736}
737
738impl Into<u16> for FailureCode {
739	fn into(self) -> u16 {
740		match self {
741			FailureCode::TemporaryNodeFailure => 0x2000 | 2,
742			FailureCode::RequiredNodeFeatureMissing => 0x4000 | 0x2000 | 3,
743			FailureCode::IncorrectOrUnknownPaymentDetails => 0x4000 | 15,
744			FailureCode::InvalidOnionPayload(_) => 0x4000 | 22,
745		}
746	}
747}
748
749/// Error type returned across the peer_state mutex boundary. When an Err is generated for a
750/// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
751/// immediately (ie with no further calls on it made). Thus, this step happens inside a
752/// peer_state lock. We then return the set of things that need to be done outside the lock in
753/// this struct and call handle_error!() on it.
754struct MsgHandleErrInternal {
755	err: msgs::LightningError,
756	closes_channel: bool,
757	shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
758}
759impl MsgHandleErrInternal {
760	#[inline]
761	fn send_err_msg_no_close(err: String, channel_id: ChannelId) -> Self {
762		Self {
763			err: LightningError {
764				err: err.clone(),
765				action: msgs::ErrorAction::SendErrorMessage {
766					msg: msgs::ErrorMessage {
767						channel_id,
768						data: err
769					},
770				},
771			},
772			closes_channel: false,
773			shutdown_finish: None,
774		}
775	}
776	#[inline]
777	fn from_no_close(err: msgs::LightningError) -> Self {
778		Self { err, closes_channel: false, shutdown_finish: None }
779	}
780	#[inline]
781	fn from_finish_shutdown(err: String, channel_id: ChannelId, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
782		let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
783		let action = if shutdown_res.monitor_update.is_some() {
784			// We have a closing `ChannelMonitorUpdate`, which means the channel was funded and we
785			// should disconnect our peer such that we force them to broadcast their latest
786			// commitment upon reconnecting.
787			msgs::ErrorAction::DisconnectPeer { msg: Some(err_msg) }
788		} else {
789			msgs::ErrorAction::SendErrorMessage { msg: err_msg }
790		};
791		Self {
792			err: LightningError { err, action },
793			closes_channel: true,
794			shutdown_finish: Some((shutdown_res, channel_update)),
795		}
796	}
797	#[inline]
798	fn from_chan_no_close(err: ChannelError, channel_id: ChannelId) -> Self {
799		Self {
800			err: match err {
801				ChannelError::Warn(msg) =>  LightningError {
802					err: msg.clone(),
803					action: msgs::ErrorAction::SendWarningMessage {
804						msg: msgs::WarningMessage {
805							channel_id,
806							data: msg
807						},
808						log_level: Level::Warn,
809					},
810				},
811				ChannelError::Ignore(msg) => LightningError {
812					err: msg,
813					action: msgs::ErrorAction::IgnoreError,
814				},
815				ChannelError::Close((msg, _reason)) => LightningError {
816					err: msg.clone(),
817					action: msgs::ErrorAction::SendErrorMessage {
818						msg: msgs::ErrorMessage {
819							channel_id,
820							data: msg
821						},
822					},
823				},
824			},
825			closes_channel: false,
826			shutdown_finish: None,
827		}
828	}
829
830	fn closes_channel(&self) -> bool {
831		self.closes_channel
832	}
833}
834
835/// We hold back HTLCs we intend to relay for a random interval greater than this (see
836/// Event::PendingHTLCsForwardable for the API guidelines indicating how long should be waited).
837/// This provides some limited amount of privacy. Ideally this would range from somewhere like one
838/// second to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly.
839pub(super) const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;
840
841/// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should
842/// be sent in the order they appear in the return value, however sometimes the order needs to be
843/// variable at runtime (eg Channel::channel_reestablish needs to re-send messages in the order
844/// they were originally sent). In those cases, this enum is also returned.
845#[derive(Clone, PartialEq, Debug)]
846pub(super) enum RAACommitmentOrder {
847	/// Send the CommitmentUpdate messages first
848	CommitmentFirst,
849	/// Send the RevokeAndACK message first
850	RevokeAndACKFirst,
851}
852
853/// Information about a payment which is currently being claimed.
854#[derive(Clone, Debug, PartialEq, Eq)]
855struct ClaimingPayment {
856	amount_msat: u64,
857	payment_purpose: events::PaymentPurpose,
858	receiver_node_id: PublicKey,
859	htlcs: Vec<events::ClaimedHTLC>,
860	sender_intended_value: Option<u64>,
861	onion_fields: Option<RecipientOnionFields>,
862	payment_id: Option<PaymentId>,
863}
864impl_writeable_tlv_based!(ClaimingPayment, {
865	(0, amount_msat, required),
866	(2, payment_purpose, required),
867	(4, receiver_node_id, required),
868	(5, htlcs, optional_vec),
869	(7, sender_intended_value, option),
870	(9, onion_fields, option),
871	(11, payment_id, option),
872});
873
874struct ClaimablePayment {
875	purpose: events::PaymentPurpose,
876	onion_fields: Option<RecipientOnionFields>,
877	htlcs: Vec<ClaimableHTLC>,
878}
879
880impl ClaimablePayment {
881	fn inbound_payment_id(&self, secret: &[u8; 32]) -> PaymentId {
882		PaymentId::for_inbound_from_htlcs(
883			secret,
884			self.htlcs.iter().map(|htlc| (htlc.prev_hop.channel_id, htlc.prev_hop.htlc_id))
885		)
886	}
887}
888
889/// Represent the channel funding transaction type.
890enum FundingType {
891	/// This variant is useful when we want LDK to validate the funding transaction and
892	/// broadcast it automatically.
893	///
894	/// This is the normal flow.
895	Checked(Transaction),
896	/// This variant is useful when we want to loosen the validation checks and allow to
897	/// manually broadcast the funding transaction, leaving the responsibility to the caller.
898	///
899	/// This is useful in cases of constructing the funding transaction as part of another
900	/// flow and the caller wants to perform the validation and broadcasting. An example of such
901	/// scenario could be when constructing the funding transaction as part of a Payjoin
902	/// transaction.
903	Unchecked(OutPoint),
904}
905
906impl FundingType {
907	fn txid(&self) -> Txid {
908		match self {
909			FundingType::Checked(tx) => tx.compute_txid(),
910			FundingType::Unchecked(outp) => outp.txid,
911		}
912	}
913
914	fn transaction_or_dummy(&self) -> Transaction {
915		match self {
916			FundingType::Checked(tx) => tx.clone(),
917			FundingType::Unchecked(_) => Transaction {
918				version: bitcoin::transaction::Version::TWO,
919				lock_time: bitcoin::absolute::LockTime::ZERO,
920				input: Vec::new(),
921				output: Vec::new(),
922			},
923		}
924	}
925
926	fn is_manual_broadcast(&self) -> bool {
927		match self {
928			FundingType::Checked(_) => false,
929			FundingType::Unchecked(_) => true,
930		}
931	}
932}
933
934/// Information about claimable or being-claimed payments
935struct ClaimablePayments {
936	/// Map from payment hash to the payment data and any HTLCs which are to us and can be
937	/// failed/claimed by the user.
938	///
939	/// Note that, no consistency guarantees are made about the channels given here actually
940	/// existing anymore by the time you go to read them!
941	///
942	/// When adding to the map, [`Self::pending_claiming_payments`] must also be checked to ensure
943	/// we don't get a duplicate payment.
944	claimable_payments: HashMap<PaymentHash, ClaimablePayment>,
945
946	/// Map from payment hash to the payment data for HTLCs which we have begun claiming, but which
947	/// are waiting on a [`ChannelMonitorUpdate`] to complete in order to be surfaced to the user
948	/// as an [`events::Event::PaymentClaimed`].
949	pending_claiming_payments: HashMap<PaymentHash, ClaimingPayment>,
950}
951
952impl ClaimablePayments {
953	/// Moves a payment from [`Self::claimable_payments`] to [`Self::pending_claiming_payments`].
954	///
955	/// If `custom_tlvs_known` is false and custom even TLVs are set by the sender, the set of
956	/// pending HTLCs will be returned in the `Err` variant of this method. They MUST then be
957	/// failed by the caller as they will not be in either [`Self::claimable_payments`] or
958	/// [`Self::pending_claiming_payments`].
959	///
960	/// If `custom_tlvs_known` is true, and a matching payment is found, it will always be moved.
961	///
962	/// If no payment is found, `Err(Vec::new())` is returned.
963	fn begin_claiming_payment<L: Deref, S: Deref>(
964		&mut self, payment_hash: PaymentHash, node_signer: &S, logger: &L,
965		inbound_payment_id_secret: &[u8; 32], custom_tlvs_known: bool,
966	) -> Result<(Vec<ClaimableHTLC>, ClaimingPayment), Vec<ClaimableHTLC>>
967		where L::Target: Logger, S::Target: NodeSigner,
968	{
969		match self.claimable_payments.remove(&payment_hash) {
970			Some(payment) => {
971				let mut receiver_node_id = node_signer.get_node_id(Recipient::Node)
972					.expect("Failed to get node_id for node recipient");
973				for htlc in payment.htlcs.iter() {
974					if htlc.prev_hop.phantom_shared_secret.is_some() {
975						let phantom_pubkey = node_signer.get_node_id(Recipient::PhantomNode)
976							.expect("Failed to get node_id for phantom node recipient");
977						receiver_node_id = phantom_pubkey;
978						break;
979					}
980				}
981
982				if let Some(RecipientOnionFields { custom_tlvs, .. }) = &payment.onion_fields {
983					if !custom_tlvs_known && custom_tlvs.iter().any(|(typ, _)| typ % 2 == 0) {
984						log_info!(logger, "Rejecting payment with payment hash {} as we cannot accept payment with unknown even TLVs: {}",
985							&payment_hash, log_iter!(custom_tlvs.iter().map(|(typ, _)| typ).filter(|typ| *typ % 2 == 0)));
986						return Err(payment.htlcs);
987					}
988				}
989
990				let payment_id = payment.inbound_payment_id(inbound_payment_id_secret);
991				let claiming_payment = self.pending_claiming_payments
992					.entry(payment_hash)
993					.and_modify(|_| {
994						debug_assert!(false, "Shouldn't get a duplicate pending claim event ever");
995						log_error!(logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug",
996							&payment_hash);
997					})
998					.or_insert_with(|| {
999						let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect();
1000						let sender_intended_value = payment.htlcs.first().map(|htlc| htlc.total_msat);
1001						ClaimingPayment {
1002							amount_msat: payment.htlcs.iter().map(|source| source.value).sum(),
1003							payment_purpose: payment.purpose,
1004							receiver_node_id,
1005							htlcs,
1006							sender_intended_value,
1007							onion_fields: payment.onion_fields,
1008							payment_id: Some(payment_id),
1009						}
1010					}).clone();
1011
1012				Ok((payment.htlcs, claiming_payment))
1013			},
1014			None => Err(Vec::new())
1015		}
1016	}
1017}
1018
1019/// Events which we process internally but cannot be processed immediately at the generation site
1020/// usually because we're running pre-full-init. They are handled immediately once we detect we are
1021/// running normally, and specifically must be processed before any other non-background
1022/// [`ChannelMonitorUpdate`]s are applied.
1023#[derive(Debug)]
1024enum BackgroundEvent {
1025	/// Handle a ChannelMonitorUpdate which closes the channel or for an already-closed channel.
1026	/// This is only separated from [`Self::MonitorUpdateRegeneratedOnStartup`] as for truly
1027	/// ancient [`ChannelMonitor`]s that haven't seen an update since LDK 0.0.118 we may not have
1028	/// the counterparty node ID available.
1029	///
1030	/// Note that any such events are lost on shutdown, so in general they must be updates which
1031	/// are regenerated on startup.
1032	ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelId, ChannelMonitorUpdate)),
1033	/// Handle a ChannelMonitorUpdate which may or may not close the channel and may unblock the
1034	/// channel to continue normal operation.
1035	///
1036	/// In general this should be used rather than
1037	/// [`Self::ClosedMonitorUpdateRegeneratedOnStartup`], however in cases where the
1038	/// `counterparty_node_id` is not available as the channel has closed from a [`ChannelMonitor`]
1039	/// error the other variant is acceptable.
1040	///
1041	/// Any such events that exist in [`ChannelManager::pending_background_events`] will *also* be
1042	/// tracked in [`PeerState::in_flight_monitor_updates`].
1043	///
1044	/// Note that any such events are lost on shutdown, so in general they must be updates which
1045	/// are regenerated on startup.
1046	MonitorUpdateRegeneratedOnStartup {
1047		counterparty_node_id: PublicKey,
1048		funding_txo: OutPoint,
1049		channel_id: ChannelId,
1050		update: ChannelMonitorUpdate
1051	},
1052	/// Some [`ChannelMonitorUpdate`] (s) completed before we were serialized but we still have
1053	/// them marked pending, thus we need to run any [`MonitorUpdateCompletionAction`] (s) pending
1054	/// on a channel.
1055	MonitorUpdatesComplete {
1056		counterparty_node_id: PublicKey,
1057		channel_id: ChannelId,
1058	},
1059}
1060
1061/// A pointer to a channel that is unblocked when an event is surfaced
1062#[derive(Debug)]
1063pub(crate) struct EventUnblockedChannel {
1064	counterparty_node_id: PublicKey,
1065	funding_txo: OutPoint,
1066	channel_id: ChannelId,
1067	blocking_action: RAAMonitorUpdateBlockingAction,
1068}
1069
1070impl Writeable for EventUnblockedChannel {
1071	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
1072		self.counterparty_node_id.write(writer)?;
1073		self.funding_txo.write(writer)?;
1074		self.channel_id.write(writer)?;
1075		self.blocking_action.write(writer)
1076	}
1077}
1078
1079impl MaybeReadable for EventUnblockedChannel {
1080	fn read<R: Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
1081		let counterparty_node_id = Readable::read(reader)?;
1082		let funding_txo = Readable::read(reader)?;
1083		let channel_id = Readable::read(reader)?;
1084		let blocking_action = match RAAMonitorUpdateBlockingAction::read(reader)? {
1085			Some(blocking_action) => blocking_action,
1086			None => return Ok(None),
1087		};
1088		Ok(Some(EventUnblockedChannel {
1089			counterparty_node_id,
1090			funding_txo,
1091			channel_id,
1092			blocking_action,
1093		}))
1094	}
1095}
1096
1097#[derive(Debug)]
1098pub(crate) enum MonitorUpdateCompletionAction {
1099	/// Indicates that a payment ultimately destined for us was claimed and we should emit an
1100	/// [`events::Event::PaymentClaimed`] to the user if we haven't yet generated such an event for
1101	/// this payment. Note that this is only best-effort. On restart it's possible such a duplicate
1102	/// event can be generated.
1103	PaymentClaimed {
1104		payment_hash: PaymentHash,
1105		/// A pending MPP claim which hasn't yet completed.
1106		///
1107		/// Not written to disk.
1108		pending_mpp_claim: Option<(PublicKey, ChannelId, u64, PendingMPPClaimPointer)>,
1109	},
1110	/// Indicates an [`events::Event`] should be surfaced to the user and possibly resume the
1111	/// operation of another channel.
1112	///
1113	/// This is usually generated when we've forwarded an HTLC and want to block the outbound edge
1114	/// from completing a monitor update which removes the payment preimage until the inbound edge
1115	/// completes a monitor update containing the payment preimage. In that case, after the inbound
1116	/// edge completes, we will surface an [`Event::PaymentForwarded`] as well as unblock the
1117	/// outbound edge.
1118	EmitEventAndFreeOtherChannel {
1119		event: events::Event,
1120		downstream_counterparty_and_funding_outpoint: Option<EventUnblockedChannel>,
1121	},
1122	/// Indicates we should immediately resume the operation of another channel, unless there is
1123	/// some other reason why the channel is blocked. In practice this simply means immediately
1124	/// removing the [`RAAMonitorUpdateBlockingAction`] provided from the blocking set.
1125	///
1126	/// This is usually generated when we've forwarded an HTLC and want to block the outbound edge
1127	/// from completing a monitor update which removes the payment preimage until the inbound edge
1128	/// completes a monitor update containing the payment preimage. However, we use this variant
1129	/// instead of [`Self::EmitEventAndFreeOtherChannel`] when we discover that the claim was in
1130	/// fact duplicative and we simply want to resume the outbound edge channel immediately.
1131	///
1132	/// This variant should thus never be written to disk, as it is processed inline rather than
1133	/// stored for later processing.
1134	FreeOtherChannelImmediately {
1135		downstream_counterparty_node_id: PublicKey,
1136		downstream_funding_outpoint: OutPoint,
1137		blocking_action: RAAMonitorUpdateBlockingAction,
1138		downstream_channel_id: ChannelId,
1139	},
1140}
1141
1142impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
1143	(0, PaymentClaimed) => {
1144		(0, payment_hash, required),
1145		(9999999999, pending_mpp_claim, (static_value, None)),
1146	},
1147	// Note that FreeOtherChannelImmediately should never be written - we were supposed to free
1148	// *immediately*. However, for simplicity we implement read/write here.
1149	(1, FreeOtherChannelImmediately) => {
1150		(0, downstream_counterparty_node_id, required),
1151		(2, downstream_funding_outpoint, required),
1152		(4, blocking_action, upgradable_required),
1153		// Note that by the time we get past the required read above, downstream_funding_outpoint will be
1154		// filled in, so we can safely unwrap it here.
1155		(5, downstream_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(downstream_funding_outpoint.0.unwrap()))),
1156	},
1157	(2, EmitEventAndFreeOtherChannel) => {
1158		(0, event, upgradable_required),
1159		// LDK prior to 0.0.116 did not have this field as the monitor update application order was
1160		// required by clients. If we downgrade to something prior to 0.0.116 this may result in
1161		// monitor updates which aren't properly blocked or resumed, however that's fine - we don't
1162		// support async monitor updates even in LDK 0.0.116 and once we do we'll require no
1163		// downgrades to prior versions.
1164		(1, downstream_counterparty_and_funding_outpoint, upgradable_option),
1165	},
1166);
1167
1168#[derive(Clone, Debug, PartialEq, Eq)]
1169pub(crate) enum EventCompletionAction {
1170	ReleaseRAAChannelMonitorUpdate {
1171		counterparty_node_id: PublicKey,
1172		channel_funding_outpoint: OutPoint,
1173		channel_id: ChannelId,
1174	},
1175}
1176impl_writeable_tlv_based_enum!(EventCompletionAction,
1177	(0, ReleaseRAAChannelMonitorUpdate) => {
1178		(0, channel_funding_outpoint, required),
1179		(2, counterparty_node_id, required),
1180		// Note that by the time we get past the required read above, channel_funding_outpoint will be
1181		// filled in, so we can safely unwrap it here.
1182		(3, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(channel_funding_outpoint.0.unwrap()))),
1183	}
1184);
1185
1186/// The source argument which is passed to [`ChannelManager::claim_mpp_part`].
1187///
1188/// This is identical to [`MPPClaimHTLCSource`] except that [`Self::counterparty_node_id`] is an
1189/// `Option`, whereas it is required in [`MPPClaimHTLCSource`]. In the future, we should ideally
1190/// drop this and merge the two, however doing so may break upgrades for nodes which have pending
1191/// forwarded payments.
1192struct HTLCClaimSource {
1193	counterparty_node_id: Option<PublicKey>,
1194	funding_txo: OutPoint,
1195	channel_id: ChannelId,
1196	htlc_id: u64,
1197}
1198
1199impl From<&MPPClaimHTLCSource> for HTLCClaimSource {
1200	fn from(o: &MPPClaimHTLCSource) -> HTLCClaimSource {
1201		HTLCClaimSource {
1202			counterparty_node_id: Some(o.counterparty_node_id),
1203			funding_txo: o.funding_txo,
1204			channel_id: o.channel_id,
1205			htlc_id: o.htlc_id,
1206		}
1207	}
1208}
1209
1210#[derive(Clone, Debug, Hash, PartialEq, Eq)]
1211/// The source of an HTLC which is being claimed as a part of an incoming payment. Each part is
1212/// tracked in [`PendingMPPClaim`] as well as in [`ChannelMonitor`]s, so that it can be converted
1213/// to an [`HTLCClaimSource`] for claim replays on startup.
1214struct MPPClaimHTLCSource {
1215	counterparty_node_id: PublicKey,
1216	funding_txo: OutPoint,
1217	channel_id: ChannelId,
1218	htlc_id: u64,
1219}
1220
1221impl_writeable_tlv_based!(MPPClaimHTLCSource, {
1222	(0, counterparty_node_id, required),
1223	(2, funding_txo, required),
1224	(4, channel_id, required),
1225	(6, htlc_id, required),
1226});
1227
1228#[derive(Debug)]
1229pub(crate) struct PendingMPPClaim {
1230	channels_without_preimage: Vec<MPPClaimHTLCSource>,
1231	channels_with_preimage: Vec<MPPClaimHTLCSource>,
1232}
1233
1234#[derive(Clone, Debug, PartialEq, Eq)]
1235/// When we're claiming a(n MPP) payment, we want to store information about that payment in the
1236/// [`ChannelMonitor`] so that we can replay the claim without any information from the
1237/// [`ChannelManager`] at all. This struct stores that information with enough to replay claims
1238/// against all MPP parts as well as generate an [`Event::PaymentClaimed`].
1239pub(crate) struct PaymentClaimDetails {
1240	mpp_parts: Vec<MPPClaimHTLCSource>,
1241	/// Use [`ClaimingPayment`] as a stable source of all the fields we need to generate the
1242	/// [`Event::PaymentClaimed`].
1243	claiming_payment: ClaimingPayment,
1244}
1245
1246impl_writeable_tlv_based!(PaymentClaimDetails, {
1247	(0, mpp_parts, required_vec),
1248	(2, claiming_payment, required),
1249});
1250
1251#[derive(Clone)]
1252pub(crate) struct PendingMPPClaimPointer(Arc<Mutex<PendingMPPClaim>>);
1253
1254impl PartialEq for PendingMPPClaimPointer {
1255	fn eq(&self, o: &Self) -> bool { Arc::ptr_eq(&self.0, &o.0) }
1256}
1257impl Eq for PendingMPPClaimPointer {}
1258
1259impl core::fmt::Debug for PendingMPPClaimPointer {
1260	fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
1261		self.0.lock().unwrap().fmt(f)
1262	}
1263}
1264
1265#[derive(Clone, PartialEq, Eq, Debug)]
1266/// If something is blocked on the completion of an RAA-generated [`ChannelMonitorUpdate`] we track
1267/// the blocked action here. See enum variants for more info.
1268pub(crate) enum RAAMonitorUpdateBlockingAction {
1269	/// A forwarded payment was claimed. We block the downstream channel completing its monitor
1270	/// update which removes the HTLC preimage until the upstream channel has gotten the preimage
1271	/// durably to disk.
1272	ForwardedPaymentInboundClaim {
1273		/// The upstream channel ID (i.e. the inbound edge).
1274		channel_id: ChannelId,
1275		/// The HTLC ID on the inbound edge.
1276		htlc_id: u64,
1277	},
1278	/// We claimed an MPP payment across multiple channels. We have to block removing the payment
1279	/// preimage from any monitor until the last monitor is updated to contain the payment
1280	/// preimage. Otherwise we may not be able to replay the preimage on the monitor(s) that
1281	/// weren't updated on startup.
1282	///
1283	/// This variant is *not* written to disk, instead being inferred from [`ChannelMonitor`]
1284	/// state.
1285	ClaimedMPPPayment {
1286		pending_claim: PendingMPPClaimPointer,
1287	}
1288}
1289
1290impl RAAMonitorUpdateBlockingAction {
1291	fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
1292		Self::ForwardedPaymentInboundClaim {
1293			channel_id: prev_hop.channel_id,
1294			htlc_id: prev_hop.htlc_id,
1295		}
1296	}
1297}
1298
1299impl_writeable_tlv_based_enum_upgradable!(RAAMonitorUpdateBlockingAction,
1300	(0, ForwardedPaymentInboundClaim) => { (0, channel_id, required), (2, htlc_id, required) },
1301	unread_variants: ClaimedMPPPayment
1302);
1303
1304impl Readable for Option<RAAMonitorUpdateBlockingAction> {
1305	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
1306		Ok(RAAMonitorUpdateBlockingAction::read(reader)?)
1307	}
1308}
1309
1310/// State we hold per-peer.
1311pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
1312	/// `channel_id` -> `ChannelPhase`
1313	///
1314	/// Holds all channels within corresponding `ChannelPhase`s where the peer is the counterparty.
1315	pub(super) channel_by_id: HashMap<ChannelId, ChannelPhase<SP>>,
1316	/// `temporary_channel_id` -> `InboundChannelRequest`.
1317	///
1318	/// When manual channel acceptance is enabled, this holds all unaccepted inbound channels where
1319	/// the peer is the counterparty. If the channel is accepted, then the entry in this table is
1320	/// removed, and an InboundV1Channel is created and placed in the `inbound_v1_channel_by_id` table. If
1321	/// the channel is rejected, then the entry is simply removed.
1322	pub(super) inbound_channel_request_by_id: HashMap<ChannelId, InboundChannelRequest>,
1323	/// The latest `InitFeatures` we heard from the peer.
1324	latest_features: InitFeatures,
1325	/// Messages to send to the peer - pushed to in the same lock that they are generated in (except
1326	/// for broadcast messages, where ordering isn't as strict).
1327	pub(super) pending_msg_events: Vec<MessageSendEvent>,
1328	/// Map from Channel IDs to pending [`ChannelMonitorUpdate`]s which have been passed to the
1329	/// user but which have not yet completed.
1330	///
1331	/// Note that the channel may no longer exist. For example if the channel was closed but we
1332	/// later needed to claim an HTLC which is pending on-chain, we may generate a monitor update
1333	/// for a missing channel.
1334	///
1335	/// Note that any pending [`BackgroundEvent::MonitorUpdateRegeneratedOnStartup`]s which are
1336	/// sitting in [`ChannelManager::pending_background_events`] will *also* be tracked here. This
1337	/// avoids a race condition during [`ChannelManager::pending_background_events`] processing
1338	/// where we complete one [`ChannelMonitorUpdate`] (but there are more pending as background
1339	/// events) but we conclude all pending [`ChannelMonitorUpdate`]s have completed and its safe
1340	/// to run post-completion actions.
1341	in_flight_monitor_updates: BTreeMap<OutPoint, Vec<ChannelMonitorUpdate>>,
1342	/// Map from a specific channel to some action(s) that should be taken when all pending
1343	/// [`ChannelMonitorUpdate`]s for the channel complete updating.
1344	///
1345	/// Note that because we generally only have one entry here a HashMap is pretty overkill. A
1346	/// BTreeMap currently stores more than ten elements per leaf node, so even up to a few
1347	/// channels with a peer this will just be one allocation and will amount to a linear list of
1348	/// channels to walk, avoiding the whole hashing rigmarole.
1349	///
1350	/// Note that the channel may no longer exist. For example, if a channel was closed but we
1351	/// later needed to claim an HTLC which is pending on-chain, we may generate a monitor update
1352	/// for a missing channel. While a malicious peer could construct a second channel with the
1353	/// same `temporary_channel_id` (or final `channel_id` in the case of 0conf channels or prior
1354	/// to funding appearing on-chain), the downstream `ChannelMonitor` set is required to ensure
1355	/// duplicates do not occur, so such channels should fail without a monitor update completing.
1356	monitor_update_blocked_actions: BTreeMap<ChannelId, Vec<MonitorUpdateCompletionAction>>,
1357	/// If another channel's [`ChannelMonitorUpdate`] needs to complete before a channel we have
1358	/// with this peer can complete an RAA [`ChannelMonitorUpdate`] (e.g. because the RAA update
1359	/// will remove a preimage that needs to be durably in an upstream channel first), we put an
1360	/// entry here to note that the channel with the key's ID is blocked on a set of actions.
1361	actions_blocking_raa_monitor_updates: BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
1362	/// The latest [`ChannelMonitor::get_latest_update_id`] value for all closed channels as they
1363	/// exist on-disk/in our [`chain::Watch`].
1364	///
1365	/// If there are any updates pending in [`Self::in_flight_monitor_updates`] this will contain
1366	/// the highest `update_id` of all the pending in-flight updates (note that any pending updates
1367	/// not yet applied sitting in [`ChannelManager::pending_background_events`] will also be
1368	/// considered as they are also in [`Self::in_flight_monitor_updates`]).
1369	closed_channel_monitor_update_ids: BTreeMap<ChannelId, u64>,
1370	/// The peer is currently connected (i.e. we've seen a
1371	/// [`ChannelMessageHandler::peer_connected`] and no corresponding
1372	/// [`ChannelMessageHandler::peer_disconnected`].
1373	pub is_connected: bool,
1374}
1375
1376impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
1377	/// Indicates that a peer meets the criteria where we're ok to remove it from our storage.
1378	/// If true is passed for `require_disconnected`, the function will return false if we haven't
1379	/// disconnected from the node already, ie. `PeerState::is_connected` is set to `true`.
1380	fn ok_to_remove(&self, require_disconnected: bool) -> bool {
1381		if require_disconnected && self.is_connected {
1382			return false
1383		}
1384		for (_, updates) in self.in_flight_monitor_updates.iter() {
1385			if !updates.is_empty() {
1386				return false;
1387			}
1388		}
1389		!self.channel_by_id.iter().any(|(_, phase)|
1390			match phase {
1391				ChannelPhase::Funded(_) | ChannelPhase::UnfundedOutboundV1(_) => true,
1392				ChannelPhase::UnfundedInboundV1(_) => false,
1393				ChannelPhase::UnfundedOutboundV2(_) => true,
1394				ChannelPhase::UnfundedInboundV2(_) => false,
1395			}
1396		)
1397			&& self.monitor_update_blocked_actions.is_empty()
1398			&& self.closed_channel_monitor_update_ids.is_empty()
1399	}
1400
1401	// Returns a count of all channels we have with this peer, including unfunded channels.
1402	fn total_channel_count(&self) -> usize {
1403		self.channel_by_id.len() + self.inbound_channel_request_by_id.len()
1404	}
1405
1406	// Returns a bool indicating if the given `channel_id` matches a channel we have with this peer.
1407	fn has_channel(&self, channel_id: &ChannelId) -> bool {
1408		self.channel_by_id.contains_key(channel_id) ||
1409			self.inbound_channel_request_by_id.contains_key(channel_id)
1410	}
1411}
1412
1413#[derive(Clone)]
1414pub(super) enum OpenChannelMessage {
1415	V1(msgs::OpenChannel),
1416	#[cfg(dual_funding)]
1417	V2(msgs::OpenChannelV2),
1418}
1419
1420pub(super) enum OpenChannelMessageRef<'a> {
1421	V1(&'a msgs::OpenChannel),
1422	#[cfg(dual_funding)]
1423	V2(&'a msgs::OpenChannelV2),
1424}
1425
1426/// A not-yet-accepted inbound (from counterparty) channel. Once
1427/// accepted, the parameters will be used to construct a channel.
1428pub(super) struct InboundChannelRequest {
1429	/// The original OpenChannel message.
1430	pub open_channel_msg: OpenChannelMessage,
1431	/// The number of ticks remaining before the request expires.
1432	pub ticks_remaining: i32,
1433}
1434
1435/// The number of ticks that may elapse while we're waiting for an unaccepted inbound channel to be
1436/// accepted. An unaccepted channel that exceeds this limit will be abandoned.
1437const UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS: i32 = 2;
1438
1439/// The number of blocks of historical feerate estimates we keep around and consider when deciding
1440/// to force-close a channel for having too-low fees. Also the number of blocks we have to see
1441/// after startup before we consider force-closing channels for having too-low fees.
1442pub(super) const FEERATE_TRACKING_BLOCKS: usize = 144;
1443
1444/// Stores a PaymentSecret and any other data we may need to validate an inbound payment is
1445/// actually ours and not some duplicate HTLC sent to us by a node along the route.
1446///
1447/// For users who don't want to bother doing their own payment preimage storage, we also store that
1448/// here.
1449///
1450/// Note that this struct will be removed entirely soon, in favor of storing no inbound payment data
1451/// and instead encoding it in the payment secret.
1452#[derive(Debug)]
1453struct PendingInboundPayment {
1454	/// The payment secret that the sender must use for us to accept this payment
1455	payment_secret: PaymentSecret,
1456	/// Time at which this HTLC expires - blocks with a header time above this value will result in
1457	/// this payment being removed.
1458	expiry_time: u64,
1459	/// Arbitrary identifier the user specifies (or not)
1460	user_payment_id: u64,
1461	// Other required attributes of the payment, optionally enforced:
1462	payment_preimage: Option<PaymentPreimage>,
1463	min_value_msat: Option<u64>,
1464}
1465
1466/// [`SimpleArcChannelManager`] is useful when you need a [`ChannelManager`] with a static lifetime, e.g.
1467/// when you're using `lightning-net-tokio` (since `tokio::spawn` requires parameters with static
1468/// lifetimes). Other times you can afford a reference, which is more efficient, in which case
1469/// [`SimpleRefChannelManager`] is the more appropriate type. Defining these type aliases prevents
1470/// issues such as overly long function definitions. Note that the `ChannelManager` can take any type
1471/// that implements [`NodeSigner`], [`EntropySource`], and [`SignerProvider`] for its keys manager,
1472/// or, respectively, [`Router`] for its router, but this type alias chooses the concrete types
1473/// of [`KeysManager`] and [`DefaultRouter`].
1474///
1475/// This is not exported to bindings users as type aliases aren't supported in most languages.
1476#[cfg(not(c_bindings))]
1477pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
1478	Arc<M>,
1479	Arc<T>,
1480	Arc<KeysManager>,
1481	Arc<KeysManager>,
1482	Arc<KeysManager>,
1483	Arc<F>,
1484	Arc<DefaultRouter<
1485		Arc<NetworkGraph<Arc<L>>>,
1486		Arc<L>,
1487		Arc<KeysManager>,
1488		Arc<RwLock<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
1489		ProbabilisticScoringFeeParameters,
1490		ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
1491	>>,
1492	Arc<DefaultMessageRouter<
1493		Arc<NetworkGraph<Arc<L>>>,
1494		Arc<L>,
1495		Arc<KeysManager>,
1496	>>,
1497	Arc<L>
1498>;
1499
1500/// [`SimpleRefChannelManager`] is a type alias for a ChannelManager reference, and is the reference
1501/// counterpart to the [`SimpleArcChannelManager`] type alias. Use this type by default when you don't
1502/// need a ChannelManager with a static lifetime. You'll need a static lifetime in cases such as
1503/// usage of lightning-net-tokio (since `tokio::spawn` requires parameters with static lifetimes).
1504/// But if this is not necessary, using a reference is more efficient. Defining these type aliases
1505/// issues such as overly long function definitions. Note that the ChannelManager can take any type
1506/// that implements [`NodeSigner`], [`EntropySource`], and [`SignerProvider`] for its keys manager,
1507/// or, respectively, [`Router`]  for its router, but this type alias chooses the concrete types
1508/// of [`KeysManager`] and [`DefaultRouter`].
1509///
1510/// This is not exported to bindings users as type aliases aren't supported in most languages.
1511#[cfg(not(c_bindings))]
1512pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, 'i, M, T, F, L> =
1513	ChannelManager<
1514		&'a M,
1515		&'b T,
1516		&'c KeysManager,
1517		&'c KeysManager,
1518		&'c KeysManager,
1519		&'d F,
1520		&'e DefaultRouter<
1521			&'f NetworkGraph<&'g L>,
1522			&'g L,
1523			&'c KeysManager,
1524			&'h RwLock<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>,
1525			ProbabilisticScoringFeeParameters,
1526			ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>
1527		>,
1528		&'i DefaultMessageRouter<
1529			&'f NetworkGraph<&'g L>,
1530			&'g L,
1531			&'c KeysManager,
1532		>,
1533		&'g L
1534	>;
1535
1536/// A trivial trait which describes any [`ChannelManager`].
1537///
1538/// This is not exported to bindings users as general cover traits aren't useful in other
1539/// languages.
1540pub trait AChannelManager {
1541	/// A type implementing [`chain::Watch`].
1542	type Watch: chain::Watch<Self::Signer> + ?Sized;
1543	/// A type that may be dereferenced to [`Self::Watch`].
1544	type M: Deref<Target = Self::Watch>;
1545	/// A type implementing [`BroadcasterInterface`].
1546	type Broadcaster: BroadcasterInterface + ?Sized;
1547	/// A type that may be dereferenced to [`Self::Broadcaster`].
1548	type T: Deref<Target = Self::Broadcaster>;
1549	/// A type implementing [`EntropySource`].
1550	type EntropySource: EntropySource + ?Sized;
1551	/// A type that may be dereferenced to [`Self::EntropySource`].
1552	type ES: Deref<Target = Self::EntropySource>;
1553	/// A type implementing [`NodeSigner`].
1554	type NodeSigner: NodeSigner + ?Sized;
1555	/// A type that may be dereferenced to [`Self::NodeSigner`].
1556	type NS: Deref<Target = Self::NodeSigner>;
1557	/// A type implementing [`EcdsaChannelSigner`].
1558	type Signer: EcdsaChannelSigner + Sized;
1559	/// A type implementing [`SignerProvider`] for [`Self::Signer`].
1560	type SignerProvider: SignerProvider<EcdsaSigner= Self::Signer> + ?Sized;
1561	/// A type that may be dereferenced to [`Self::SignerProvider`].
1562	type SP: Deref<Target = Self::SignerProvider>;
1563	/// A type implementing [`FeeEstimator`].
1564	type FeeEstimator: FeeEstimator + ?Sized;
1565	/// A type that may be dereferenced to [`Self::FeeEstimator`].
1566	type F: Deref<Target = Self::FeeEstimator>;
1567	/// A type implementing [`Router`].
1568	type Router: Router + ?Sized;
1569	/// A type that may be dereferenced to [`Self::Router`].
1570	type R: Deref<Target = Self::Router>;
1571	/// A type implementing [`MessageRouter`].
1572	type MessageRouter: MessageRouter + ?Sized;
1573	/// A type that may be dereferenced to [`Self::MessageRouter`].
1574	type MR: Deref<Target = Self::MessageRouter>;
1575	/// A type implementing [`Logger`].
1576	type Logger: Logger + ?Sized;
1577	/// A type that may be dereferenced to [`Self::Logger`].
1578	type L: Deref<Target = Self::Logger>;
1579	/// Returns a reference to the actual [`ChannelManager`] object.
1580	fn get_cm(&self) -> &ChannelManager<Self::M, Self::T, Self::ES, Self::NS, Self::SP, Self::F, Self::R, Self::MR, Self::L>;
1581}
1582
1583impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> AChannelManager
1584for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
1585where
1586	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
1587	T::Target: BroadcasterInterface,
1588	ES::Target: EntropySource,
1589	NS::Target: NodeSigner,
1590	SP::Target: SignerProvider,
1591	F::Target: FeeEstimator,
1592	R::Target: Router,
1593	MR::Target: MessageRouter,
1594	L::Target: Logger,
1595{
1596	type Watch = M::Target;
1597	type M = M;
1598	type Broadcaster = T::Target;
1599	type T = T;
1600	type EntropySource = ES::Target;
1601	type ES = ES;
1602	type NodeSigner = NS::Target;
1603	type NS = NS;
1604	type Signer = <SP::Target as SignerProvider>::EcdsaSigner;
1605	type SignerProvider = SP::Target;
1606	type SP = SP;
1607	type FeeEstimator = F::Target;
1608	type F = F;
1609	type Router = R::Target;
1610	type R = R;
1611	type MessageRouter = MR::Target;
1612	type MR = MR;
1613	type Logger = L::Target;
1614	type L = L;
1615	fn get_cm(&self) -> &ChannelManager<M, T, ES, NS, SP, F, R, MR, L> { self }
1616}
1617
1618/// A lightning node's channel state machine and payment management logic, which facilitates
1619/// sending, forwarding, and receiving payments through lightning channels.
1620///
1621/// [`ChannelManager`] is parameterized by a number of components to achieve this.
1622/// - [`chain::Watch`] (typically [`ChainMonitor`]) for on-chain monitoring and enforcement of each
1623///   channel
1624/// - [`BroadcasterInterface`] for broadcasting transactions related to opening, funding, and
1625///   closing channels
1626/// - [`EntropySource`] for providing random data needed for cryptographic operations
1627/// - [`NodeSigner`] for cryptographic operations scoped to the node
1628/// - [`SignerProvider`] for providing signers whose operations are scoped to individual channels
1629/// - [`FeeEstimator`] to determine transaction fee rates needed to have a transaction mined in a
1630///   timely manner
1631/// - [`Router`] for finding payment paths when initiating and retrying payments
1632/// - [`MessageRouter`] for finding message paths when initiating and retrying onion messages
1633/// - [`Logger`] for logging operational information of varying degrees
1634///
1635/// Additionally, it implements the following traits:
1636/// - [`ChannelMessageHandler`] to handle off-chain channel activity from peers
1637/// - [`MessageSendEventsProvider`] to similarly send such messages to peers
1638/// - [`OffersMessageHandler`] for BOLT 12 message handling and sending
1639/// - [`EventsProvider`] to generate user-actionable [`Event`]s
1640/// - [`chain::Listen`] and [`chain::Confirm`] for notification of on-chain activity
1641///
1642/// Thus, [`ChannelManager`] is typically used to parameterize a [`MessageHandler`] and an
1643/// [`OnionMessenger`]. The latter is required to support BOLT 12 functionality.
1644///
1645/// # `ChannelManager` vs `ChannelMonitor`
1646///
1647/// It's important to distinguish between the *off-chain* management and *on-chain* enforcement of
1648/// lightning channels. [`ChannelManager`] exchanges messages with peers to manage the off-chain
1649/// state of each channel. During this process, it generates a [`ChannelMonitor`] for each channel
1650/// and a [`ChannelMonitorUpdate`] for each relevant change, notifying its parameterized
1651/// [`chain::Watch`] of them.
1652///
1653/// An implementation of [`chain::Watch`], such as [`ChainMonitor`], is responsible for aggregating
1654/// these [`ChannelMonitor`]s and applying any [`ChannelMonitorUpdate`]s to them. It then monitors
1655/// for any pertinent on-chain activity, enforcing claims as needed.
1656///
1657/// This division of off-chain management and on-chain enforcement allows for interesting node
1658/// setups. For instance, on-chain enforcement could be moved to a separate host or have added
1659/// redundancy, possibly as a watchtower. See [`chain::Watch`] for the relevant interface.
1660///
1661/// # Initialization
1662///
1663/// Use [`ChannelManager::new`] with the most recent [`BlockHash`] when creating a fresh instance.
1664/// Otherwise, if restarting, construct [`ChannelManagerReadArgs`] with the necessary parameters and
1665/// references to any deserialized [`ChannelMonitor`]s that were previously persisted. Use this to
1666/// deserialize the [`ChannelManager`] and feed it any new chain data since it was last online, as
1667/// detailed in the [`ChannelManagerReadArgs`] documentation.
1668///
1669/// ```
1670/// use bitcoin::BlockHash;
1671/// use bitcoin::network::Network;
1672/// use lightning::chain::BestBlock;
1673/// # use lightning::chain::channelmonitor::ChannelMonitor;
1674/// use lightning::ln::channelmanager::{ChainParameters, ChannelManager, ChannelManagerReadArgs};
1675/// # use lightning::routing::gossip::NetworkGraph;
1676/// use lightning::util::config::UserConfig;
1677/// use lightning::util::ser::ReadableArgs;
1678///
1679/// # fn read_channel_monitors() -> Vec<ChannelMonitor<lightning::sign::InMemorySigner>> { vec![] }
1680/// # fn example<
1681/// #     'a,
1682/// #     L: lightning::util::logger::Logger,
1683/// #     ES: lightning::sign::EntropySource,
1684/// #     S: for <'b> lightning::routing::scoring::LockableScore<'b, ScoreLookUp = SL>,
1685/// #     SL: lightning::routing::scoring::ScoreLookUp<ScoreParams = SP>,
1686/// #     SP: Sized,
1687/// #     R: lightning::io::Read,
1688/// # >(
1689/// #     fee_estimator: &dyn lightning::chain::chaininterface::FeeEstimator,
1690/// #     chain_monitor: &dyn lightning::chain::Watch<lightning::sign::InMemorySigner>,
1691/// #     tx_broadcaster: &dyn lightning::chain::chaininterface::BroadcasterInterface,
1692/// #     router: &lightning::routing::router::DefaultRouter<&NetworkGraph<&'a L>, &'a L, &ES, &S, SP, SL>,
1693/// #     message_router: &lightning::onion_message::messenger::DefaultMessageRouter<&NetworkGraph<&'a L>, &'a L, &ES>,
1694/// #     logger: &L,
1695/// #     entropy_source: &ES,
1696/// #     node_signer: &dyn lightning::sign::NodeSigner,
1697/// #     signer_provider: &lightning::sign::DynSignerProvider,
1698/// #     best_block: lightning::chain::BestBlock,
1699/// #     current_timestamp: u32,
1700/// #     mut reader: R,
1701/// # ) -> Result<(), lightning::ln::msgs::DecodeError> {
1702/// // Fresh start with no channels
1703/// let params = ChainParameters {
1704///     network: Network::Bitcoin,
1705///     best_block,
1706/// };
1707/// let default_config = UserConfig::default();
1708/// let channel_manager = ChannelManager::new(
1709///     fee_estimator, chain_monitor, tx_broadcaster, router, message_router, logger,
1710///     entropy_source, node_signer, signer_provider, default_config, params, current_timestamp,
1711/// );
1712///
1713/// // Restart from deserialized data
1714/// let mut channel_monitors = read_channel_monitors();
1715/// let args = ChannelManagerReadArgs::new(
1716///     entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster,
1717///     router, message_router, logger, default_config, channel_monitors.iter().collect(),
1718/// );
1719/// let (block_hash, channel_manager) =
1720///     <(BlockHash, ChannelManager<_, _, _, _, _, _, _, _, _>)>::read(&mut reader, args)?;
1721///
1722/// // Update the ChannelManager and ChannelMonitors with the latest chain data
1723/// // ...
1724///
1725/// // Move the monitors to the ChannelManager's chain::Watch parameter
1726/// for monitor in channel_monitors {
1727///     chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
1728/// }
1729/// # Ok(())
1730/// # }
1731/// ```
1732///
1733/// # Operation
1734///
1735/// The following is required for [`ChannelManager`] to function properly:
1736/// - Handle messages from peers using its [`ChannelMessageHandler`] implementation (typically
1737///   called by [`PeerManager::read_event`] when processing network I/O)
1738/// - Send messages to peers obtained via its [`MessageSendEventsProvider`] implementation
1739///   (typically initiated when [`PeerManager::process_events`] is called)
1740/// - Feed on-chain activity using either its [`chain::Listen`] or [`chain::Confirm`] implementation
1741///   as documented by those traits
1742/// - Perform any periodic channel and payment checks by calling [`timer_tick_occurred`] roughly
1743///   every minute
1744/// - Persist to disk whenever [`get_and_clear_needs_persistence`] returns `true` using a
1745///   [`Persister`] such as a [`KVStore`] implementation
1746/// - Handle [`Event`]s obtained via its [`EventsProvider`] implementation
1747///
1748/// The [`Future`] returned by [`get_event_or_persistence_needed_future`] is useful in determining
1749/// when the last two requirements need to be checked.
1750///
1751/// The [`lightning-block-sync`] and [`lightning-transaction-sync`] crates provide utilities that
1752/// simplify feeding in on-chain activity using the [`chain::Listen`] and [`chain::Confirm`] traits,
1753/// respectively. The remaining requirements can be met using the [`lightning-background-processor`]
1754/// crate. For languages other than Rust, the availability of similar utilities may vary.
1755///
1756/// # Channels
1757///
1758/// [`ChannelManager`]'s primary function involves managing a channel state. Without channels,
1759/// payments can't be sent. Use [`list_channels`] or [`list_usable_channels`] for a snapshot of the
1760/// currently open channels.
1761///
1762/// ```
1763/// # use lightning::ln::channelmanager::AChannelManager;
1764/// #
1765/// # fn example<T: AChannelManager>(channel_manager: T) {
1766/// # let channel_manager = channel_manager.get_cm();
1767/// let channels = channel_manager.list_usable_channels();
1768/// for details in channels {
1769///     println!("{:?}", details);
1770/// }
1771/// # }
1772/// ```
1773///
1774/// Each channel is identified using a [`ChannelId`], which will change throughout the channel's
1775/// life cycle. Additionally, channels are assigned a `user_channel_id`, which is given in
1776/// [`Event`]s associated with the channel and serves as a fixed identifier but is otherwise unused
1777/// by [`ChannelManager`].
1778///
1779/// ## Opening Channels
1780///
1781/// To an open a channel with a peer, call [`create_channel`]. This will initiate the process of
1782/// opening an outbound channel, which requires self-funding when handling
1783/// [`Event::FundingGenerationReady`].
1784///
1785/// ```
1786/// # use bitcoin::{ScriptBuf, Transaction};
1787/// # use bitcoin::secp256k1::PublicKey;
1788/// # use lightning::ln::channelmanager::AChannelManager;
1789/// # use lightning::events::{Event, EventsProvider};
1790/// #
1791/// # trait Wallet {
1792/// #     fn create_funding_transaction(
1793/// #         &self, _amount_sats: u64, _output_script: ScriptBuf
1794/// #     ) -> Transaction;
1795/// # }
1796/// #
1797/// # fn example<T: AChannelManager, W: Wallet>(channel_manager: T, wallet: W, peer_id: PublicKey) {
1798/// # let channel_manager = channel_manager.get_cm();
1799/// let value_sats = 1_000_000;
1800/// let push_msats = 10_000_000;
1801/// match channel_manager.create_channel(peer_id, value_sats, push_msats, 42, None, None) {
1802///     Ok(channel_id) => println!("Opening channel {}", channel_id),
1803///     Err(e) => println!("Error opening channel: {:?}", e),
1804/// }
1805///
1806/// // On the event processing thread once the peer has responded
1807/// channel_manager.process_pending_events(&|event| {
1808///     match event {
1809///         Event::FundingGenerationReady {
1810///             temporary_channel_id, counterparty_node_id, channel_value_satoshis, output_script,
1811///             user_channel_id, ..
1812///         } => {
1813///             assert_eq!(user_channel_id, 42);
1814///             let funding_transaction = wallet.create_funding_transaction(
1815///                 channel_value_satoshis, output_script
1816///             );
1817///             match channel_manager.funding_transaction_generated(
1818///                 temporary_channel_id, counterparty_node_id, funding_transaction
1819///             ) {
1820///                 Ok(()) => println!("Funding channel {}", temporary_channel_id),
1821///                 Err(e) => println!("Error funding channel {}: {:?}", temporary_channel_id, e),
1822///             }
1823///         },
1824///         Event::ChannelPending { channel_id, user_channel_id, former_temporary_channel_id, .. } => {
1825///             assert_eq!(user_channel_id, 42);
1826///             println!(
1827///                 "Channel {} now {} pending (funding transaction has been broadcasted)", channel_id,
1828///                 former_temporary_channel_id.unwrap()
1829///             );
1830///         },
1831///         Event::ChannelReady { channel_id, user_channel_id, .. } => {
1832///             assert_eq!(user_channel_id, 42);
1833///             println!("Channel {} ready", channel_id);
1834///         },
1835///         // ...
1836///     #     _ => {},
1837///     }
1838///     Ok(())
1839/// });
1840/// # }
1841/// ```
1842///
1843/// ## Accepting Channels
1844///
1845/// Inbound channels are initiated by peers and are automatically accepted unless [`ChannelManager`]
1846/// has [`UserConfig::manually_accept_inbound_channels`] set. In that case, the channel may be
1847/// either accepted or rejected when handling [`Event::OpenChannelRequest`].
1848///
1849/// ```
1850/// # use bitcoin::secp256k1::PublicKey;
1851/// # use lightning::ln::channelmanager::AChannelManager;
1852/// # use lightning::events::{Event, EventsProvider};
1853/// #
1854/// # fn is_trusted(counterparty_node_id: PublicKey) -> bool {
1855/// #     // ...
1856/// #     unimplemented!()
1857/// # }
1858/// #
1859/// # fn example<T: AChannelManager>(channel_manager: T) {
1860/// # let channel_manager = channel_manager.get_cm();
1861/// # let error_message = "Channel force-closed";
1862/// channel_manager.process_pending_events(&|event| {
1863///     match event {
1864///         Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, ..  } => {
1865///             if !is_trusted(counterparty_node_id) {
1866///                 match channel_manager.force_close_without_broadcasting_txn(
1867///                     &temporary_channel_id, &counterparty_node_id, error_message.to_string()
1868///                 ) {
1869///                     Ok(()) => println!("Rejecting channel {}", temporary_channel_id),
1870///                     Err(e) => println!("Error rejecting channel {}: {:?}", temporary_channel_id, e),
1871///                 }
1872///                 return Ok(());
1873///             }
1874///
1875///             let user_channel_id = 43;
1876///             match channel_manager.accept_inbound_channel(
1877///                 &temporary_channel_id, &counterparty_node_id, user_channel_id
1878///             ) {
1879///                 Ok(()) => println!("Accepting channel {}", temporary_channel_id),
1880///                 Err(e) => println!("Error accepting channel {}: {:?}", temporary_channel_id, e),
1881///             }
1882///         },
1883///         // ...
1884///     #     _ => {},
1885///     }
1886///     Ok(())
1887/// });
1888/// # }
1889/// ```
1890///
1891/// ## Closing Channels
1892///
1893/// There are two ways to close a channel: either cooperatively using [`close_channel`] or
1894/// unilaterally using [`force_close_broadcasting_latest_txn`]. The former is ideal as it makes for
1895/// lower fees and immediate access to funds. However, the latter may be necessary if the
1896/// counterparty isn't behaving properly or has gone offline. [`Event::ChannelClosed`] is generated
1897/// once the channel has been closed successfully.
1898///
1899/// ```
1900/// # use bitcoin::secp256k1::PublicKey;
1901/// # use lightning::ln::types::ChannelId;
1902/// # use lightning::ln::channelmanager::AChannelManager;
1903/// # use lightning::events::{Event, EventsProvider};
1904/// #
1905/// # fn example<T: AChannelManager>(
1906/// #     channel_manager: T, channel_id: ChannelId, counterparty_node_id: PublicKey
1907/// # ) {
1908/// # let channel_manager = channel_manager.get_cm();
1909/// match channel_manager.close_channel(&channel_id, &counterparty_node_id) {
1910///     Ok(()) => println!("Closing channel {}", channel_id),
1911///     Err(e) => println!("Error closing channel {}: {:?}", channel_id, e),
1912/// }
1913///
1914/// // On the event processing thread
1915/// channel_manager.process_pending_events(&|event| {
1916///     match event {
1917///         Event::ChannelClosed { channel_id, user_channel_id, ..  } => {
1918///             assert_eq!(user_channel_id, 42);
1919///             println!("Channel {} closed", channel_id);
1920///         },
1921///         // ...
1922///     #     _ => {},
1923///     }
1924///     Ok(())
1925/// });
1926/// # }
1927/// ```
1928///
1929/// # Payments
1930///
1931/// [`ChannelManager`] is responsible for sending, forwarding, and receiving payments through its
1932/// channels. A payment is typically initiated from a [BOLT 11] invoice or a [BOLT 12] offer, though
1933/// spontaneous (i.e., keysend) payments are also possible. Incoming payments don't require
1934/// maintaining any additional state as [`ChannelManager`] can reconstruct the [`PaymentPreimage`]
1935/// from the [`PaymentSecret`]. Sending payments, however, require tracking in order to retry failed
1936/// HTLCs.
1937///
1938/// After a payment is initiated, it will appear in [`list_recent_payments`] until a short time
1939/// after either an [`Event::PaymentSent`] or [`Event::PaymentFailed`] is handled. Failed HTLCs
1940/// for a payment will be retried according to the payment's [`Retry`] strategy or until
1941/// [`abandon_payment`] is called.
1942///
1943/// ## BOLT 11 Invoices
1944///
1945/// The [`lightning-invoice`] crate is useful for creating BOLT 11 invoices. However, in order to
1946/// construct a [`Bolt11Invoice`] that is compatible with [`ChannelManager`], use
1947/// [`create_bolt11_invoice`]. This method serves as a convenience for building invoices with the
1948/// [`PaymentHash`] and [`PaymentSecret`] returned from [`create_inbound_payment`]. To provide your
1949/// own [`PaymentHash`], override the appropriate [`Bolt11InvoiceParameters`], which is equivalent
1950/// to using [`create_inbound_payment_for_hash`].
1951///
1952/// [`ChannelManager`] generates an [`Event::PaymentClaimable`] once the full payment has been
1953/// received. Call [`claim_funds`] to release the [`PaymentPreimage`], which in turn will result in
1954/// an [`Event::PaymentClaimed`].
1955///
1956/// ```
1957/// # use lightning::events::{Event, EventsProvider, PaymentPurpose};
1958/// # use lightning::ln::channelmanager::{AChannelManager, Bolt11InvoiceParameters};
1959/// #
1960/// # fn example<T: AChannelManager>(channel_manager: T) {
1961/// # let channel_manager = channel_manager.get_cm();
1962/// let params = Bolt11InvoiceParameters {
1963///     amount_msats: Some(10_000_000),
1964///     invoice_expiry_delta_secs: Some(3600),
1965///     ..Default::default()
1966/// };
1967/// let invoice = match channel_manager.create_bolt11_invoice(params) {
1968///     Ok(invoice) => {
1969///         println!("Creating invoice with payment hash {}", invoice.payment_hash());
1970///         invoice
1971///     },
1972///     Err(e) => panic!("Error creating invoice: {}", e),
1973/// };
1974///
1975/// // On the event processing thread
1976/// channel_manager.process_pending_events(&|event| {
1977///     match event {
1978///         Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
1979///             PaymentPurpose::Bolt11InvoicePayment { payment_preimage: Some(payment_preimage), .. } => {
1980///                 assert_eq!(payment_hash.0, invoice.payment_hash().as_ref());
1981///                 println!("Claiming payment {}", payment_hash);
1982///                 channel_manager.claim_funds(payment_preimage);
1983///             },
1984///             PaymentPurpose::Bolt11InvoicePayment { payment_preimage: None, .. } => {
1985///                 println!("Unknown payment hash: {}", payment_hash);
1986///             },
1987///             PaymentPurpose::SpontaneousPayment(payment_preimage) => {
1988///                 assert_ne!(payment_hash.0, invoice.payment_hash().as_ref());
1989///                 println!("Claiming spontaneous payment {}", payment_hash);
1990///                 channel_manager.claim_funds(payment_preimage);
1991///             },
1992///             // ...
1993/// #           _ => {},
1994///         },
1995///         Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
1996///             assert_eq!(payment_hash.0, invoice.payment_hash().as_ref());
1997///             println!("Claimed {} msats", amount_msat);
1998///         },
1999///         // ...
2000/// #       _ => {},
2001///     }
2002///     Ok(())
2003/// });
2004/// # }
2005/// ```
2006///
2007/// For paying an invoice, see the [`bolt11_payment`] module with convenience functions for use with
2008/// [`send_payment`].
2009///
2010/// ```
2011/// # use lightning::events::{Event, EventsProvider};
2012/// # use lightning::types::payment::PaymentHash;
2013/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, RecipientOnionFields, Retry};
2014/// # use lightning::routing::router::RouteParameters;
2015/// #
2016/// # fn example<T: AChannelManager>(
2017/// #     channel_manager: T, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields,
2018/// #     route_params: RouteParameters, retry: Retry
2019/// # ) {
2020/// # let channel_manager = channel_manager.get_cm();
2021/// // let (payment_hash, recipient_onion, route_params) =
2022/// //     payment::payment_parameters_from_invoice(&invoice);
2023/// let payment_id = PaymentId([42; 32]);
2024/// match channel_manager.send_payment(
2025///     payment_hash, recipient_onion, payment_id, route_params, retry
2026/// ) {
2027///     Ok(()) => println!("Sending payment with hash {}", payment_hash),
2028///     Err(e) => println!("Failed sending payment with hash {}: {:?}", payment_hash, e),
2029/// }
2030///
2031/// let expected_payment_id = payment_id;
2032/// let expected_payment_hash = payment_hash;
2033/// assert!(
2034///     channel_manager.list_recent_payments().iter().find(|details| matches!(
2035///         details,
2036///         RecentPaymentDetails::Pending {
2037///             payment_id: expected_payment_id,
2038///             payment_hash: expected_payment_hash,
2039///             ..
2040///         }
2041///     )).is_some()
2042/// );
2043///
2044/// // On the event processing thread
2045/// channel_manager.process_pending_events(&|event| {
2046///     match event {
2047///         Event::PaymentSent { payment_hash, .. } => println!("Paid {}", payment_hash),
2048///         Event::PaymentFailed { payment_hash: Some(payment_hash), .. } =>
2049///             println!("Failed paying {}", payment_hash),
2050///         // ...
2051///     #     _ => {},
2052///     }
2053///     Ok(())
2054/// });
2055/// # }
2056/// ```
2057///
2058/// ## BOLT 12 Offers
2059///
2060/// The [`offers`] module is useful for creating BOLT 12 offers. An [`Offer`] is a precursor to a
2061/// [`Bolt12Invoice`], which must first be requested by the payer. The interchange of these messages
2062/// as defined in the specification is handled by [`ChannelManager`] and its implementation of
2063/// [`OffersMessageHandler`]. However, this only works with an [`Offer`] created using a builder
2064/// returned by [`create_offer_builder`]. With this approach, BOLT 12 offers and invoices are
2065/// stateless just as BOLT 11 invoices are.
2066///
2067/// ```
2068/// # use lightning::events::{Event, EventsProvider, PaymentPurpose};
2069/// # use lightning::ln::channelmanager::AChannelManager;
2070/// # use lightning::offers::parse::Bolt12SemanticError;
2071/// #
2072/// # fn example<T: AChannelManager>(channel_manager: T) -> Result<(), Bolt12SemanticError> {
2073/// # let channel_manager = channel_manager.get_cm();
2074/// # let absolute_expiry = None;
2075/// let offer = channel_manager
2076///     .create_offer_builder(absolute_expiry)?
2077/// # ;
2078/// # // Needed for compiling for c_bindings
2079/// # let builder: lightning::offers::offer::OfferBuilder<_, _> = offer.into();
2080/// # let offer = builder
2081///     .description("coffee".to_string())
2082///     .amount_msats(10_000_000)
2083///     .build()?;
2084/// let bech32_offer = offer.to_string();
2085///
2086/// // On the event processing thread
2087/// channel_manager.process_pending_events(&|event| {
2088///     match event {
2089///         Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
2090///             PaymentPurpose::Bolt12OfferPayment { payment_preimage: Some(payment_preimage), .. } => {
2091///                 println!("Claiming payment {}", payment_hash);
2092///                 channel_manager.claim_funds(payment_preimage);
2093///             },
2094///             PaymentPurpose::Bolt12OfferPayment { payment_preimage: None, .. } => {
2095///                 println!("Unknown payment hash: {}", payment_hash);
2096///             }
2097/// #           _ => {},
2098///         },
2099///         Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
2100///             println!("Claimed {} msats", amount_msat);
2101///         },
2102///         // ...
2103///     #     _ => {},
2104///     }
2105///     Ok(())
2106/// });
2107/// # Ok(())
2108/// # }
2109/// ```
2110///
2111/// Use [`pay_for_offer`] to initiated payment, which sends an [`InvoiceRequest`] for an [`Offer`]
2112/// and pays the [`Bolt12Invoice`] response.
2113///
2114/// ```
2115/// # use lightning::events::{Event, EventsProvider};
2116/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, Retry};
2117/// # use lightning::offers::offer::Offer;
2118/// #
2119/// # fn example<T: AChannelManager>(
2120/// #     channel_manager: T, offer: &Offer, quantity: Option<u64>, amount_msats: Option<u64>,
2121/// #     payer_note: Option<String>, retry: Retry, max_total_routing_fee_msat: Option<u64>
2122/// # ) {
2123/// # let channel_manager = channel_manager.get_cm();
2124/// let payment_id = PaymentId([42; 32]);
2125/// match channel_manager.pay_for_offer(
2126///     offer, quantity, amount_msats, payer_note, payment_id, retry, max_total_routing_fee_msat
2127/// ) {
2128///     Ok(()) => println!("Requesting invoice for offer"),
2129///     Err(e) => println!("Unable to request invoice for offer: {:?}", e),
2130/// }
2131///
2132/// // First the payment will be waiting on an invoice
2133/// let expected_payment_id = payment_id;
2134/// assert!(
2135///     channel_manager.list_recent_payments().iter().find(|details| matches!(
2136///         details,
2137///         RecentPaymentDetails::AwaitingInvoice { payment_id: expected_payment_id }
2138///     )).is_some()
2139/// );
2140///
2141/// // Once the invoice is received, a payment will be sent
2142/// assert!(
2143///     channel_manager.list_recent_payments().iter().find(|details| matches!(
2144///         details,
2145///         RecentPaymentDetails::Pending { payment_id: expected_payment_id, ..  }
2146///     )).is_some()
2147/// );
2148///
2149/// // On the event processing thread
2150/// channel_manager.process_pending_events(&|event| {
2151///     match event {
2152///         Event::PaymentSent { payment_id: Some(payment_id), .. } => println!("Paid {}", payment_id),
2153///         Event::PaymentFailed { payment_id, .. } => println!("Failed paying {}", payment_id),
2154///         // ...
2155///     #     _ => {},
2156///     }
2157///     Ok(())
2158/// });
2159/// # }
2160/// ```
2161///
2162/// ## BOLT 12 Refunds
2163///
2164/// A [`Refund`] is a request for an invoice to be paid. Like *paying* for an [`Offer`], *creating*
2165/// a [`Refund`] involves maintaining state since it represents a future outbound payment.
2166/// Therefore, use [`create_refund_builder`] when creating one, otherwise [`ChannelManager`] will
2167/// refuse to pay any corresponding [`Bolt12Invoice`] that it receives.
2168///
2169/// ```
2170/// # use core::time::Duration;
2171/// # use lightning::events::{Event, EventsProvider};
2172/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, Retry};
2173/// # use lightning::offers::parse::Bolt12SemanticError;
2174/// #
2175/// # fn example<T: AChannelManager>(
2176/// #     channel_manager: T, amount_msats: u64, absolute_expiry: Duration, retry: Retry,
2177/// #     max_total_routing_fee_msat: Option<u64>
2178/// # ) -> Result<(), Bolt12SemanticError> {
2179/// # let channel_manager = channel_manager.get_cm();
2180/// let payment_id = PaymentId([42; 32]);
2181/// let refund = channel_manager
2182///     .create_refund_builder(
2183///         amount_msats, absolute_expiry, payment_id, retry, max_total_routing_fee_msat
2184///     )?
2185/// # ;
2186/// # // Needed for compiling for c_bindings
2187/// # let builder: lightning::offers::refund::RefundBuilder<_> = refund.into();
2188/// # let refund = builder
2189///     .description("coffee".to_string())
2190///     .payer_note("refund for order 1234".to_string())
2191///     .build()?;
2192/// let bech32_refund = refund.to_string();
2193///
2194/// // First the payment will be waiting on an invoice
2195/// let expected_payment_id = payment_id;
2196/// assert!(
2197///     channel_manager.list_recent_payments().iter().find(|details| matches!(
2198///         details,
2199///         RecentPaymentDetails::AwaitingInvoice { payment_id: expected_payment_id }
2200///     )).is_some()
2201/// );
2202///
2203/// // Once the invoice is received, a payment will be sent
2204/// assert!(
2205///     channel_manager.list_recent_payments().iter().find(|details| matches!(
2206///         details,
2207///         RecentPaymentDetails::Pending { payment_id: expected_payment_id, ..  }
2208///     )).is_some()
2209/// );
2210///
2211/// // On the event processing thread
2212/// channel_manager.process_pending_events(&|event| {
2213///     match event {
2214///         Event::PaymentSent { payment_id: Some(payment_id), .. } => println!("Paid {}", payment_id),
2215///         Event::PaymentFailed { payment_id, .. } => println!("Failed paying {}", payment_id),
2216///         // ...
2217///     #     _ => {},
2218///     }
2219///     Ok(())
2220/// });
2221/// # Ok(())
2222/// # }
2223/// ```
2224///
2225/// Use [`request_refund_payment`] to send a [`Bolt12Invoice`] for receiving the refund. Similar to
2226/// *creating* an [`Offer`], this is stateless as it represents an inbound payment.
2227///
2228/// ```
2229/// # use lightning::events::{Event, EventsProvider, PaymentPurpose};
2230/// # use lightning::ln::channelmanager::AChannelManager;
2231/// # use lightning::offers::refund::Refund;
2232/// #
2233/// # fn example<T: AChannelManager>(channel_manager: T, refund: &Refund) {
2234/// # let channel_manager = channel_manager.get_cm();
2235/// let known_payment_hash = match channel_manager.request_refund_payment(refund) {
2236///     Ok(invoice) => {
2237///         let payment_hash = invoice.payment_hash();
2238///         println!("Requesting refund payment {}", payment_hash);
2239///         payment_hash
2240///     },
2241///     Err(e) => panic!("Unable to request payment for refund: {:?}", e),
2242/// };
2243///
2244/// // On the event processing thread
2245/// channel_manager.process_pending_events(&|event| {
2246///     match event {
2247///         Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
2248///             PaymentPurpose::Bolt12RefundPayment { payment_preimage: Some(payment_preimage), .. } => {
2249///                 assert_eq!(payment_hash, known_payment_hash);
2250///                 println!("Claiming payment {}", payment_hash);
2251///                 channel_manager.claim_funds(payment_preimage);
2252///             },
2253///             PaymentPurpose::Bolt12RefundPayment { payment_preimage: None, .. } => {
2254///                 println!("Unknown payment hash: {}", payment_hash);
2255///             },
2256///             // ...
2257/// #           _ => {},
2258///     },
2259///     Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
2260///         assert_eq!(payment_hash, known_payment_hash);
2261///         println!("Claimed {} msats", amount_msat);
2262///     },
2263///     // ...
2264/// #     _ => {},
2265///     }
2266///     Ok(())
2267/// });
2268/// # }
2269/// ```
2270///
2271/// # Persistence
2272///
2273/// Implements [`Writeable`] to write out all channel state to disk. Implies [`peer_disconnected`] for
2274/// all peers during write/read (though does not modify this instance, only the instance being
2275/// serialized). This will result in any channels which have not yet exchanged [`funding_created`] (i.e.,
2276/// called [`funding_transaction_generated`] for outbound channels) being closed.
2277///
2278/// Note that you can be a bit lazier about writing out `ChannelManager` than you can be with
2279/// [`ChannelMonitor`]. With [`ChannelMonitor`] you MUST durably write each
2280/// [`ChannelMonitorUpdate`] before returning from
2281/// [`chain::Watch::watch_channel`]/[`update_channel`] or before completing async writes. With
2282/// `ChannelManager`s, writing updates happens out-of-band (and will prevent any other
2283/// `ChannelManager` operations from occurring during the serialization process). If the
2284/// deserialized version is out-of-date compared to the [`ChannelMonitor`] passed by reference to
2285/// [`read`], those channels will be force-closed based on the `ChannelMonitor` state and no funds
2286/// will be lost (modulo on-chain transaction fees).
2287///
2288/// Note that the deserializer is only implemented for `(`[`BlockHash`]`, `[`ChannelManager`]`)`, which
2289/// tells you the last block hash which was connected. You should get the best block tip before using the manager.
2290/// See [`chain::Listen`] and [`chain::Confirm`] for more details.
2291///
2292/// # `ChannelUpdate` Messages
2293///
2294/// Note that `ChannelManager` is responsible for tracking liveness of its channels and generating
2295/// [`ChannelUpdate`] messages informing peers that the channel is temporarily disabled. To avoid
2296/// spam due to quick disconnection/reconnection, updates are not sent until the channel has been
2297/// offline for a full minute. In order to track this, you must call
2298/// [`timer_tick_occurred`] roughly once per minute, though it doesn't have to be perfect.
2299///
2300/// # DoS Mitigation
2301///
2302/// To avoid trivial DoS issues, `ChannelManager` limits the number of inbound connections and
2303/// inbound channels without confirmed funding transactions. This may result in nodes which we do
2304/// not have a channel with being unable to connect to us or open new channels with us if we have
2305/// many peers with unfunded channels.
2306///
2307/// Because it is an indication of trust, inbound channels which we've accepted as 0conf are
2308/// exempted from the count of unfunded channels. Similarly, outbound channels and connections are
2309/// never limited. Please ensure you limit the count of such channels yourself.
2310///
2311/// # Type Aliases
2312///
2313/// Rather than using a plain `ChannelManager`, it is preferable to use either a [`SimpleArcChannelManager`]
2314/// a [`SimpleRefChannelManager`], for conciseness. See their documentation for more details, but
2315/// essentially you should default to using a [`SimpleRefChannelManager`], and use a
2316/// [`SimpleArcChannelManager`] when you require a `ChannelManager` with a static lifetime, such as when
2317/// you're using lightning-net-tokio.
2318///
2319/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
2320/// [`MessageHandler`]: crate::ln::peer_handler::MessageHandler
2321/// [`OnionMessenger`]: crate::onion_message::messenger::OnionMessenger
2322/// [`PeerManager::read_event`]: crate::ln::peer_handler::PeerManager::read_event
2323/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
2324/// [`timer_tick_occurred`]: Self::timer_tick_occurred
2325/// [`get_and_clear_needs_persistence`]: Self::get_and_clear_needs_persistence
2326/// [`Persister`]: crate::util::persist::Persister
2327/// [`KVStore`]: crate::util::persist::KVStore
2328/// [`get_event_or_persistence_needed_future`]: Self::get_event_or_persistence_needed_future
2329/// [`lightning-block-sync`]: https://docs.rs/lightning_block_sync/latest/lightning_block_sync
2330/// [`lightning-transaction-sync`]: https://docs.rs/lightning_transaction_sync/latest/lightning_transaction_sync
2331/// [`lightning-background-processor`]: https://docs.rs/lightning_background_processor/lightning_background_processor
2332/// [`list_channels`]: Self::list_channels
2333/// [`list_usable_channels`]: Self::list_usable_channels
2334/// [`create_channel`]: Self::create_channel
2335/// [`close_channel`]: Self::force_close_broadcasting_latest_txn
2336/// [`force_close_broadcasting_latest_txn`]: Self::force_close_broadcasting_latest_txn
2337/// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md
2338/// [BOLT 12]: https://github.com/rustyrussell/lightning-rfc/blob/guilt/offers/12-offer-encoding.md
2339/// [`list_recent_payments`]: Self::list_recent_payments
2340/// [`abandon_payment`]: Self::abandon_payment
2341/// [`lightning-invoice`]: https://docs.rs/lightning_invoice/latest/lightning_invoice
2342/// [`create_bolt11_invoice`]: Self::create_bolt11_invoice
2343/// [`create_inbound_payment`]: Self::create_inbound_payment
2344/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
2345/// [`bolt11_payment`]: crate::ln::bolt11_payment
2346/// [`claim_funds`]: Self::claim_funds
2347/// [`send_payment`]: Self::send_payment
2348/// [`offers`]: crate::offers
2349/// [`create_offer_builder`]: Self::create_offer_builder
2350/// [`pay_for_offer`]: Self::pay_for_offer
2351/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
2352/// [`create_refund_builder`]: Self::create_refund_builder
2353/// [`request_refund_payment`]: Self::request_refund_payment
2354/// [`peer_disconnected`]: msgs::ChannelMessageHandler::peer_disconnected
2355/// [`funding_created`]: msgs::FundingCreated
2356/// [`funding_transaction_generated`]: Self::funding_transaction_generated
2357/// [`BlockHash`]: bitcoin::hash_types::BlockHash
2358/// [`update_channel`]: chain::Watch::update_channel
2359/// [`ChannelUpdate`]: msgs::ChannelUpdate
2360/// [`read`]: ReadableArgs::read
2361//
2362// Lock order:
2363// The tree structure below illustrates the lock order requirements for the different locks of the
2364// `ChannelManager`. Locks can be held at the same time if they are on the same branch in the tree,
2365// and should then be taken in the order of the lowest to the highest level in the tree.
2366// Note that locks on different branches shall not be taken at the same time, as doing so will
2367// create a new lock order for those specific locks in the order they were taken.
2368//
2369// Lock order tree:
2370//
2371// `pending_offers_messages`
2372//
2373// `pending_async_payments_messages`
2374//
2375// `total_consistency_lock`
2376//  |
2377//  |__`forward_htlcs`
2378//  |   |
2379//  |   |__`pending_intercepted_htlcs`
2380//  |
2381//  |__`decode_update_add_htlcs`
2382//  |
2383//  |__`per_peer_state`
2384//      |
2385//      |__`claimable_payments`
2386//      |
2387//      |__`pending_outbound_payments` // This field's struct contains a map of pending outbounds
2388//         |
2389//         |__`peer_state`
2390//            |
2391//            |__`outpoint_to_peer`
2392//            |
2393//            |__`short_to_chan_info`
2394//            |
2395//            |__`outbound_scid_aliases`
2396//            |
2397//            |__`best_block`
2398//            |
2399//            |__`pending_events`
2400//               |
2401//               |__`pending_background_events`
2402//
2403pub struct ChannelManager<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
2404where
2405	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
2406	T::Target: BroadcasterInterface,
2407	ES::Target: EntropySource,
2408	NS::Target: NodeSigner,
2409	SP::Target: SignerProvider,
2410	F::Target: FeeEstimator,
2411	R::Target: Router,
2412	MR::Target: MessageRouter,
2413	L::Target: Logger,
2414{
2415	default_configuration: UserConfig,
2416	chain_hash: ChainHash,
2417	fee_estimator: LowerBoundedFeeEstimator<F>,
2418	chain_monitor: M,
2419	tx_broadcaster: T,
2420	router: R,
2421	message_router: MR,
2422
2423	/// See `ChannelManager` struct-level documentation for lock order requirements.
2424	#[cfg(test)]
2425	pub(super) best_block: RwLock<BestBlock>,
2426	#[cfg(not(test))]
2427	best_block: RwLock<BestBlock>,
2428	secp_ctx: Secp256k1<secp256k1::All>,
2429
2430	/// The session_priv bytes and retry metadata of outbound payments which are pending resolution.
2431	/// The authoritative state of these HTLCs resides either within Channels or ChannelMonitors
2432	/// (if the channel has been force-closed), however we track them here to prevent duplicative
2433	/// PaymentSent/PaymentPathFailed events. Specifically, in the case of a duplicative
2434	/// update_fulfill_htlc message after a reconnect, we may "claim" a payment twice.
2435	/// Additionally, because ChannelMonitors are often not re-serialized after connecting block(s)
2436	/// which may generate a claim event, we may receive similar duplicate claim/fail MonitorEvents
2437	/// after reloading from disk while replaying blocks against ChannelMonitors.
2438	///
2439	/// See `PendingOutboundPayment` documentation for more info.
2440	///
2441	/// See `ChannelManager` struct-level documentation for lock order requirements.
2442	pending_outbound_payments: OutboundPayments,
2443
2444	/// SCID/SCID Alias -> forward infos. Key of 0 means payments received.
2445	///
2446	/// Note that because we may have an SCID Alias as the key we can have two entries per channel,
2447	/// though in practice we probably won't be receiving HTLCs for a channel both via the alias
2448	/// and via the classic SCID.
2449	///
2450	/// Note that no consistency guarantees are made about the existence of a channel with the
2451	/// `short_channel_id` here, nor the `short_channel_id` in the `PendingHTLCInfo`!
2452	///
2453	/// See `ChannelManager` struct-level documentation for lock order requirements.
2454	#[cfg(test)]
2455	pub(super) forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
2456	#[cfg(not(test))]
2457	forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
2458	/// Storage for HTLCs that have been intercepted and bubbled up to the user. We hold them here
2459	/// until the user tells us what we should do with them.
2460	///
2461	/// See `ChannelManager` struct-level documentation for lock order requirements.
2462	pending_intercepted_htlcs: Mutex<HashMap<InterceptId, PendingAddHTLCInfo>>,
2463
2464	/// SCID/SCID Alias -> pending `update_add_htlc`s to decode.
2465	///
2466	/// Note that because we may have an SCID Alias as the key we can have two entries per channel,
2467	/// though in practice we probably won't be receiving HTLCs for a channel both via the alias
2468	/// and via the classic SCID.
2469	///
2470	/// Note that no consistency guarantees are made about the existence of a channel with the
2471	/// `short_channel_id` here, nor the `channel_id` in `UpdateAddHTLC`!
2472	///
2473	/// See `ChannelManager` struct-level documentation for lock order requirements.
2474	decode_update_add_htlcs: Mutex<HashMap<u64, Vec<msgs::UpdateAddHTLC>>>,
2475
2476	/// The sets of payments which are claimable or currently being claimed. See
2477	/// [`ClaimablePayments`]' individual field docs for more info.
2478	///
2479	/// See `ChannelManager` struct-level documentation for lock order requirements.
2480	claimable_payments: Mutex<ClaimablePayments>,
2481
2482	/// The set of outbound SCID aliases across all our channels, including unconfirmed channels
2483	/// and some closed channels which reached a usable state prior to being closed. This is used
2484	/// only to avoid duplicates, and is not persisted explicitly to disk, but rebuilt from the
2485	/// active channel list on load.
2486	///
2487	/// See `ChannelManager` struct-level documentation for lock order requirements.
2488	outbound_scid_aliases: Mutex<HashSet<u64>>,
2489
2490	/// Channel funding outpoint -> `counterparty_node_id`.
2491	///
2492	/// Note that this map should only be used for `MonitorEvent` handling, to be able to access
2493	/// the corresponding channel for the event, as we only have access to the `channel_id` during
2494	/// the handling of the events.
2495	///
2496	/// Note that no consistency guarantees are made about the existence of a peer with the
2497	/// `counterparty_node_id` in our other maps.
2498	///
2499	/// TODO:
2500	/// The `counterparty_node_id` isn't passed with `MonitorEvent`s currently. To pass it, we need
2501	/// to make `counterparty_node_id`'s a required field in `ChannelMonitor`s, which unfortunately
2502	/// would break backwards compatability.
2503	/// We should add `counterparty_node_id`s to `MonitorEvent`s, and eventually rely on it in the
2504	/// future. That would make this map redundant, as only the `ChannelManager::per_peer_state` is
2505	/// required to access the channel with the `counterparty_node_id`.
2506	///
2507	/// See `ChannelManager` struct-level documentation for lock order requirements.
2508	#[cfg(not(test))]
2509	outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,
2510	#[cfg(test)]
2511	pub(crate) outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,
2512
2513	/// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s.
2514	///
2515	/// Outbound SCID aliases are added here once the channel is available for normal use, with
2516	/// SCIDs being added once the funding transaction is confirmed at the channel's required
2517	/// confirmation depth.
2518	///
2519	/// Note that while this holds `counterparty_node_id`s and `channel_id`s, no consistency
2520	/// guarantees are made about the existence of a peer with the `counterparty_node_id` nor a
2521	/// channel with the `channel_id` in our other maps.
2522	///
2523	/// See `ChannelManager` struct-level documentation for lock order requirements.
2524	#[cfg(test)]
2525	pub(super) short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, ChannelId)>>,
2526	#[cfg(not(test))]
2527	short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, ChannelId)>>,
2528
2529	our_network_pubkey: PublicKey,
2530
2531	inbound_payment_key: inbound_payment::ExpandedKey,
2532
2533	/// LDK puts the [fake scids] that it generates into namespaces, to identify the type of an
2534	/// incoming payment. To make it harder for a third-party to identify the type of a payment,
2535	/// we encrypt the namespace identifier using these bytes.
2536	///
2537	/// [fake scids]: crate::util::scid_utils::fake_scid
2538	fake_scid_rand_bytes: [u8; 32],
2539
2540	/// When we send payment probes, we generate the [`PaymentHash`] based on this cookie secret
2541	/// and a random [`PaymentId`]. This allows us to discern probes from real payments, without
2542	/// keeping additional state.
2543	probing_cookie_secret: [u8; 32],
2544
2545	/// When generating [`PaymentId`]s for inbound payments, we HMAC the HTLCs with this secret.
2546	inbound_payment_id_secret: [u8; 32],
2547
2548	/// The highest block timestamp we've seen, which is usually a good guess at the current time.
2549	/// Assuming most miners are generating blocks with reasonable timestamps, this shouldn't be
2550	/// very far in the past, and can only ever be up to two hours in the future.
2551	highest_seen_timestamp: AtomicUsize,
2552
2553	/// The bulk of our storage. Currently the `per_peer_state` stores our channels on a per-peer
2554	/// basis, as well as the peer's latest features.
2555	///
2556	/// If we are connected to a peer we always at least have an entry here, even if no channels
2557	/// are currently open with that peer.
2558	///
2559	/// Because adding or removing an entry is rare, we usually take an outer read lock and then
2560	/// operate on the inner value freely. This opens up for parallel per-peer operation for
2561	/// channels.
2562	///
2563	/// Note that the same thread must never acquire two inner `PeerState` locks at the same time.
2564	///
2565	/// See `ChannelManager` struct-level documentation for lock order requirements.
2566	#[cfg(not(any(test, feature = "_test_utils")))]
2567	per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<SP>>>>,
2568	#[cfg(any(test, feature = "_test_utils"))]
2569	pub(super) per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<SP>>>>,
2570
2571	/// The set of events which we need to give to the user to handle. In some cases an event may
2572	/// require some further action after the user handles it (currently only blocking a monitor
2573	/// update from being handed to the user to ensure the included changes to the channel state
2574	/// are handled by the user before they're persisted durably to disk). In that case, the second
2575	/// element in the tuple is set to `Some` with further details of the action.
2576	///
2577	/// Note that events MUST NOT be removed from pending_events after deserialization, as they
2578	/// could be in the middle of being processed without the direct mutex held.
2579	///
2580	/// See `ChannelManager` struct-level documentation for lock order requirements.
2581	#[cfg(not(any(test, feature = "_test_utils")))]
2582	pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
2583	#[cfg(any(test, feature = "_test_utils"))]
2584	pub(crate) pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
2585
2586	/// A simple atomic flag to ensure only one task at a time can be processing events asynchronously.
2587	pending_events_processor: AtomicBool,
2588
2589	/// If we are running during init (either directly during the deserialization method or in
2590	/// block connection methods which run after deserialization but before normal operation) we
2591	/// cannot provide the user with [`ChannelMonitorUpdate`]s through the normal update flow -
2592	/// prior to normal operation the user may not have loaded the [`ChannelMonitor`]s into their
2593	/// [`ChainMonitor`] and thus attempting to update it will fail or panic.
2594	///
2595	/// Thus, we place them here to be handled as soon as possible once we are running normally.
2596	///
2597	/// See `ChannelManager` struct-level documentation for lock order requirements.
2598	///
2599	/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
2600	pending_background_events: Mutex<Vec<BackgroundEvent>>,
2601	/// Used when we have to take a BIG lock to make sure everything is self-consistent.
2602	/// Essentially just when we're serializing ourselves out.
2603	/// Taken first everywhere where we are making changes before any other locks.
2604	/// When acquiring this lock in read mode, rather than acquiring it directly, call
2605	/// `PersistenceNotifierGuard::notify_on_drop(..)` and pass the lock to it, to ensure the
2606	/// Notifier the lock contains sends out a notification when the lock is released.
2607	total_consistency_lock: RwLock<()>,
2608	/// Tracks the progress of channels going through batch funding by whether funding_signed was
2609	/// received and the monitor has been persisted.
2610	///
2611	/// This information does not need to be persisted as funding nodes can forget
2612	/// unfunded channels upon disconnection.
2613	funding_batch_states: Mutex<BTreeMap<Txid, Vec<(ChannelId, PublicKey, bool)>>>,
2614
2615	background_events_processed_since_startup: AtomicBool,
2616
2617	event_persist_notifier: Notifier,
2618	needs_persist_flag: AtomicBool,
2619
2620	#[cfg(not(any(test, feature = "_test_utils")))]
2621	pending_offers_messages: Mutex<Vec<(OffersMessage, MessageSendInstructions)>>,
2622	#[cfg(any(test, feature = "_test_utils"))]
2623	pub(crate) pending_offers_messages: Mutex<Vec<(OffersMessage, MessageSendInstructions)>>,
2624	pending_async_payments_messages: Mutex<Vec<(AsyncPaymentsMessage, MessageSendInstructions)>>,
2625
2626	/// Tracks the message events that are to be broadcasted when we are connected to some peer.
2627	pending_broadcast_messages: Mutex<Vec<MessageSendEvent>>,
2628
2629	/// We only want to force-close our channels on peers based on stale feerates when we're
2630	/// confident the feerate on the channel is *really* stale, not just became stale recently.
2631	/// Thus, we store the fee estimates we had as of the last [`FEERATE_TRACKING_BLOCKS`] blocks
2632	/// (after startup completed) here, and only force-close when channels have a lower feerate
2633	/// than we predicted any time in the last [`FEERATE_TRACKING_BLOCKS`] blocks.
2634	///
2635	/// We only keep this in memory as we assume any feerates we receive immediately after startup
2636	/// may be bunk (as they often are if Bitcoin Core crashes) and want to delay taking any
2637	/// actions for a day anyway.
2638	///
2639	/// The first element in the pair is the
2640	/// [`ConfirmationTarget::MinAllowedAnchorChannelRemoteFee`] estimate, the second the
2641	/// [`ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee`] estimate.
2642	last_days_feerates: Mutex<VecDeque<(u32, u32)>>,
2643
2644	#[cfg(feature = "dnssec")]
2645	hrn_resolver: OMNameResolver,
2646	#[cfg(feature = "dnssec")]
2647	pending_dns_onion_messages: Mutex<Vec<(DNSResolverMessage, MessageSendInstructions)>>,
2648
2649	#[cfg(feature = "_test_utils")]
2650	/// In testing, it is useful be able to forge a name -> offer mapping so that we can pay an
2651	/// offer generated in the test.
2652	///
2653	/// This allows for doing so, validating proofs as normal, but, if they pass, replacing the
2654	/// offer they resolve to to the given one.
2655	pub testing_dnssec_proof_offer_resolution_override: Mutex<HashMap<HumanReadableName, Offer>>,
2656
2657	#[cfg(test)]
2658	pub(super) entropy_source: ES,
2659	#[cfg(not(test))]
2660	entropy_source: ES,
2661	node_signer: NS,
2662	#[cfg(test)]
2663	pub(super) signer_provider: SP,
2664	#[cfg(not(test))]
2665	signer_provider: SP,
2666
2667	logger: L,
2668}
2669
2670/// Chain-related parameters used to construct a new `ChannelManager`.
2671///
2672/// Typically, the block-specific parameters are derived from the best block hash for the network,
2673/// as a newly constructed `ChannelManager` will not have created any channels yet. These parameters
2674/// are not needed when deserializing a previously constructed `ChannelManager`.
2675#[derive(Clone, Copy, PartialEq)]
2676pub struct ChainParameters {
2677	/// The network for determining the `chain_hash` in Lightning messages.
2678	pub network: Network,
2679
2680	/// The hash and height of the latest block successfully connected.
2681	///
2682	/// Used to track on-chain channel funding outputs and send payments with reliable timelocks.
2683	pub best_block: BestBlock,
2684}
2685
2686#[derive(Copy, Clone, PartialEq)]
2687#[must_use]
2688enum NotifyOption {
2689	DoPersist,
2690	SkipPersistHandleEvents,
2691	SkipPersistNoEvents,
2692}
2693
2694/// Whenever we release the `ChannelManager`'s `total_consistency_lock`, from read mode, it is
2695/// desirable to notify any listeners on `await_persistable_update_timeout`/
2696/// `await_persistable_update` when new updates are available for persistence. Therefore, this
2697/// struct is responsible for locking the total consistency lock and, upon going out of scope,
2698/// sending the aforementioned notification (since the lock being released indicates that the
2699/// updates are ready for persistence).
2700///
2701/// We allow callers to either always notify by constructing with `notify_on_drop` or choose to
2702/// notify or not based on whether relevant changes have been made, providing a closure to
2703/// `optionally_notify` which returns a `NotifyOption`.
2704struct PersistenceNotifierGuard<'a, F: FnMut() -> NotifyOption> {
2705	event_persist_notifier: &'a Notifier,
2706	needs_persist_flag: &'a AtomicBool,
2707	should_persist: F,
2708	// We hold onto this result so the lock doesn't get released immediately.
2709	_read_guard: RwLockReadGuard<'a, ()>,
2710}
2711
2712impl<'a> PersistenceNotifierGuard<'a, fn() -> NotifyOption> { // We don't care what the concrete F is here, it's unused
2713	/// Notifies any waiters and indicates that we need to persist, in addition to possibly having
2714	/// events to handle.
2715	///
2716	/// This must always be called if the changes included a `ChannelMonitorUpdate`, as well as in
2717	/// other cases where losing the changes on restart may result in a force-close or otherwise
2718	/// isn't ideal.
2719	fn notify_on_drop<C: AChannelManager>(cm: &'a C) -> PersistenceNotifierGuard<'a, impl FnMut() -> NotifyOption> {
2720		Self::optionally_notify(cm, || -> NotifyOption { NotifyOption::DoPersist })
2721	}
2722
2723	fn optionally_notify<F: FnMut() -> NotifyOption, C: AChannelManager>(cm: &'a C, mut persist_check: F)
2724	-> PersistenceNotifierGuard<'a, impl FnMut() -> NotifyOption> {
2725		let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
2726		let force_notify = cm.get_cm().process_background_events();
2727
2728		PersistenceNotifierGuard {
2729			event_persist_notifier: &cm.get_cm().event_persist_notifier,
2730			needs_persist_flag: &cm.get_cm().needs_persist_flag,
2731			should_persist: move || {
2732				// Pick the "most" action between `persist_check` and the background events
2733				// processing and return that.
2734				let notify = persist_check();
2735				match (notify, force_notify) {
2736					(NotifyOption::DoPersist, _) => NotifyOption::DoPersist,
2737					(_, NotifyOption::DoPersist) => NotifyOption::DoPersist,
2738					(NotifyOption::SkipPersistHandleEvents, _) => NotifyOption::SkipPersistHandleEvents,
2739					(_, NotifyOption::SkipPersistHandleEvents) => NotifyOption::SkipPersistHandleEvents,
2740					_ => NotifyOption::SkipPersistNoEvents,
2741				}
2742			},
2743			_read_guard: read_guard,
2744		}
2745	}
2746
2747	/// Note that if any [`ChannelMonitorUpdate`]s are possibly generated,
2748	/// [`ChannelManager::process_background_events`] MUST be called first (or
2749	/// [`Self::optionally_notify`] used).
2750	fn optionally_notify_skipping_background_events<F: Fn() -> NotifyOption, C: AChannelManager>
2751	(cm: &'a C, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
2752		let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
2753
2754		PersistenceNotifierGuard {
2755			event_persist_notifier: &cm.get_cm().event_persist_notifier,
2756			needs_persist_flag: &cm.get_cm().needs_persist_flag,
2757			should_persist: persist_check,
2758			_read_guard: read_guard,
2759		}
2760	}
2761}
2762
2763impl<'a, F: FnMut() -> NotifyOption> Drop for PersistenceNotifierGuard<'a, F> {
2764	fn drop(&mut self) {
2765		match (self.should_persist)() {
2766			NotifyOption::DoPersist => {
2767				self.needs_persist_flag.store(true, Ordering::Release);
2768				self.event_persist_notifier.notify()
2769			},
2770			NotifyOption::SkipPersistHandleEvents =>
2771				self.event_persist_notifier.notify(),
2772			NotifyOption::SkipPersistNoEvents => {},
2773		}
2774	}
2775}
2776
2777/// The amount of time in blocks we require our counterparty wait to claim their money (ie time
2778/// between when we, or our watchtower, must check for them having broadcast a theft transaction).
2779///
2780/// This can be increased (but not decreased) through [`ChannelHandshakeConfig::our_to_self_delay`]
2781///
2782/// [`ChannelHandshakeConfig::our_to_self_delay`]: crate::util::config::ChannelHandshakeConfig::our_to_self_delay
2783pub const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
2784/// The amount of time in blocks we're willing to wait to claim money back to us. This matches
2785/// the maximum required amount in lnd as of March 2021.
2786pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;
2787
2788/// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
2789/// HTLC's CLTV. The current default represents roughly seven hours of blocks at six blocks/hour.
2790///
2791/// This can be increased (but not decreased) through [`ChannelConfig::cltv_expiry_delta`]
2792///
2793/// [`ChannelConfig::cltv_expiry_delta`]: crate::util::config::ChannelConfig::cltv_expiry_delta
2794// This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
2795// i.e. the node we forwarded the payment on to should always have enough room to reliably time out
2796// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
2797// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
2798pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6*7;
2799// This should be long enough to allow a payment path drawn across multiple routing hops with substantial
2800// `cltv_expiry_delta`. Indeed, the length of those values is the reaction delay offered to a routing node
2801// in case of HTLC on-chain settlement. While appearing less competitive, a node operator could decide to
2802// scale them up to suit its security policy. At the network-level, we shouldn't constrain them too much,
2803// while avoiding to introduce a DoS vector. Further, a low CTLV_FAR_FAR_AWAY could be a source of
2804// routing failure for any HTLC sender picking up an LDK node among the first hops.
2805pub(super) const CLTV_FAR_FAR_AWAY: u32 = 14 * 24 * 6;
2806
2807/// Minimum CLTV difference between the current block height and received inbound payments.
2808/// Invoices generated for payment to us must set their `min_final_cltv_expiry_delta` field to at least
2809/// this value.
2810// Note that we fail if exactly HTLC_FAIL_BACK_BUFFER + 1 was used, so we need to add one for
2811// any payments to succeed. Further, we don't want payments to fail if a block was found while
2812// a payment was being routed, so we add an extra block to be safe.
2813pub const MIN_FINAL_CLTV_EXPIRY_DELTA: u16 = HTLC_FAIL_BACK_BUFFER as u16 + 3;
2814
2815// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS,
2816// ie that if the next-hop peer fails the HTLC within
2817// LATENCY_GRACE_PERIOD_BLOCKS then we'll still have CLTV_CLAIM_BUFFER left to timeout it onchain,
2818// then waiting ANTI_REORG_DELAY to be reorg-safe on the outbound HLTC and
2819// failing the corresponding htlc backward, and us now seeing the last block of ANTI_REORG_DELAY before
2820// LATENCY_GRACE_PERIOD_BLOCKS.
2821#[allow(dead_code)]
2822const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
2823
2824// Check for ability of an attacker to make us fail on-chain by delaying an HTLC claim. See
2825// ChannelMonitor::should_broadcast_holder_commitment_txn for a description of why this is needed.
2826#[allow(dead_code)]
2827const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
2828
2829/// The number of ticks of [`ChannelManager::timer_tick_occurred`] until expiry of incomplete MPPs
2830pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
2831
2832/// The number of ticks of [`ChannelManager::timer_tick_occurred`] where a peer is disconnected
2833/// until we mark the channel disabled and gossip the update.
2834pub(crate) const DISABLE_GOSSIP_TICKS: u8 = 10;
2835
2836/// The number of ticks of [`ChannelManager::timer_tick_occurred`] where a peer is connected until
2837/// we mark the channel enabled and gossip the update.
2838pub(crate) const ENABLE_GOSSIP_TICKS: u8 = 5;
2839
2840/// The maximum number of unfunded channels we can have per-peer before we start rejecting new
2841/// (inbound) ones. The number of peers with unfunded channels is limited separately in
2842/// [`MAX_UNFUNDED_CHANNEL_PEERS`].
2843const MAX_UNFUNDED_CHANS_PER_PEER: usize = 4;
2844
2845/// The maximum number of peers from which we will allow pending unfunded channels. Once we reach
2846/// this many peers we reject new (inbound) channels from peers with which we don't have a channel.
2847const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50;
2848
2849/// The maximum number of peers which we do not have a (funded) channel with. Once we reach this
2850/// many peers we reject new (inbound) connections.
2851const MAX_NO_CHANNEL_PEERS: usize = 250;
2852
2853/// The maximum expiration from the current time where an [`Offer`] or [`Refund`] is considered
2854/// short-lived, while anything with a greater expiration is considered long-lived.
2855///
2856/// Using [`ChannelManager::create_offer_builder`] or [`ChannelManager::create_refund_builder`],
2857/// will included a [`BlindedMessagePath`] created using:
2858/// - [`MessageRouter::create_compact_blinded_paths`] when short-lived, and
2859/// - [`MessageRouter::create_blinded_paths`] when long-lived.
2860///
2861/// Using compact [`BlindedMessagePath`]s may provide better privacy as the [`MessageRouter`] could select
2862/// more hops. However, since they use short channel ids instead of pubkeys, they are more likely to
2863/// become invalid over time as channels are closed. Thus, they are only suitable for short-term use.
2864pub const MAX_SHORT_LIVED_RELATIVE_EXPIRY: Duration = Duration::from_secs(60 * 60 * 24);
2865
2866/// Used by [`ChannelManager::list_recent_payments`] to express the status of recent payments.
2867/// These include payments that have yet to find a successful path, or have unresolved HTLCs.
2868#[derive(Debug, PartialEq)]
2869pub enum RecentPaymentDetails {
2870	/// When an invoice was requested and thus a payment has not yet been sent.
2871	AwaitingInvoice {
2872		/// A user-provided identifier in [`ChannelManager::pay_for_offer`] used to uniquely identify a
2873		/// payment and ensure idempotency in LDK.
2874		payment_id: PaymentId,
2875	},
2876	/// When a payment is still being sent and awaiting successful delivery.
2877	Pending {
2878		/// A user-provided identifier in [`send_payment`] or [`pay_for_offer`] used to uniquely
2879		/// identify a payment and ensure idempotency in LDK.
2880		///
2881		/// [`send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
2882		/// [`pay_for_offer`]: crate::ln::channelmanager::ChannelManager::pay_for_offer
2883		payment_id: PaymentId,
2884		/// Hash of the payment that is currently being sent but has yet to be fulfilled or
2885		/// abandoned.
2886		payment_hash: PaymentHash,
2887		/// Total amount (in msat, excluding fees) across all paths for this payment,
2888		/// not just the amount currently inflight.
2889		total_msat: u64,
2890	},
2891	/// When a pending payment is fulfilled, we continue tracking it until all pending HTLCs have
2892	/// been resolved. Upon receiving [`Event::PaymentSent`], we delay for a few minutes before the
2893	/// payment is removed from tracking.
2894	Fulfilled {
2895		/// A user-provided identifier in [`send_payment`] or [`pay_for_offer`] used to uniquely
2896		/// identify a payment and ensure idempotency in LDK.
2897		///
2898		/// [`send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
2899		/// [`pay_for_offer`]: crate::ln::channelmanager::ChannelManager::pay_for_offer
2900		payment_id: PaymentId,
2901		/// Hash of the payment that was claimed. `None` for serializations of [`ChannelManager`]
2902		/// made before LDK version 0.0.104.
2903		payment_hash: Option<PaymentHash>,
2904	},
2905	/// After a payment's retries are exhausted per the provided [`Retry`], or it is explicitly
2906	/// abandoned via [`ChannelManager::abandon_payment`], it is marked as abandoned until all
2907	/// pending HTLCs for this payment resolve and an [`Event::PaymentFailed`] is generated.
2908	Abandoned {
2909		/// A user-provided identifier in [`send_payment`] or [`pay_for_offer`] used to uniquely
2910		/// identify a payment and ensure idempotency in LDK.
2911		///
2912		/// [`send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
2913		/// [`pay_for_offer`]: crate::ln::channelmanager::ChannelManager::pay_for_offer
2914		payment_id: PaymentId,
2915		/// Hash of the payment that we have given up trying to send.
2916		payment_hash: PaymentHash,
2917	},
2918}
2919
2920/// Route hints used in constructing invoices for [phantom node payents].
2921///
2922/// [phantom node payments]: crate::sign::PhantomKeysManager
2923#[derive(Clone)]
2924pub struct PhantomRouteHints {
2925	/// The list of channels to be included in the invoice route hints.
2926	pub channels: Vec<ChannelDetails>,
2927	/// A fake scid used for representing the phantom node's fake channel in generating the invoice
2928	/// route hints.
2929	pub phantom_scid: u64,
2930	/// The pubkey of the real backing node that would ultimately receive the payment.
2931	pub real_node_pubkey: PublicKey,
2932}
2933
2934macro_rules! handle_error {
2935	($self: ident, $internal: expr, $counterparty_node_id: expr) => { {
2936		// In testing, ensure there are no deadlocks where the lock is already held upon
2937		// entering the macro.
2938		debug_assert_ne!($self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
2939		debug_assert_ne!($self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
2940
2941		match $internal {
2942			Ok(msg) => Ok(msg),
2943			Err(MsgHandleErrInternal { err, shutdown_finish, .. }) => {
2944				let mut msg_event = None;
2945
2946				if let Some((shutdown_res, update_option)) = shutdown_finish {
2947					let counterparty_node_id = shutdown_res.counterparty_node_id;
2948					let channel_id = shutdown_res.channel_id;
2949					let logger = WithContext::from(
2950						&$self.logger, Some(counterparty_node_id), Some(channel_id), None
2951					);
2952					log_error!(logger, "Force-closing channel: {}", err.err);
2953
2954					$self.finish_close_channel(shutdown_res);
2955					if let Some(update) = update_option {
2956						let mut pending_broadcast_messages = $self.pending_broadcast_messages.lock().unwrap();
2957						pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
2958							msg: update
2959						});
2960					}
2961				} else {
2962					log_error!($self.logger, "Got non-closing error: {}", err.err);
2963				}
2964
2965				if let msgs::ErrorAction::IgnoreError = err.action {
2966				} else {
2967					msg_event = Some(events::MessageSendEvent::HandleError {
2968						node_id: $counterparty_node_id,
2969						action: err.action.clone()
2970					});
2971				}
2972
2973				if let Some(msg_event) = msg_event {
2974					let per_peer_state = $self.per_peer_state.read().unwrap();
2975					if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) {
2976						let mut peer_state = peer_state_mutex.lock().unwrap();
2977						peer_state.pending_msg_events.push(msg_event);
2978					}
2979				}
2980
2981				// Return error in case higher-API need one
2982				Err(err)
2983			},
2984		}
2985	} };
2986}
2987
2988/// When a channel is removed, two things need to happen:
2989/// (a) This must be called in the same `per_peer_state` lock as the channel-closing action,
2990/// (b) [`ChannelManager::finish_close_channel`] needs to be called without holding any locks
2991///     (except [`ChannelManager::total_consistency_lock`].
2992///
2993/// Note that this step can be skipped if the channel was never opened (through the creation of a
2994/// [`ChannelMonitor`]/channel funding transaction) to begin with.
2995macro_rules! locked_close_channel {
2996	($self: ident, $peer_state: expr, $channel_context: expr, $shutdown_res_mut: expr) => {{
2997		if let Some((_, funding_txo, _, update)) = $shutdown_res_mut.monitor_update.take() {
2998			handle_new_monitor_update!($self, funding_txo, update, $peer_state,
2999				$channel_context, REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER);
3000		}
3001		// If there's a possibility that we need to generate further monitor updates for this
3002		// channel, we need to store the last update_id of it. However, we don't want to insert
3003		// into the map (which prevents the `PeerState` from being cleaned up) for channels that
3004		// never even got confirmations (which would open us up to DoS attacks).
3005		let update_id = $channel_context.get_latest_monitor_update_id();
3006		if $channel_context.get_funding_tx_confirmation_height().is_some() || $channel_context.minimum_depth() == Some(0) || update_id > 1 {
3007			let chan_id = $channel_context.channel_id();
3008			$peer_state.closed_channel_monitor_update_ids.insert(chan_id, update_id);
3009		}
3010		if let Some(outpoint) = $channel_context.get_funding_txo() {
3011			$self.outpoint_to_peer.lock().unwrap().remove(&outpoint);
3012		}
3013		let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
3014		if let Some(short_id) = $channel_context.get_short_channel_id() {
3015			short_to_chan_info.remove(&short_id);
3016		} else {
3017			// If the channel was never confirmed on-chain prior to its closure, remove the
3018			// outbound SCID alias we used for it from the collision-prevention set. While we
3019			// generally want to avoid ever re-using an outbound SCID alias across all channels, we
3020			// also don't want a counterparty to be able to trivially cause a memory leak by simply
3021			// opening a million channels with us which are closed before we ever reach the funding
3022			// stage.
3023			let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel_context.outbound_scid_alias());
3024			debug_assert!(alias_removed);
3025		}
3026		short_to_chan_info.remove(&$channel_context.outbound_scid_alias());
3027	}}
3028}
3029
3030/// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error)
3031macro_rules! convert_chan_phase_err {
3032	($self: ident, $peer_state: expr, $err: expr, $channel: expr, $channel_id: expr, MANUAL_CHANNEL_UPDATE, $channel_update: expr) => {
3033		match $err {
3034			ChannelError::Warn(msg) => {
3035				(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), *$channel_id))
3036			},
3037			ChannelError::Ignore(msg) => {
3038				(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), *$channel_id))
3039			},
3040			ChannelError::Close((msg, reason)) => {
3041				let logger = WithChannelContext::from(&$self.logger, &$channel.context, None);
3042				log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
3043				let mut shutdown_res = $channel.context.force_shutdown(true, reason);
3044				locked_close_channel!($self, $peer_state, &$channel.context, &mut shutdown_res);
3045				let err =
3046					MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $channel_update);
3047				(true, err)
3048			},
3049		}
3050	};
3051	($self: ident, $peer_state: expr, $err: expr, $channel: expr, $channel_id: expr, FUNDED_CHANNEL) => {
3052		convert_chan_phase_err!($self, $peer_state, $err, $channel, $channel_id, MANUAL_CHANNEL_UPDATE, { $self.get_channel_update_for_broadcast($channel).ok() })
3053	};
3054	($self: ident, $peer_state: expr, $err: expr, $channel: expr, $channel_id: expr, UNFUNDED_CHANNEL) => {
3055		convert_chan_phase_err!($self, $peer_state, $err, $channel, $channel_id, MANUAL_CHANNEL_UPDATE, None)
3056	};
3057	($self: ident, $peer_state: expr, $err: expr, $channel_phase: expr, $channel_id: expr) => {
3058		match $channel_phase {
3059			ChannelPhase::Funded(channel) => {
3060				convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, FUNDED_CHANNEL)
3061			},
3062			ChannelPhase::UnfundedOutboundV1(channel) => {
3063				convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, UNFUNDED_CHANNEL)
3064			},
3065			ChannelPhase::UnfundedInboundV1(channel) => {
3066				convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, UNFUNDED_CHANNEL)
3067			},
3068			ChannelPhase::UnfundedOutboundV2(channel) => {
3069				convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, UNFUNDED_CHANNEL)
3070			},
3071			ChannelPhase::UnfundedInboundV2(channel) => {
3072				convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, UNFUNDED_CHANNEL)
3073			},
3074		}
3075	};
3076}
3077
3078macro_rules! break_chan_phase_entry {
3079	($self: ident, $peer_state: expr, $res: expr, $entry: expr) => {
3080		match $res {
3081			Ok(res) => res,
3082			Err(e) => {
3083				let key = *$entry.key();
3084				let (drop, res) = convert_chan_phase_err!($self, $peer_state, e, $entry.get_mut(), &key);
3085				if drop {
3086					$entry.remove_entry();
3087				}
3088				break Err(res);
3089			}
3090		}
3091	}
3092}
3093
3094macro_rules! try_chan_phase_entry {
3095	($self: ident, $peer_state: expr, $res: expr, $entry: expr) => {
3096		match $res {
3097			Ok(res) => res,
3098			Err(e) => {
3099				let key = *$entry.key();
3100				let (drop, res) = convert_chan_phase_err!($self, $peer_state, e, $entry.get_mut(), &key);
3101				if drop {
3102					$entry.remove_entry();
3103				}
3104				return Err(res);
3105			}
3106		}
3107	}
3108}
3109
3110macro_rules! remove_channel_phase {
3111	($self: ident, $peer_state: expr, $entry: expr, $shutdown_res_mut: expr) => {
3112		{
3113			let channel = $entry.remove_entry().1;
3114			locked_close_channel!($self, $peer_state, &channel.context(), $shutdown_res_mut);
3115			channel
3116		}
3117	}
3118}
3119
3120macro_rules! send_channel_ready {
3121	($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{
3122		$pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
3123			node_id: $channel.context.get_counterparty_node_id(),
3124			msg: $channel_ready_msg,
3125		});
3126		// Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so
3127		// we allow collisions, but we shouldn't ever be updating the channel ID pointed to.
3128		let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
3129		let outbound_alias_insert = short_to_chan_info.insert($channel.context.outbound_scid_alias(), ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()));
3130		assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()),
3131			"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
3132		if let Some(real_scid) = $channel.context.get_short_channel_id() {
3133			let scid_insert = short_to_chan_info.insert(real_scid, ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()));
3134			assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()),
3135				"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
3136		}
3137	}}
3138}
3139macro_rules! emit_funding_tx_broadcast_safe_event {
3140	($locked_events: expr, $channel: expr, $funding_txo: expr) => {
3141		if !$channel.context.funding_tx_broadcast_safe_event_emitted() {
3142			$locked_events.push_back((events::Event::FundingTxBroadcastSafe {
3143				channel_id: $channel.context.channel_id(),
3144				user_channel_id: $channel.context.get_user_id(),
3145				funding_txo: $funding_txo,
3146				counterparty_node_id: $channel.context.get_counterparty_node_id(),
3147				former_temporary_channel_id: $channel.context.temporary_channel_id()
3148					.expect("Unreachable: FundingTxBroadcastSafe event feature added to channel establishment process in LDK v0.0.124 where this should never be None."),
3149			}, None));
3150			$channel.context.set_funding_tx_broadcast_safe_event_emitted();
3151		}
3152	}
3153}
3154
3155macro_rules! emit_channel_pending_event {
3156	($locked_events: expr, $channel: expr) => {
3157		if $channel.context.should_emit_channel_pending_event() {
3158			$locked_events.push_back((events::Event::ChannelPending {
3159				channel_id: $channel.context.channel_id(),
3160				former_temporary_channel_id: $channel.context.temporary_channel_id(),
3161				counterparty_node_id: $channel.context.get_counterparty_node_id(),
3162				user_channel_id: $channel.context.get_user_id(),
3163				funding_txo: $channel.context.get_funding_txo().unwrap().into_bitcoin_outpoint(),
3164				channel_type: Some($channel.context.get_channel_type().clone()),
3165			}, None));
3166			$channel.context.set_channel_pending_event_emitted();
3167		}
3168	}
3169}
3170
3171macro_rules! emit_channel_ready_event {
3172	($locked_events: expr, $channel: expr) => {
3173		if $channel.context.should_emit_channel_ready_event() {
3174			debug_assert!($channel.context.channel_pending_event_emitted());
3175			$locked_events.push_back((events::Event::ChannelReady {
3176				channel_id: $channel.context.channel_id(),
3177				user_channel_id: $channel.context.get_user_id(),
3178				counterparty_node_id: $channel.context.get_counterparty_node_id(),
3179				channel_type: $channel.context.get_channel_type().clone(),
3180			}, None));
3181			$channel.context.set_channel_ready_event_emitted();
3182		}
3183	}
3184}
3185
3186macro_rules! handle_monitor_update_completion {
3187	($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
3188		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
3189		let mut updates = $chan.monitor_updating_restored(&&logger,
3190			&$self.node_signer, $self.chain_hash, &$self.default_configuration,
3191			$self.best_block.read().unwrap().height);
3192		let counterparty_node_id = $chan.context.get_counterparty_node_id();
3193		let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
3194			// We only send a channel_update in the case where we are just now sending a
3195			// channel_ready and the channel is in a usable state. We may re-send a
3196			// channel_update later through the announcement_signatures process for public
3197			// channels, but there's no reason not to just inform our counterparty of our fees
3198			// now.
3199			if let Ok(msg) = $self.get_channel_update_for_unicast($chan) {
3200				Some(events::MessageSendEvent::SendChannelUpdate {
3201					node_id: counterparty_node_id,
3202					msg,
3203				})
3204			} else { None }
3205		} else { None };
3206
3207		let update_actions = $peer_state.monitor_update_blocked_actions
3208			.remove(&$chan.context.channel_id()).unwrap_or(Vec::new());
3209
3210		let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption(
3211			&mut $peer_state.pending_msg_events, $chan, updates.raa,
3212			updates.commitment_update, updates.order, updates.accepted_htlcs, updates.pending_update_adds,
3213			updates.funding_broadcastable, updates.channel_ready,
3214			updates.announcement_sigs, updates.tx_signatures);
3215		if let Some(upd) = channel_update {
3216			$peer_state.pending_msg_events.push(upd);
3217		}
3218
3219		let channel_id = $chan.context.channel_id();
3220		let unbroadcasted_batch_funding_txid = $chan.context.unbroadcasted_batch_funding_txid();
3221		core::mem::drop($peer_state_lock);
3222		core::mem::drop($per_peer_state_lock);
3223
3224		// If the channel belongs to a batch funding transaction, the progress of the batch
3225		// should be updated as we have received funding_signed and persisted the monitor.
3226		if let Some(txid) = unbroadcasted_batch_funding_txid {
3227			let mut funding_batch_states = $self.funding_batch_states.lock().unwrap();
3228			let mut batch_completed = false;
3229			if let Some(batch_state) = funding_batch_states.get_mut(&txid) {
3230				let channel_state = batch_state.iter_mut().find(|(chan_id, pubkey, _)| (
3231					*chan_id == channel_id &&
3232					*pubkey == counterparty_node_id
3233				));
3234				if let Some(channel_state) = channel_state {
3235					channel_state.2 = true;
3236				} else {
3237					debug_assert!(false, "Missing channel batch state for channel which completed initial monitor update");
3238				}
3239				batch_completed = batch_state.iter().all(|(_, _, completed)| *completed);
3240			} else {
3241				debug_assert!(false, "Missing batch state for channel which completed initial monitor update");
3242			}
3243
3244			// When all channels in a batched funding transaction have become ready, it is not necessary
3245			// to track the progress of the batch anymore and the state of the channels can be updated.
3246			if batch_completed {
3247				let removed_batch_state = funding_batch_states.remove(&txid).into_iter().flatten();
3248				let per_peer_state = $self.per_peer_state.read().unwrap();
3249				let mut batch_funding_tx = None;
3250				for (channel_id, counterparty_node_id, _) in removed_batch_state {
3251					if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
3252						let mut peer_state = peer_state_mutex.lock().unwrap();
3253						if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
3254							batch_funding_tx = batch_funding_tx.or_else(|| chan.context.unbroadcasted_funding());
3255							chan.set_batch_ready();
3256							let mut pending_events = $self.pending_events.lock().unwrap();
3257							emit_channel_pending_event!(pending_events, chan);
3258						}
3259					}
3260				}
3261				if let Some(tx) = batch_funding_tx {
3262					log_info!($self.logger, "Broadcasting batch funding transaction with txid {}", tx.compute_txid());
3263					$self.tx_broadcaster.broadcast_transactions(&[&tx]);
3264				}
3265			}
3266		}
3267
3268		$self.handle_monitor_update_completion_actions(update_actions);
3269
3270		if let Some(forwards) = htlc_forwards {
3271			$self.forward_htlcs(&mut [forwards][..]);
3272		}
3273		if let Some(decode) = decode_update_add_htlcs {
3274			$self.push_decode_update_add_htlcs(decode);
3275		}
3276		$self.finalize_claims(updates.finalized_claimed_htlcs);
3277		for failure in updates.failed_htlcs.drain(..) {
3278			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
3279			$self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
3280		}
3281	} }
3282}
3283
3284macro_rules! handle_new_monitor_update {
3285	($self: ident, $update_res: expr, $logger: expr, $channel_id: expr, _internal, $completed: expr) => { {
3286		debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire));
3287		match $update_res {
3288			ChannelMonitorUpdateStatus::UnrecoverableError => {
3289				let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
3290				log_error!($logger, "{}", err_str);
3291				panic!("{}", err_str);
3292			},
3293			ChannelMonitorUpdateStatus::InProgress => {
3294				log_debug!($logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
3295					$channel_id);
3296				false
3297			},
3298			ChannelMonitorUpdateStatus::Completed => {
3299				$completed;
3300				true
3301			},
3302		}
3303	} };
3304	($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, INITIAL_MONITOR) => {
3305		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
3306		handle_new_monitor_update!($self, $update_res, logger, $chan.context.channel_id(), _internal,
3307			handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
3308	};
3309	(
3310		$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $logger: expr,
3311		$chan_id: expr, $counterparty_node_id: expr, $in_flight_updates: ident, $update_idx: ident,
3312		_internal_outer, $completed: expr
3313	) => { {
3314		$in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
3315			.or_insert_with(Vec::new);
3316		// During startup, we push monitor updates as background events through to here in
3317		// order to replay updates that were in-flight when we shut down. Thus, we have to
3318		// filter for uniqueness here.
3319		$update_idx = $in_flight_updates.iter().position(|upd| upd == &$update)
3320			.unwrap_or_else(|| {
3321				$in_flight_updates.push($update);
3322				$in_flight_updates.len() - 1
3323			});
3324		if $self.background_events_processed_since_startup.load(Ordering::Acquire) {
3325			let update_res = $self.chain_monitor.update_channel($funding_txo, &$in_flight_updates[$update_idx]);
3326			handle_new_monitor_update!($self, update_res, $logger, $chan_id, _internal, $completed)
3327		} else {
3328			// We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we
3329			// fail to persist it. This is a fairly safe assumption, however, since anything we do
3330			// during the startup sequence should be replayed exactly if we immediately crash.
3331			let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
3332				counterparty_node_id: $counterparty_node_id,
3333				funding_txo: $funding_txo,
3334				channel_id: $chan_id,
3335				update: $in_flight_updates[$update_idx].clone(),
3336			};
3337			// We want to track the in-flight update both in `in_flight_monitor_updates` and in
3338			// `pending_background_events` to avoid a race condition during
3339			// `pending_background_events` processing where we complete one
3340			// `ChannelMonitorUpdate` (but there are more pending as background events) but we
3341			// conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to
3342			// run post-completion actions.
3343			// We could work around that with some effort, but its simpler to just track updates
3344			// twice.
3345			$self.pending_background_events.lock().unwrap().push(event);
3346			false
3347		}
3348	} };
3349	(
3350		$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $chan_context: expr,
3351		REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER
3352	) => { {
3353		let logger = WithChannelContext::from(&$self.logger, &$chan_context, None);
3354		let chan_id = $chan_context.channel_id();
3355		let counterparty_node_id = $chan_context.get_counterparty_node_id();
3356		let in_flight_updates;
3357		let idx;
3358		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
3359			counterparty_node_id, in_flight_updates, idx, _internal_outer,
3360			{
3361				let _ = in_flight_updates.remove(idx);
3362			})
3363	} };
3364	(
3365		$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
3366		$per_peer_state_lock: expr, $counterparty_node_id: expr, $channel_id: expr, POST_CHANNEL_CLOSE
3367	) => { {
3368		let logger = WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None);
3369		let in_flight_updates;
3370		let idx;
3371		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger,
3372			$channel_id, $counterparty_node_id, in_flight_updates, idx, _internal_outer,
3373			{
3374				let _ = in_flight_updates.remove(idx);
3375				if in_flight_updates.is_empty() {
3376					let update_actions = $peer_state.monitor_update_blocked_actions
3377						.remove(&$channel_id).unwrap_or(Vec::new());
3378
3379					mem::drop($peer_state_lock);
3380					mem::drop($per_peer_state_lock);
3381
3382					$self.handle_monitor_update_completion_actions(update_actions);
3383				}
3384			})
3385	} };
3386	(
3387		$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
3388		$per_peer_state_lock: expr, $chan: expr
3389	) => { {
3390		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
3391		let chan_id = $chan.context.channel_id();
3392		let counterparty_node_id = $chan.context.get_counterparty_node_id();
3393		let in_flight_updates;
3394		let idx;
3395		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
3396			counterparty_node_id, in_flight_updates, idx, _internal_outer,
3397			{
3398				let _ = in_flight_updates.remove(idx);
3399				if in_flight_updates.is_empty() && $chan.blocked_monitor_updates_pending() == 0 {
3400					handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
3401				}
3402			})
3403	} };
3404}
3405
3406macro_rules! process_events_body {
3407	($self: expr, $event_to_handle: expr, $handle_event: expr) => {
3408		let mut handling_failed = false;
3409		let mut processed_all_events = false;
3410		while !handling_failed && !processed_all_events {
3411			if $self.pending_events_processor.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_err() {
3412				return;
3413			}
3414
3415			let mut result;
3416
3417			{
3418				// We'll acquire our total consistency lock so that we can be sure no other
3419				// persists happen while processing monitor events.
3420				let _read_guard = $self.total_consistency_lock.read().unwrap();
3421
3422				// Because `handle_post_event_actions` may send `ChannelMonitorUpdate`s to the user we must
3423				// ensure any startup-generated background events are handled first.
3424				result = $self.process_background_events();
3425
3426				// TODO: This behavior should be documented. It's unintuitive that we query
3427				// ChannelMonitors when clearing other events.
3428				if $self.process_pending_monitor_events() {
3429					result = NotifyOption::DoPersist;
3430				}
3431			}
3432
3433			let pending_events = $self.pending_events.lock().unwrap().clone();
3434			if !pending_events.is_empty() {
3435				result = NotifyOption::DoPersist;
3436			}
3437
3438			let mut post_event_actions = Vec::new();
3439
3440			let mut num_handled_events = 0;
3441			for (event, action_opt) in pending_events {
3442				log_trace!($self.logger, "Handling event {:?}...", event);
3443				$event_to_handle = event;
3444				let event_handling_result = $handle_event;
3445				log_trace!($self.logger, "Done handling event, result: {:?}", event_handling_result);
3446				match event_handling_result {
3447					Ok(()) => {
3448						if let Some(action) = action_opt {
3449							post_event_actions.push(action);
3450						}
3451						num_handled_events += 1;
3452					}
3453					Err(_e) => {
3454						// If we encounter an error we stop handling events and make sure to replay
3455						// any unhandled events on the next invocation.
3456						handling_failed = true;
3457						break;
3458					}
3459				}
3460			}
3461
3462			{
3463				let mut pending_events = $self.pending_events.lock().unwrap();
3464				pending_events.drain(..num_handled_events);
3465				processed_all_events = pending_events.is_empty();
3466				// Note that `push_pending_forwards_ev` relies on `pending_events_processor` being
3467				// updated here with the `pending_events` lock acquired.
3468				$self.pending_events_processor.store(false, Ordering::Release);
3469			}
3470
3471			if !post_event_actions.is_empty() {
3472				$self.handle_post_event_actions(post_event_actions);
3473				// If we had some actions, go around again as we may have more events now
3474				processed_all_events = false;
3475			}
3476
3477			match result {
3478				NotifyOption::DoPersist => {
3479					$self.needs_persist_flag.store(true, Ordering::Release);
3480					$self.event_persist_notifier.notify();
3481				},
3482				NotifyOption::SkipPersistHandleEvents =>
3483					$self.event_persist_notifier.notify(),
3484				NotifyOption::SkipPersistNoEvents => {},
3485			}
3486		}
3487	}
3488}
3489
3490impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
3491where
3492	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
3493	T::Target: BroadcasterInterface,
3494	ES::Target: EntropySource,
3495	NS::Target: NodeSigner,
3496	SP::Target: SignerProvider,
3497	F::Target: FeeEstimator,
3498	R::Target: Router,
3499	MR::Target: MessageRouter,
3500	L::Target: Logger,
3501{
3502	/// Constructs a new `ChannelManager` to hold several channels and route between them.
3503	///
3504	/// The current time or latest block header time can be provided as the `current_timestamp`.
3505	///
3506	/// This is the main "logic hub" for all channel-related actions, and implements
3507	/// [`ChannelMessageHandler`].
3508	///
3509	/// Non-proportional fees are fixed according to our risk using the provided fee estimator.
3510	///
3511	/// Users need to notify the new `ChannelManager` when a new block is connected or
3512	/// disconnected using its [`block_connected`] and [`block_disconnected`] methods, starting
3513	/// from after [`params.best_block.block_hash`]. See [`chain::Listen`] and [`chain::Confirm`] for
3514	/// more details.
3515	///
3516	/// [`block_connected`]: chain::Listen::block_connected
3517	/// [`block_disconnected`]: chain::Listen::block_disconnected
3518	/// [`params.best_block.block_hash`]: chain::BestBlock::block_hash
3519	pub fn new(
3520		fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, message_router: MR, logger: L,
3521		entropy_source: ES, node_signer: NS, signer_provider: SP, config: UserConfig,
3522		params: ChainParameters, current_timestamp: u32,
3523	) -> Self {
3524		let mut secp_ctx = Secp256k1::new();
3525		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
3526		let expanded_inbound_key = node_signer.get_inbound_payment_key();
3527		ChannelManager {
3528			default_configuration: config.clone(),
3529			chain_hash: ChainHash::using_genesis_block(params.network),
3530			fee_estimator: LowerBoundedFeeEstimator::new(fee_est),
3531			chain_monitor,
3532			tx_broadcaster,
3533			router,
3534			message_router,
3535
3536			best_block: RwLock::new(params.best_block),
3537
3538			outbound_scid_aliases: Mutex::new(new_hash_set()),
3539			pending_outbound_payments: OutboundPayments::new(new_hash_map()),
3540			forward_htlcs: Mutex::new(new_hash_map()),
3541			decode_update_add_htlcs: Mutex::new(new_hash_map()),
3542			claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: new_hash_map(), pending_claiming_payments: new_hash_map() }),
3543			pending_intercepted_htlcs: Mutex::new(new_hash_map()),
3544			outpoint_to_peer: Mutex::new(new_hash_map()),
3545			short_to_chan_info: FairRwLock::new(new_hash_map()),
3546
3547			our_network_pubkey: node_signer.get_node_id(Recipient::Node).unwrap(),
3548			secp_ctx,
3549
3550			inbound_payment_key: expanded_inbound_key,
3551			fake_scid_rand_bytes: entropy_source.get_secure_random_bytes(),
3552
3553			probing_cookie_secret: entropy_source.get_secure_random_bytes(),
3554			inbound_payment_id_secret: entropy_source.get_secure_random_bytes(),
3555
3556			highest_seen_timestamp: AtomicUsize::new(current_timestamp as usize),
3557
3558			per_peer_state: FairRwLock::new(new_hash_map()),
3559
3560			pending_events: Mutex::new(VecDeque::new()),
3561			pending_events_processor: AtomicBool::new(false),
3562			pending_background_events: Mutex::new(Vec::new()),
3563			total_consistency_lock: RwLock::new(()),
3564			background_events_processed_since_startup: AtomicBool::new(false),
3565			event_persist_notifier: Notifier::new(),
3566			needs_persist_flag: AtomicBool::new(false),
3567			funding_batch_states: Mutex::new(BTreeMap::new()),
3568
3569			pending_offers_messages: Mutex::new(Vec::new()),
3570			pending_async_payments_messages: Mutex::new(Vec::new()),
3571			pending_broadcast_messages: Mutex::new(Vec::new()),
3572
3573			last_days_feerates: Mutex::new(VecDeque::new()),
3574
3575			entropy_source,
3576			node_signer,
3577			signer_provider,
3578
3579			logger,
3580
3581			#[cfg(feature = "dnssec")]
3582			hrn_resolver: OMNameResolver::new(current_timestamp, params.best_block.height),
3583			#[cfg(feature = "dnssec")]
3584			pending_dns_onion_messages: Mutex::new(Vec::new()),
3585
3586			#[cfg(feature = "_test_utils")]
3587			testing_dnssec_proof_offer_resolution_override: Mutex::new(new_hash_map()),
3588		}
3589	}
3590
3591	/// Gets the current configuration applied to all new channels.
3592	pub fn get_current_default_configuration(&self) -> &UserConfig {
3593		&self.default_configuration
3594	}
3595
3596	#[cfg(test)]
3597	pub fn create_and_insert_outbound_scid_alias_for_test(&self) -> u64 {
3598		self.create_and_insert_outbound_scid_alias()
3599	}
3600
3601	fn create_and_insert_outbound_scid_alias(&self) -> u64 {
3602		let height = self.best_block.read().unwrap().height;
3603		let mut outbound_scid_alias = 0;
3604		let mut i = 0;
3605		loop {
3606			if cfg!(fuzzing) { // fuzzing chacha20 doesn't use the key at all so we always get the same alias
3607				outbound_scid_alias += 1;
3608			} else {
3609				outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
3610			}
3611			if outbound_scid_alias != 0 && self.outbound_scid_aliases.lock().unwrap().insert(outbound_scid_alias) {
3612				break;
3613			}
3614			i += 1;
3615			if i > 1_000_000 { panic!("Your RNG is busted or we ran out of possible outbound SCID aliases (which should never happen before we run out of memory to store channels"); }
3616		}
3617		outbound_scid_alias
3618	}
3619
3620	/// Creates a new outbound channel to the given remote node and with the given value.
3621	///
3622	/// `user_channel_id` will be provided back as in
3623	/// [`Event::FundingGenerationReady::user_channel_id`] to allow tracking of which events
3624	/// correspond with which `create_channel` call. Note that the `user_channel_id` defaults to a
3625	/// randomized value for inbound channels. `user_channel_id` has no meaning inside of LDK, it
3626	/// is simply copied to events and otherwise ignored.
3627	///
3628	/// Raises [`APIError::APIMisuseError`] when `channel_value_satoshis` > 2**24 or `push_msat` is
3629	/// greater than `channel_value_satoshis * 1k` or `channel_value_satoshis < 1000`.
3630	///
3631	/// Raises [`APIError::ChannelUnavailable`] if the channel cannot be opened due to failing to
3632	/// generate a shutdown scriptpubkey or destination script set by
3633	/// [`SignerProvider::get_shutdown_scriptpubkey`] or [`SignerProvider::get_destination_script`].
3634	///
3635	/// Note that we do not check if you are currently connected to the given peer. If no
3636	/// connection is available, the outbound `open_channel` message may fail to send, resulting in
3637	/// the channel eventually being silently forgotten (dropped on reload).
3638	///
3639	/// If `temporary_channel_id` is specified, it will be used as the temporary channel ID of the
3640	/// channel. Otherwise, a random one will be generated for you.
3641	///
3642	/// Returns the new Channel's temporary `channel_id`. This ID will appear as
3643	/// [`Event::FundingGenerationReady::temporary_channel_id`] and in
3644	/// [`ChannelDetails::channel_id`] until after
3645	/// [`ChannelManager::funding_transaction_generated`] is called, swapping the Channel's ID for
3646	/// one derived from the funding transaction's TXID. If the counterparty rejects the channel
3647	/// immediately, this temporary ID will appear in [`Event::ChannelClosed::channel_id`].
3648	///
3649	/// [`Event::FundingGenerationReady::user_channel_id`]: events::Event::FundingGenerationReady::user_channel_id
3650	/// [`Event::FundingGenerationReady::temporary_channel_id`]: events::Event::FundingGenerationReady::temporary_channel_id
3651	/// [`Event::ChannelClosed::channel_id`]: events::Event::ChannelClosed::channel_id
3652	pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u128, temporary_channel_id: Option<ChannelId>, override_config: Option<UserConfig>) -> Result<ChannelId, APIError> {
3653		if channel_value_satoshis < 1000 {
3654			return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
3655		}
3656
3657		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
3658		// We want to make sure the lock is actually acquired by PersistenceNotifierGuard.
3659		debug_assert!(&self.total_consistency_lock.try_write().is_err());
3660
3661		let per_peer_state = self.per_peer_state.read().unwrap();
3662
3663		let peer_state_mutex = per_peer_state.get(&their_network_key)
3664			.ok_or_else(|| APIError::APIMisuseError{ err: format!("Not connected to node: {}", their_network_key) })?;
3665
3666		let mut peer_state = peer_state_mutex.lock().unwrap();
3667
3668		if let Some(temporary_channel_id) = temporary_channel_id {
3669			if peer_state.channel_by_id.contains_key(&temporary_channel_id) {
3670				return Err(APIError::APIMisuseError{ err: format!("Channel with temporary channel ID {} already exists!", temporary_channel_id)});
3671			}
3672		}
3673
3674		let mut channel = {
3675			let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
3676			let their_features = &peer_state.latest_features;
3677			let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
3678			match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key,
3679				their_features, channel_value_satoshis, push_msat, user_channel_id, config,
3680				self.best_block.read().unwrap().height, outbound_scid_alias, temporary_channel_id, &*self.logger)
3681			{
3682				Ok(res) => res,
3683				Err(e) => {
3684					self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
3685					return Err(e);
3686				},
3687			}
3688		};
3689		let logger = WithChannelContext::from(&self.logger, &channel.context, None);
3690		let res = channel.get_open_channel(self.chain_hash, &&logger);
3691
3692		let temporary_channel_id = channel.context.channel_id();
3693		match peer_state.channel_by_id.entry(temporary_channel_id) {
3694			hash_map::Entry::Occupied(_) => {
3695				if cfg!(fuzzing) {
3696					return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
3697				} else {
3698					panic!("RNG is bad???");
3699				}
3700			},
3701			hash_map::Entry::Vacant(entry) => { entry.insert(ChannelPhase::UnfundedOutboundV1(channel)); }
3702		}
3703
3704		if let Some(msg) = res {
3705			peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
3706				node_id: their_network_key,
3707				msg,
3708			});
3709		}
3710		Ok(temporary_channel_id)
3711	}
3712
3713	fn list_funded_channels_with_filter<Fn: FnMut(&(&ChannelId, &Channel<SP>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
3714		// Allocate our best estimate of the number of channels we have in the `res`
3715		// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
3716		// a scid or a scid alias, and the `outpoint_to_peer` shouldn't be used outside
3717		// of the ChannelMonitor handling. Therefore reallocations may still occur, but is
3718		// unlikely as the `short_to_chan_info` map often contains 2 entries for
3719		// the same channel.
3720		let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
3721		{
3722			let best_block_height = self.best_block.read().unwrap().height;
3723			let per_peer_state = self.per_peer_state.read().unwrap();
3724			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
3725				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
3726				let peer_state = &mut *peer_state_lock;
3727				res.extend(peer_state.channel_by_id.iter()
3728					.filter_map(|(chan_id, phase)| match phase {
3729						// Only `Channels` in the `ChannelPhase::Funded` phase can be considered funded.
3730						ChannelPhase::Funded(chan) => Some((chan_id, chan)),
3731						_ => None,
3732					})
3733					.filter(f)
3734					.map(|(_channel_id, channel)| {
3735						ChannelDetails::from_channel_context(&channel.context, best_block_height,
3736							peer_state.latest_features.clone(), &self.fee_estimator)
3737					})
3738				);
3739			}
3740		}
3741		res
3742	}
3743
3744	/// Gets the list of open channels, in random order. See [`ChannelDetails`] field documentation for
3745	/// more information.
3746	pub fn list_channels(&self) -> Vec<ChannelDetails> {
3747		// Allocate our best estimate of the number of channels we have in the `res`
3748		// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
3749		// a scid or a scid alias, and the `outpoint_to_peer` shouldn't be used outside
3750		// of the ChannelMonitor handling. Therefore reallocations may still occur, but is
3751		// unlikely as the `short_to_chan_info` map often contains 2 entries for
3752		// the same channel.
3753		let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
3754		{
3755			let best_block_height = self.best_block.read().unwrap().height;
3756			let per_peer_state = self.per_peer_state.read().unwrap();
3757			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
3758				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
3759				let peer_state = &mut *peer_state_lock;
3760				for context in peer_state.channel_by_id.iter().map(|(_, phase)| phase.context()) {
3761					let details = ChannelDetails::from_channel_context(context, best_block_height,
3762						peer_state.latest_features.clone(), &self.fee_estimator);
3763					res.push(details);
3764				}
3765			}
3766		}
3767		res
3768	}
3769
3770	/// Gets the list of usable channels, in random order. Useful as an argument to
3771	/// [`Router::find_route`] to ensure non-announced channels are used.
3772	///
3773	/// These are guaranteed to have their [`ChannelDetails::is_usable`] value set to true, see the
3774	/// documentation for [`ChannelDetails::is_usable`] for more info on exactly what the criteria
3775	/// are.
3776	pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
3777		// Note we use is_live here instead of usable which leads to somewhat confused
3778		// internal/external nomenclature, but that's ok cause that's probably what the user
3779		// really wanted anyway.
3780		self.list_funded_channels_with_filter(|&(_, ref channel)| channel.context.is_live())
3781	}
3782
3783	/// Gets the list of channels we have with a given counterparty, in random order.
3784	pub fn list_channels_with_counterparty(&self, counterparty_node_id: &PublicKey) -> Vec<ChannelDetails> {
3785		let best_block_height = self.best_block.read().unwrap().height;
3786		let per_peer_state = self.per_peer_state.read().unwrap();
3787
3788		if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
3789			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
3790			let peer_state = &mut *peer_state_lock;
3791			let features = &peer_state.latest_features;
3792			let context_to_details = |context| {
3793				ChannelDetails::from_channel_context(context, best_block_height, features.clone(), &self.fee_estimator)
3794			};
3795			return peer_state.channel_by_id
3796				.iter()
3797				.map(|(_, phase)| phase.context())
3798				.map(context_to_details)
3799				.collect();
3800		}
3801		vec![]
3802	}
3803
3804	/// Returns in an undefined order recent payments that -- if not fulfilled -- have yet to find a
3805	/// successful path, or have unresolved HTLCs.
3806	///
3807	/// This can be useful for payments that may have been prepared, but ultimately not sent, as a
3808	/// result of a crash. If such a payment exists, is not listed here, and an
3809	/// [`Event::PaymentSent`] has not been received, you may consider resending the payment.
3810	///
3811	/// [`Event::PaymentSent`]: events::Event::PaymentSent
3812	pub fn list_recent_payments(&self) -> Vec<RecentPaymentDetails> {
3813		self.pending_outbound_payments.pending_outbound_payments.lock().unwrap().iter()
3814			.filter_map(|(payment_id, pending_outbound_payment)| match pending_outbound_payment {
3815				PendingOutboundPayment::AwaitingInvoice { .. }
3816					| PendingOutboundPayment::AwaitingOffer { .. }
3817					// InvoiceReceived is an intermediate state and doesn't need to be exposed
3818					| PendingOutboundPayment::InvoiceReceived { .. } =>
3819				{
3820					Some(RecentPaymentDetails::AwaitingInvoice { payment_id: *payment_id })
3821				},
3822				PendingOutboundPayment::StaticInvoiceReceived { .. } => {
3823					Some(RecentPaymentDetails::AwaitingInvoice { payment_id: *payment_id })
3824				},
3825				PendingOutboundPayment::Retryable { payment_hash, total_msat, .. } => {
3826					Some(RecentPaymentDetails::Pending {
3827						payment_id: *payment_id,
3828						payment_hash: *payment_hash,
3829						total_msat: *total_msat,
3830					})
3831				},
3832				PendingOutboundPayment::Abandoned { payment_hash, .. } => {
3833					Some(RecentPaymentDetails::Abandoned { payment_id: *payment_id, payment_hash: *payment_hash })
3834				},
3835				PendingOutboundPayment::Fulfilled { payment_hash, .. } => {
3836					Some(RecentPaymentDetails::Fulfilled { payment_id: *payment_id, payment_hash: *payment_hash })
3837				},
3838				PendingOutboundPayment::Legacy { .. } => None
3839			})
3840			.collect()
3841	}
3842
3843	fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
3844		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
3845
3846		let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new();
3847		let mut shutdown_result = None;
3848
3849		{
3850			let per_peer_state = self.per_peer_state.read().unwrap();
3851
3852			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
3853				.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
3854
3855			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
3856			let peer_state = &mut *peer_state_lock;
3857
3858			match peer_state.channel_by_id.entry(channel_id.clone()) {
3859				hash_map::Entry::Occupied(mut chan_phase_entry) => {
3860					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
3861						let funding_txo_opt = chan.context.get_funding_txo();
3862						let their_features = &peer_state.latest_features;
3863						let (shutdown_msg, mut monitor_update_opt, htlcs) =
3864							chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
3865						failed_htlcs = htlcs;
3866
3867						// We can send the `shutdown` message before updating the `ChannelMonitor`
3868						// here as we don't need the monitor update to complete until we send a
3869						// `shutdown_signed`, which we'll delay if we're pending a monitor update.
3870						peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
3871							node_id: *counterparty_node_id,
3872							msg: shutdown_msg,
3873						});
3874
3875						debug_assert!(monitor_update_opt.is_none() || !chan.is_shutdown(),
3876							"We can't both complete shutdown and generate a monitor update");
3877
3878						// Update the monitor with the shutdown script if necessary.
3879						if let Some(monitor_update) = monitor_update_opt.take() {
3880							handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
3881								peer_state_lock, peer_state, per_peer_state, chan);
3882						}
3883					} else {
3884						let mut shutdown_res = chan_phase_entry.get_mut().context_mut()
3885							.force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) });
3886						remove_channel_phase!(self, peer_state, chan_phase_entry, shutdown_res);
3887						shutdown_result = Some(shutdown_res);
3888					}
3889				},
3890				hash_map::Entry::Vacant(_) => {
3891					return Err(APIError::ChannelUnavailable {
3892						err: format!(
3893							"Channel with id {} not found for the passed counterparty node_id {}",
3894							channel_id, counterparty_node_id,
3895						)
3896					});
3897				},
3898			}
3899		}
3900
3901		for htlc_source in failed_htlcs.drain(..) {
3902			let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
3903			let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id };
3904			self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
3905		}
3906
3907		if let Some(shutdown_result) = shutdown_result {
3908			self.finish_close_channel(shutdown_result);
3909		}
3910
3911		Ok(())
3912	}
3913
3914	/// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
3915	/// will be accepted on the given channel, and after additional timeout/the closing of all
3916	/// pending HTLCs, the channel will be closed on chain.
3917	///
3918	///  * If we are the channel initiator, we will pay between our [`ChannelCloseMinimum`] and
3919	///    [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`NonAnchorChannelFee`]
3920	///    fee estimate.
3921	///  * If our counterparty is the channel initiator, we will require a channel closing
3922	///    transaction feerate of at least our [`ChannelCloseMinimum`] feerate or the feerate which
3923	///    would appear on a force-closure transaction, whichever is lower. We will allow our
3924	///    counterparty to pay as much fee as they'd like, however.
3925	///
3926	/// May generate a [`SendShutdown`] message event on success, which should be relayed.
3927	///
3928	/// Raises [`APIError::ChannelUnavailable`] if the channel cannot be closed due to failing to
3929	/// generate a shutdown scriptpubkey or destination script set by
3930	/// [`SignerProvider::get_shutdown_scriptpubkey`]. A force-closure may be needed to close the
3931	/// channel.
3932	///
3933	/// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
3934	/// [`ChannelCloseMinimum`]: crate::chain::chaininterface::ConfirmationTarget::ChannelCloseMinimum
3935	/// [`NonAnchorChannelFee`]: crate::chain::chaininterface::ConfirmationTarget::NonAnchorChannelFee
3936	/// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown
3937	pub fn close_channel(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey) -> Result<(), APIError> {
3938		self.close_channel_internal(channel_id, counterparty_node_id, None, None)
3939	}
3940
3941	/// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
3942	/// will be accepted on the given channel, and after additional timeout/the closing of all
3943	/// pending HTLCs, the channel will be closed on chain.
3944	///
3945	/// `target_feerate_sat_per_1000_weight` has different meanings depending on if we initiated
3946	/// the channel being closed or not:
3947	///  * If we are the channel initiator, we will pay at least this feerate on the closing
3948	///    transaction. The upper-bound is set by
3949	///    [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`NonAnchorChannelFee`]
3950	///    fee estimate (or `target_feerate_sat_per_1000_weight`, if it is greater).
3951	///  * If our counterparty is the channel initiator, we will refuse to accept a channel closure
3952	///    transaction feerate below `target_feerate_sat_per_1000_weight` (or the feerate which
3953	///    will appear on a force-closure transaction, whichever is lower).
3954	///
3955	/// The `shutdown_script` provided  will be used as the `scriptPubKey` for the closing transaction.
3956	/// Will fail if a shutdown script has already been set for this channel by
3957	/// ['ChannelHandshakeConfig::commit_upfront_shutdown_pubkey`]. The given shutdown script must
3958	/// also be compatible with our and the counterparty's features.
3959	///
3960	/// May generate a [`SendShutdown`] message event on success, which should be relayed.
3961	///
3962	/// Raises [`APIError::ChannelUnavailable`] if the channel cannot be closed due to failing to
3963	/// generate a shutdown scriptpubkey or destination script set by
3964	/// [`SignerProvider::get_shutdown_scriptpubkey`]. A force-closure may be needed to close the
3965	/// channel.
3966	///
3967	/// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
3968	/// [`NonAnchorChannelFee`]: crate::chain::chaininterface::ConfirmationTarget::NonAnchorChannelFee
3969	/// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown
3970	pub fn close_channel_with_feerate_and_script(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
3971		self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
3972	}
3973
3974	/// Applies a [`ChannelMonitorUpdate`] which may or may not be for a channel which is closed.
3975	fn apply_post_close_monitor_update(
3976		&self, counterparty_node_id: PublicKey, channel_id: ChannelId, funding_txo: OutPoint,
3977		monitor_update: ChannelMonitorUpdate,
3978	) {
3979		// Note that there may be some post-close updates which need to be well-ordered with
3980		// respect to the `update_id`, so we hold the `peer_state` lock here.
3981		let per_peer_state = self.per_peer_state.read().unwrap();
3982		let mut peer_state_lock = per_peer_state.get(&counterparty_node_id)
3983			.expect("We must always have a peer entry for a peer with which we have channels that have ChannelMonitors")
3984			.lock().unwrap();
3985		let peer_state = &mut *peer_state_lock;
3986		match peer_state.channel_by_id.entry(channel_id) {
3987			hash_map::Entry::Occupied(mut chan_phase) => {
3988				if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
3989					handle_new_monitor_update!(self, funding_txo,
3990						monitor_update, peer_state_lock, peer_state, per_peer_state, chan);
3991					return;
3992				} else {
3993					debug_assert!(false, "We shouldn't have an update for a non-funded channel");
3994				}
3995			},
3996			hash_map::Entry::Vacant(_) => {},
3997		}
3998
3999		handle_new_monitor_update!(
4000			self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
4001			counterparty_node_id, channel_id, POST_CHANNEL_CLOSE
4002		);
4003	}
4004
4005	/// When a channel is removed, two things need to happen:
4006	/// (a) [`locked_close_channel`] must be called in the same `per_peer_state` lock as
4007	///     the channel-closing action,
4008	/// (b) this needs to be called without holding any locks (except
4009	///     [`ChannelManager::total_consistency_lock`].
4010	fn finish_close_channel(&self, mut shutdown_res: ShutdownResult) {
4011		debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
4012		#[cfg(debug_assertions)]
4013		for (_, peer) in self.per_peer_state.read().unwrap().iter() {
4014			debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
4015		}
4016
4017		let logger = WithContext::from(
4018			&self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id), None
4019		);
4020
4021		log_debug!(logger, "Finishing closure of channel due to {} with {} HTLCs to fail",
4022			shutdown_res.closure_reason, shutdown_res.dropped_outbound_htlcs.len());
4023		for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
4024			let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
4025			let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
4026			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
4027			self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
4028		}
4029		if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
4030			debug_assert!(false, "This should have been handled in `locked_close_channel`");
4031			self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
4032		}
4033		if self.background_events_processed_since_startup.load(Ordering::Acquire) {
4034			// If a `ChannelMonitorUpdate` was applied (i.e. any time we have a funding txo and are
4035			// not in the startup sequence) check if we need to handle any
4036			// `MonitorUpdateCompletionAction`s.
4037			// TODO: If we do the `in_flight_monitor_updates.is_empty()` check in
4038			// `locked_close_channel` we can skip the locks here.
4039			if let Some(funding_txo) = shutdown_res.channel_funding_txo {
4040				let per_peer_state = self.per_peer_state.read().unwrap();
4041				if let Some(peer_state_mtx) = per_peer_state.get(&shutdown_res.counterparty_node_id) {
4042					let mut peer_state = peer_state_mtx.lock().unwrap();
4043					if peer_state.in_flight_monitor_updates.get(&funding_txo).map(|l| l.is_empty()).unwrap_or(true) {
4044						let update_actions = peer_state.monitor_update_blocked_actions
4045							.remove(&shutdown_res.channel_id).unwrap_or(Vec::new());
4046
4047						mem::drop(peer_state);
4048						mem::drop(per_peer_state);
4049
4050						self.handle_monitor_update_completion_actions(update_actions);
4051					}
4052				}
4053			}
4054		}
4055		let mut shutdown_results = Vec::new();
4056		if let Some(txid) = shutdown_res.unbroadcasted_batch_funding_txid {
4057			let mut funding_batch_states = self.funding_batch_states.lock().unwrap();
4058			let affected_channels = funding_batch_states.remove(&txid).into_iter().flatten();
4059			let per_peer_state = self.per_peer_state.read().unwrap();
4060			let mut has_uncompleted_channel = None;
4061			for (channel_id, counterparty_node_id, state) in affected_channels {
4062				if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
4063					let mut peer_state = peer_state_mutex.lock().unwrap();
4064					if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) {
4065						let mut close_res = chan.context_mut().force_shutdown(false, ClosureReason::FundingBatchClosure);
4066						locked_close_channel!(self, &mut *peer_state, chan.context(), close_res);
4067						shutdown_results.push(close_res);
4068					}
4069				}
4070				has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state));
4071			}
4072			debug_assert!(
4073				has_uncompleted_channel.unwrap_or(true),
4074				"Closing a batch where all channels have completed initial monitor update",
4075			);
4076		}
4077
4078		{
4079			let mut pending_events = self.pending_events.lock().unwrap();
4080			pending_events.push_back((events::Event::ChannelClosed {
4081				channel_id: shutdown_res.channel_id,
4082				user_channel_id: shutdown_res.user_channel_id,
4083				reason: shutdown_res.closure_reason,
4084				counterparty_node_id: Some(shutdown_res.counterparty_node_id),
4085				channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis),
4086				channel_funding_txo: shutdown_res.channel_funding_txo,
4087				last_local_balance_msat: Some(shutdown_res.last_local_balance_msat),
4088			}, None));
4089
4090			if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx {
4091				let funding_info = if shutdown_res.is_manual_broadcast {
4092					FundingInfo::OutPoint {
4093						outpoint: shutdown_res.channel_funding_txo
4094							.expect("We had an unbroadcasted funding tx, so should also have had a funding outpoint"),
4095					}
4096				} else {
4097					FundingInfo::Tx{ transaction }
4098				};
4099				pending_events.push_back((events::Event::DiscardFunding {
4100					channel_id: shutdown_res.channel_id, funding_info
4101				}, None));
4102			}
4103		}
4104		for shutdown_result in shutdown_results.drain(..) {
4105			self.finish_close_channel(shutdown_result);
4106		}
4107	}
4108
4109	/// `peer_msg` should be set when we receive a message from a peer, but not set when the
4110	/// user closes, which will be re-exposed as the `ChannelClosed` reason.
4111	fn force_close_channel_with_peer(&self, channel_id: &ChannelId, peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool)
4112	-> Result<PublicKey, APIError> {
4113		let per_peer_state = self.per_peer_state.read().unwrap();
4114		let peer_state_mutex = per_peer_state.get(peer_node_id)
4115			.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?;
4116		let (update_opt, counterparty_node_id) = {
4117			let mut peer_state = peer_state_mutex.lock().unwrap();
4118			let closure_reason = if let Some(peer_msg) = peer_msg {
4119				ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) }
4120			} else {
4121				ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(broadcast) }
4122			};
4123			let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id), None);
4124			if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
4125				log_error!(logger, "Force-closing channel {}", channel_id);
4126				let (mut shutdown_res, update_opt) = match chan_phase_entry.get_mut() {
4127					ChannelPhase::Funded(ref mut chan) => {
4128						(
4129							chan.context.force_shutdown(broadcast, closure_reason),
4130							self.get_channel_update_for_broadcast(&chan).ok(),
4131						)
4132					},
4133					ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) |
4134					ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => {
4135						// Unfunded channel has no update
4136						(chan_phase_entry.get_mut().context_mut().force_shutdown(false, closure_reason), None)
4137					},
4138				};
4139				let chan_phase = remove_channel_phase!(self, peer_state, chan_phase_entry, shutdown_res);
4140				mem::drop(peer_state);
4141				mem::drop(per_peer_state);
4142				self.finish_close_channel(shutdown_res);
4143				(update_opt, chan_phase.context().get_counterparty_node_id())
4144			} else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() {
4145				log_error!(logger, "Force-closing channel {}", &channel_id);
4146				// N.B. that we don't send any channel close event here: we
4147				// don't have a user_channel_id, and we never sent any opening
4148				// events anyway.
4149				(None, *peer_node_id)
4150			} else {
4151				return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, peer_node_id) });
4152			}
4153		};
4154		if let Some(update) = update_opt {
4155			// If we have some Channel Update to broadcast, we cache it and broadcast it later.
4156			let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
4157			pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
4158				msg: update
4159			});
4160		}
4161
4162		Ok(counterparty_node_id)
4163	}
4164
4165	fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, broadcast: bool, error_message: String)
4166	-> Result<(), APIError> {
4167		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4168		log_debug!(self.logger,
4169			"Force-closing channel, The error message sent to the peer : {}", error_message);
4170		match self.force_close_channel_with_peer(channel_id, &counterparty_node_id, None, broadcast) {
4171			Ok(counterparty_node_id) => {
4172				let per_peer_state = self.per_peer_state.read().unwrap();
4173				if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
4174					let mut peer_state = peer_state_mutex.lock().unwrap();
4175					peer_state.pending_msg_events.push(
4176						events::MessageSendEvent::HandleError {
4177							node_id: counterparty_node_id,
4178							action: msgs::ErrorAction::SendErrorMessage {
4179								msg: msgs::ErrorMessage { channel_id: *channel_id, data: error_message }
4180							},
4181						}
4182					);
4183				}
4184				Ok(())
4185			},
4186			Err(e) => Err(e)
4187		}
4188	}
4189
4190	/// Force closes a channel, immediately broadcasting the latest local transaction(s),
4191	/// rejecting new HTLCs.
4192	///
4193	/// The provided `error_message` is sent to connected peers for closing
4194	/// channels and should be a human-readable description of what went wrong.
4195	///
4196	/// Fails if `channel_id` is unknown to the manager, or if the `counterparty_node_id`
4197	/// isn't the counterparty of the corresponding channel.
4198	pub fn force_close_broadcasting_latest_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String)
4199	-> Result<(), APIError> {
4200		self.force_close_sending_error(channel_id, counterparty_node_id, true, error_message)
4201	}
4202
4203	/// Force closes a channel, rejecting new HTLCs on the given channel but skips broadcasting
4204	/// the latest local transaction(s).
4205	///
4206	/// The provided `error_message` is sent to connected peers for closing channels and should
4207	/// be a human-readable description of what went wrong.
4208	///
4209	/// Fails if `channel_id` is unknown to the manager, or if the
4210	/// `counterparty_node_id` isn't the counterparty of the corresponding channel.
4211	/// You can always broadcast the latest local transaction(s) via
4212	/// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`].
4213	pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String)
4214	-> Result<(), APIError> {
4215		self.force_close_sending_error(channel_id, counterparty_node_id, false, error_message)
4216	}
4217
4218	/// Force close all channels, immediately broadcasting the latest local commitment transaction
4219	/// for each to the chain and rejecting new HTLCs on each.
4220	///
4221	/// The provided `error_message` is sent to connected peers for closing channels and should
4222	/// be a human-readable description of what went wrong.
4223	pub fn force_close_all_channels_broadcasting_latest_txn(&self, error_message: String) {
4224		for chan in self.list_channels() {
4225			let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id, error_message.clone());
4226		}
4227	}
4228
4229	/// Force close all channels rejecting new HTLCs on each but without broadcasting the latest
4230	/// local transaction(s).
4231	///
4232	/// The provided `error_message` is sent to connected peers for closing channels and
4233	/// should be a human-readable description of what went wrong.
4234	pub fn force_close_all_channels_without_broadcasting_txn(&self, error_message: String) {
4235		for chan in self.list_channels() {
4236			let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id, error_message.clone());
4237		}
4238	}
4239
4240	fn can_forward_htlc_to_outgoing_channel(
4241		&self, chan: &mut Channel<SP>, msg: &msgs::UpdateAddHTLC, next_packet: &NextPacketDetails
4242	) -> Result<(), (&'static str, u16)> {
4243		if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
4244			// Note that the behavior here should be identical to the above block - we
4245			// should NOT reveal the existence or non-existence of a private channel if
4246			// we don't allow forwards outbound over them.
4247			return Err(("Refusing to forward to a private channel based on our config.", 0x4000 | 10));
4248		}
4249		if chan.context.get_channel_type().supports_scid_privacy() && next_packet.outgoing_scid != chan.context.outbound_scid_alias() {
4250			// `option_scid_alias` (referred to in LDK as `scid_privacy`) means
4251			// "refuse to forward unless the SCID alias was used", so we pretend
4252			// we don't have the channel here.
4253			return Err(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10));
4254		}
4255
4256		// Note that we could technically not return an error yet here and just hope
4257		// that the connection is reestablished or monitor updated by the time we get
4258		// around to doing the actual forward, but better to fail early if we can and
4259		// hopefully an attacker trying to path-trace payments cannot make this occur
4260		// on a small/per-node/per-channel scale.
4261		if !chan.context.is_live() {
4262			if !chan.context.is_enabled() {
4263				// channel_disabled
4264				return Err(("Forwarding channel has been disconnected for some time.", 0x1000 | 20));
4265			} else {
4266				// temporary_channel_failure
4267				return Err(("Forwarding channel is not in a ready state.", 0x1000 | 7));
4268			}
4269		}
4270		if next_packet.outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
4271			return Err(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11));
4272		}
4273		if let Err((err, code)) = chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value) {
4274			return Err((err, code));
4275		}
4276
4277		Ok(())
4278	}
4279
4280	/// Executes a callback `C` that returns some value `X` on the channel found with the given
4281	/// `scid`. `None` is returned when the channel is not found.
4282	fn do_funded_channel_callback<X, C: Fn(&mut Channel<SP>) -> X>(
4283		&self, scid: u64, callback: C,
4284	) -> Option<X> {
4285		let (counterparty_node_id, channel_id) = match self.short_to_chan_info.read().unwrap().get(&scid).cloned() {
4286			None => return None,
4287			Some((cp_id, id)) => (cp_id, id),
4288		};
4289		let per_peer_state = self.per_peer_state.read().unwrap();
4290		let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
4291		if peer_state_mutex_opt.is_none() {
4292			return None;
4293		}
4294		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
4295		let peer_state = &mut *peer_state_lock;
4296		match peer_state.channel_by_id.get_mut(&channel_id).and_then(
4297			|chan_phase| if let ChannelPhase::Funded(chan) = chan_phase { Some(chan) } else { None }
4298		) {
4299			None => None,
4300			Some(chan) => Some(callback(chan)),
4301		}
4302	}
4303
4304	fn can_forward_htlc(
4305		&self, msg: &msgs::UpdateAddHTLC, next_packet_details: &NextPacketDetails
4306	) -> Result<(), (&'static str, u16)> {
4307		match self.do_funded_channel_callback(next_packet_details.outgoing_scid, |chan: &mut Channel<SP>| {
4308			self.can_forward_htlc_to_outgoing_channel(chan, msg, next_packet_details)
4309		}) {
4310			Some(Ok(())) => {},
4311			Some(Err(e)) => return Err(e),
4312			None => {
4313				// If we couldn't find the channel info for the scid, it may be a phantom or
4314				// intercept forward.
4315				if (self.default_configuration.accept_intercept_htlcs &&
4316					fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, next_packet_details.outgoing_scid, &self.chain_hash)) ||
4317					fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, next_packet_details.outgoing_scid, &self.chain_hash)
4318				{} else {
4319					return Err(("Don't have available channel for forwarding as requested.", 0x4000 | 10));
4320				}
4321			}
4322		}
4323
4324		let cur_height = self.best_block.read().unwrap().height + 1;
4325		if let Err((err_msg, err_code)) = check_incoming_htlc_cltv(
4326			cur_height, next_packet_details.outgoing_cltv_value, msg.cltv_expiry
4327		) {
4328			return Err((err_msg, err_code));
4329		}
4330
4331		Ok(())
4332	}
4333
4334	fn htlc_failure_from_update_add_err(
4335		&self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, err_msg: &'static str,
4336		err_code: u16, is_intro_node_blinded_forward: bool,
4337		shared_secret: &[u8; 32]
4338	) -> HTLCFailureMsg {
4339		// at capacity, we write fields `htlc_msat` and `len`
4340		let mut res = VecWriter(Vec::with_capacity(8 + 2));
4341		if err_code & 0x1000 == 0x1000 {
4342			if err_code == 0x1000 | 11 || err_code == 0x1000 | 12 {
4343				msg.amount_msat.write(&mut res).expect("Writes cannot fail");
4344			}
4345			else if err_code == 0x1000 | 13 {
4346				msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
4347			}
4348			else if err_code == 0x1000 | 20 {
4349				// TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
4350				0u16.write(&mut res).expect("Writes cannot fail");
4351			}
4352			// See https://github.com/lightning/bolts/blob/247e83d/04-onion-routing.md?plain=1#L1414-L1415
4353			(0u16).write(&mut res).expect("Writes cannot fail");
4354		}
4355
4356		log_info!(
4357			WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash)),
4358			"Failed to accept/forward incoming HTLC: {}", err_msg
4359		);
4360		// If `msg.blinding_point` is set, we must always fail with malformed.
4361		if msg.blinding_point.is_some() {
4362			return HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
4363				channel_id: msg.channel_id,
4364				htlc_id: msg.htlc_id,
4365				sha256_of_onion: [0; 32],
4366				failure_code: INVALID_ONION_BLINDING,
4367			});
4368		}
4369
4370		let (err_code, err_data) = if is_intro_node_blinded_forward {
4371			(INVALID_ONION_BLINDING, &[0; 32][..])
4372		} else {
4373			(err_code, &res.0[..])
4374		};
4375		HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
4376			channel_id: msg.channel_id,
4377			htlc_id: msg.htlc_id,
4378			reason: HTLCFailReason::reason(err_code, err_data.to_vec())
4379				.get_encrypted_failure_packet(shared_secret, &None),
4380		})
4381	}
4382
4383	fn decode_update_add_htlc_onion(
4384		&self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey,
4385	) -> Result<
4386		(onion_utils::Hop, [u8; 32], Option<Result<PublicKey, secp256k1::Error>>), HTLCFailureMsg
4387	> {
4388		let (next_hop, shared_secret, next_packet_details_opt) = decode_incoming_update_add_htlc_onion(
4389			msg, &*self.node_signer, &*self.logger, &self.secp_ctx
4390		)?;
4391
4392		let next_packet_details = match next_packet_details_opt {
4393			Some(next_packet_details) => next_packet_details,
4394			// it is a receive, so no need for outbound checks
4395			None => return Ok((next_hop, shared_secret, None)),
4396		};
4397
4398		// Perform outbound checks here instead of in [`Self::construct_pending_htlc_info`] because we
4399		// can't hold the outbound peer state lock at the same time as the inbound peer state lock.
4400		self.can_forward_htlc(&msg, &next_packet_details).map_err(|e| {
4401			let (err_msg, err_code) = e;
4402			self.htlc_failure_from_update_add_err(
4403				msg, counterparty_node_id, err_msg, err_code,
4404				next_hop.is_intro_node_blinded_forward(), &shared_secret
4405			)
4406		})?;
4407
4408		Ok((next_hop, shared_secret, Some(next_packet_details.next_packet_pubkey)))
4409	}
4410
4411	fn construct_pending_htlc_status<'a>(
4412		&self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, shared_secret: [u8; 32],
4413		decoded_hop: onion_utils::Hop, allow_underpay: bool,
4414		next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>,
4415	) -> PendingHTLCStatus {
4416		macro_rules! return_err {
4417			($msg: expr, $err_code: expr, $data: expr) => {
4418				{
4419					let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash));
4420					log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
4421					if msg.blinding_point.is_some() {
4422						return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
4423							msgs::UpdateFailMalformedHTLC {
4424								channel_id: msg.channel_id,
4425								htlc_id: msg.htlc_id,
4426								sha256_of_onion: [0; 32],
4427								failure_code: INVALID_ONION_BLINDING,
4428							}
4429						))
4430					}
4431					return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
4432						channel_id: msg.channel_id,
4433						htlc_id: msg.htlc_id,
4434						reason: HTLCFailReason::reason($err_code, $data.to_vec())
4435							.get_encrypted_failure_packet(&shared_secret, &None),
4436					}));
4437				}
4438			}
4439		}
4440		match decoded_hop {
4441			onion_utils::Hop::Receive(next_hop_data) => {
4442				// OUR PAYMENT!
4443				let current_height: u32 = self.best_block.read().unwrap().height;
4444				match create_recv_pending_htlc_info(next_hop_data, shared_secret, msg.payment_hash,
4445					msg.amount_msat, msg.cltv_expiry, None, allow_underpay, msg.skimmed_fee_msat,
4446					current_height)
4447				{
4448					Ok(info) => {
4449						// Note that we could obviously respond immediately with an update_fulfill_htlc
4450						// message, however that would leak that we are the recipient of this payment, so
4451						// instead we stay symmetric with the forwarding case, only responding (after a
4452						// delay) once they've send us a commitment_signed!
4453						PendingHTLCStatus::Forward(info)
4454					},
4455					Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
4456				}
4457			},
4458			onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
4459				match create_fwd_pending_htlc_info(msg, next_hop_data, next_hop_hmac,
4460					new_packet_bytes, shared_secret, next_packet_pubkey_opt) {
4461					Ok(info) => PendingHTLCStatus::Forward(info),
4462					Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
4463				}
4464			}
4465		}
4466	}
4467
4468	/// Gets the current [`channel_update`] for the given channel. This first checks if the channel is
4469	/// public, and thus should be called whenever the result is going to be passed out in a
4470	/// [`MessageSendEvent::BroadcastChannelUpdate`] event.
4471	///
4472	/// Note that in [`internal_closing_signed`], this function is called without the `peer_state`
4473	/// corresponding to the channel's counterparty locked, as the channel been removed from the
4474	/// storage and the `peer_state` lock has been dropped.
4475	///
4476	/// [`channel_update`]: msgs::ChannelUpdate
4477	/// [`internal_closing_signed`]: Self::internal_closing_signed
4478	fn get_channel_update_for_broadcast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
4479		if !chan.context.should_announce() {
4480			return Err(LightningError {
4481				err: "Cannot broadcast a channel_update for a private channel".to_owned(),
4482				action: msgs::ErrorAction::IgnoreError
4483			});
4484		}
4485		if chan.context.get_short_channel_id().is_none() {
4486			return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError});
4487		}
4488		let logger = WithChannelContext::from(&self.logger, &chan.context, None);
4489		log_trace!(logger, "Attempting to generate broadcast channel update for channel {}", &chan.context.channel_id());
4490		self.get_channel_update_for_unicast(chan)
4491	}
4492
4493	/// Gets the current [`channel_update`] for the given channel. This does not check if the channel
4494	/// is public (only returning an `Err` if the channel does not yet have an assigned SCID),
4495	/// and thus MUST NOT be called unless the recipient of the resulting message has already
4496	/// provided evidence that they know about the existence of the channel.
4497	///
4498	/// Note that through [`internal_closing_signed`], this function is called without the
4499	/// `peer_state`  corresponding to the channel's counterparty locked, as the channel been
4500	/// removed from the storage and the `peer_state` lock has been dropped.
4501	///
4502	/// [`channel_update`]: msgs::ChannelUpdate
4503	/// [`internal_closing_signed`]: Self::internal_closing_signed
4504	fn get_channel_update_for_unicast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
4505		let logger = WithChannelContext::from(&self.logger, &chan.context, None);
4506		log_trace!(logger, "Attempting to generate channel update for channel {}", chan.context.channel_id());
4507		let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
4508			None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
4509			Some(id) => id,
4510		};
4511
4512		let logger = WithChannelContext::from(&self.logger, &chan.context, None);
4513		log_trace!(logger, "Generating channel update for channel {}", chan.context.channel_id());
4514		let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
4515		let enabled = chan.context.is_enabled();
4516
4517		let unsigned = msgs::UnsignedChannelUpdate {
4518			chain_hash: self.chain_hash,
4519			short_channel_id,
4520			timestamp: chan.context.get_update_time_counter(),
4521			message_flags: 1, // Only must_be_one
4522			channel_flags: (!were_node_one) as u8 | ((!enabled as u8) << 1),
4523			cltv_expiry_delta: chan.context.get_cltv_expiry_delta(),
4524			htlc_minimum_msat: chan.context.get_counterparty_htlc_minimum_msat(),
4525			htlc_maximum_msat: chan.context.get_announced_htlc_max_msat(),
4526			fee_base_msat: chan.context.get_outbound_forwarding_fee_base_msat(),
4527			fee_proportional_millionths: chan.context.get_fee_proportional_millionths(),
4528			excess_data: Vec::new(),
4529		};
4530		// Panic on failure to signal LDK should be restarted to retry signing the `ChannelUpdate`.
4531		// If we returned an error and the `node_signer` cannot provide a signature for whatever
4532		// reason`, we wouldn't be able to receive inbound payments through the corresponding
4533		// channel.
4534		let sig = self.node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelUpdate(&unsigned)).unwrap();
4535
4536		Ok(msgs::ChannelUpdate {
4537			signature: sig,
4538			contents: unsigned
4539		})
4540	}
4541
4542	#[cfg(test)]
4543	pub(crate) fn test_send_payment_along_path(&self, path: &Path, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
4544		let _lck = self.total_consistency_lock.read().unwrap();
4545		self.send_payment_along_path(SendAlongPathArgs {
4546			path, payment_hash, recipient_onion: &recipient_onion, total_value,
4547			cur_height, payment_id, keysend_preimage, invoice_request: None, session_priv_bytes
4548		})
4549	}
4550
4551	fn send_payment_along_path(&self, args: SendAlongPathArgs) -> Result<(), APIError> {
4552		let SendAlongPathArgs {
4553			path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage,
4554			invoice_request, session_priv_bytes
4555		} = args;
4556		// The top-level caller should hold the total_consistency_lock read lock.
4557		debug_assert!(self.total_consistency_lock.try_write().is_err());
4558		let prng_seed = self.entropy_source.get_secure_random_bytes();
4559		let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");
4560
4561		let (onion_packet, htlc_msat, htlc_cltv) = onion_utils::create_payment_onion(
4562			&self.secp_ctx, &path, &session_priv, total_value, recipient_onion, cur_height,
4563			payment_hash, keysend_preimage, invoice_request, prng_seed
4564		).map_err(|e| {
4565			let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None, Some(*payment_hash));
4566			log_error!(logger, "Failed to build an onion for path for payment hash {}", payment_hash);
4567			e
4568		})?;
4569
4570		let err: Result<(), _> = loop {
4571			let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.hops.first().unwrap().short_channel_id) {
4572				None => {
4573					let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None, Some(*payment_hash));
4574					log_error!(logger, "Failed to find first-hop for payment hash {}", payment_hash);
4575					return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()})
4576				},
4577				Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
4578			};
4579
4580			let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(id), Some(*payment_hash));
4581			log_trace!(logger,
4582				"Attempting to send payment with payment hash {} along path with next hop {}",
4583				payment_hash, path.hops.first().unwrap().short_channel_id);
4584
4585			let per_peer_state = self.per_peer_state.read().unwrap();
4586			let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
4587				.ok_or_else(|| APIError::ChannelUnavailable{err: "No peer matching the path's first hop found!".to_owned() })?;
4588			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
4589			let peer_state = &mut *peer_state_lock;
4590			if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(id) {
4591				match chan_phase_entry.get_mut() {
4592					ChannelPhase::Funded(chan) => {
4593						if !chan.context.is_live() {
4594							return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
4595						}
4596						let funding_txo = chan.context.get_funding_txo().unwrap();
4597						let logger = WithChannelContext::from(&self.logger, &chan.context, Some(*payment_hash));
4598						let send_res = chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(),
4599							htlc_cltv, HTLCSource::OutboundRoute {
4600								path: path.clone(),
4601								session_priv: session_priv.clone(),
4602								first_hop_htlc_msat: htlc_msat,
4603								payment_id,
4604							}, onion_packet, None, &self.fee_estimator, &&logger);
4605						match break_chan_phase_entry!(self, peer_state, send_res, chan_phase_entry) {
4606							Some(monitor_update) => {
4607								match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
4608									false => {
4609										// Note that MonitorUpdateInProgress here indicates (per function
4610										// docs) that we will resend the commitment update once monitor
4611										// updating completes. Therefore, we must return an error
4612										// indicating that it is unsafe to retry the payment wholesale,
4613										// which we do in the send_payment check for
4614										// MonitorUpdateInProgress, below.
4615										return Err(APIError::MonitorUpdateInProgress);
4616									},
4617									true => {},
4618								}
4619							},
4620							None => {},
4621						}
4622					},
4623					_ => return Err(APIError::ChannelUnavailable{err: "Channel to first hop is unfunded".to_owned()}),
4624				};
4625			} else {
4626				// The channel was likely removed after we fetched the id from the
4627				// `short_to_chan_info` map, but before we successfully locked the
4628				// `channel_by_id` map.
4629				// This can occur as no consistency guarantees exists between the two maps.
4630				return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()});
4631			}
4632			return Ok(());
4633		};
4634		match handle_error!(self, err, path.hops.first().unwrap().pubkey) {
4635			Ok(_) => unreachable!(),
4636			Err(e) => {
4637				Err(APIError::ChannelUnavailable { err: e.err })
4638			},
4639		}
4640	}
4641
4642	/// Sends a payment along a given route. See [`Self::send_payment`] for more info.
4643	///
4644	/// LDK will not automatically retry this payment, though it may be manually re-sent after an
4645	/// [`Event::PaymentFailed`] is generated.
4646	pub fn send_payment_with_route(
4647		&self, mut route: Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields,
4648		payment_id: PaymentId
4649	) -> Result<(), RetryableSendFailure> {
4650		let best_block_height = self.best_block.read().unwrap().height;
4651		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4652		let route_params = route.route_params.clone().unwrap_or_else(|| {
4653			// Create a dummy route params since they're a required parameter but unused in this case
4654			let (payee_node_id, cltv_delta) = route.paths.first()
4655				.and_then(|path| path.hops.last().map(|hop| (hop.pubkey, hop.cltv_expiry_delta as u32)))
4656				.unwrap_or_else(|| (PublicKey::from_slice(&[2; 32]).unwrap(), MIN_FINAL_CLTV_EXPIRY_DELTA as u32));
4657			let dummy_payment_params = PaymentParameters::from_node_id(payee_node_id, cltv_delta);
4658			RouteParameters::from_payment_params_and_value(dummy_payment_params, route.get_total_amount())
4659		});
4660		if route.route_params.is_none() { route.route_params = Some(route_params.clone()); }
4661		let router = FixedRouter::new(route);
4662		self.pending_outbound_payments
4663			.send_payment(payment_hash, recipient_onion, payment_id, Retry::Attempts(0),
4664				route_params, &&router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
4665				&self.entropy_source, &self.node_signer, best_block_height, &self.logger,
4666				&self.pending_events, |args| self.send_payment_along_path(args))
4667	}
4668
4669	/// Sends a payment to the route found using the provided [`RouteParameters`], retrying failed
4670	/// payment paths based on the provided `Retry`.
4671	///
4672	/// May generate [`UpdateHTLCs`] message(s) event on success, which should be relayed (e.g. via
4673	/// [`PeerManager::process_events`]).
4674	///
4675	/// # Avoiding Duplicate Payments
4676	///
4677	/// If a pending payment is currently in-flight with the same [`PaymentId`] provided, this
4678	/// method will error with [`RetryableSendFailure::DuplicatePayment`]. Note, however, that once a
4679	/// payment is no longer pending (either via [`ChannelManager::abandon_payment`], or handling of
4680	/// an [`Event::PaymentSent`] or [`Event::PaymentFailed`]) LDK will not stop you from sending a
4681	/// second payment with the same [`PaymentId`].
4682	///
4683	/// Thus, in order to ensure duplicate payments are not sent, you should implement your own
4684	/// tracking of payments, including state to indicate once a payment has completed. Because you
4685	/// should also ensure that [`PaymentHash`]es are not re-used, for simplicity, you should
4686	/// consider using the [`PaymentHash`] as the key for tracking payments. In that case, the
4687	/// [`PaymentId`] should be a copy of the [`PaymentHash`] bytes.
4688	///
4689	/// Additionally, in the scenario where we begin the process of sending a payment, but crash
4690	/// before `send_payment` returns (or prior to [`ChannelMonitorUpdate`] persistence if you're
4691	/// using [`ChannelMonitorUpdateStatus::InProgress`]), the payment may be lost on restart. See
4692	/// [`ChannelManager::list_recent_payments`] for more information.
4693	///
4694	/// Routes are automatically found using the [`Router] provided on startup. To fix a route for a
4695	/// particular payment, use [`Self::send_payment_with_route`] or match the [`PaymentId`] passed to
4696	/// [`Router::find_route_with_id`].
4697	///
4698	/// [`Event::PaymentSent`]: events::Event::PaymentSent
4699	/// [`Event::PaymentFailed`]: events::Event::PaymentFailed
4700	/// [`UpdateHTLCs`]: events::MessageSendEvent::UpdateHTLCs
4701	/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
4702	/// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
4703	pub fn send_payment(
4704		&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId,
4705		route_params: RouteParameters, retry_strategy: Retry
4706	) -> Result<(), RetryableSendFailure> {
4707		let best_block_height = self.best_block.read().unwrap().height;
4708		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4709		self.pending_outbound_payments
4710			.send_payment(payment_hash, recipient_onion, payment_id, retry_strategy, route_params,
4711				&self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
4712				&self.entropy_source, &self.node_signer, best_block_height, &self.logger,
4713				&self.pending_events, |args| self.send_payment_along_path(args))
4714	}
4715
4716	#[cfg(test)]
4717	pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
4718		let best_block_height = self.best_block.read().unwrap().height;
4719		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4720		self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion,
4721			keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer,
4722			best_block_height, |args| self.send_payment_along_path(args))
4723	}
4724
4725	#[cfg(test)]
4726	pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route: &Route) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
4727		let best_block_height = self.best_block.read().unwrap().height;
4728		self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, recipient_onion, payment_id, route, None, &self.entropy_source, best_block_height)
4729	}
4730
4731	#[cfg(test)]
4732	pub(crate) fn test_set_payment_metadata(&self, payment_id: PaymentId, new_payment_metadata: Option<Vec<u8>>) {
4733		self.pending_outbound_payments.test_set_payment_metadata(payment_id, new_payment_metadata);
4734	}
4735
4736	/// Pays the [`Bolt12Invoice`] associated with the `payment_id` encoded in its `payer_metadata`.
4737	///
4738	/// The invoice's `payer_metadata` is used to authenticate that the invoice was indeed requested
4739	/// before attempting a payment. [`Bolt12PaymentError::UnexpectedInvoice`] is returned if this
4740	/// fails or if the encoded `payment_id` is not recognized. The latter may happen once the
4741	/// payment is no longer tracked because the payment was attempted after:
4742	/// - an invoice for the `payment_id` was already paid,
4743	/// - one full [timer tick] has elapsed since initially requesting the invoice when paying an
4744	///   offer, or
4745	/// - the refund corresponding to the invoice has already expired.
4746	///
4747	/// To retry the payment, request another invoice using a new `payment_id`.
4748	///
4749	/// Attempting to pay the same invoice twice while the first payment is still pending will
4750	/// result in a [`Bolt12PaymentError::DuplicateInvoice`].
4751	///
4752	/// Otherwise, either [`Event::PaymentSent`] or [`Event::PaymentFailed`] are used to indicate
4753	/// whether or not the payment was successful.
4754	///
4755	/// [timer tick]: Self::timer_tick_occurred
4756	pub fn send_payment_for_bolt12_invoice(
4757		&self, invoice: &Bolt12Invoice, context: Option<&OffersContext>,
4758	) -> Result<(), Bolt12PaymentError> {
4759		match self.verify_bolt12_invoice(invoice, context) {
4760			Ok(payment_id) => self.send_payment_for_verified_bolt12_invoice(invoice, payment_id),
4761			Err(()) => Err(Bolt12PaymentError::UnexpectedInvoice),
4762		}
4763	}
4764
4765	fn verify_bolt12_invoice(
4766		&self, invoice: &Bolt12Invoice, context: Option<&OffersContext>,
4767	) -> Result<PaymentId, ()> {
4768		let secp_ctx = &self.secp_ctx;
4769		let expanded_key = &self.inbound_payment_key;
4770
4771		match context {
4772			None if invoice.is_for_refund_without_paths() => {
4773				invoice.verify_using_metadata(expanded_key, secp_ctx)
4774			},
4775			Some(&OffersContext::OutboundPayment { payment_id, nonce, .. }) => {
4776				invoice.verify_using_payer_data(payment_id, nonce, expanded_key, secp_ctx)
4777			},
4778			_ => Err(()),
4779		}
4780	}
4781
4782	fn send_payment_for_verified_bolt12_invoice(&self, invoice: &Bolt12Invoice, payment_id: PaymentId) -> Result<(), Bolt12PaymentError> {
4783		let best_block_height = self.best_block.read().unwrap().height;
4784		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4785		let features = self.bolt12_invoice_features();
4786		self.pending_outbound_payments
4787			.send_payment_for_bolt12_invoice(
4788				invoice, payment_id, &self.router, self.list_usable_channels(), features,
4789				|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, &self,
4790				&self.secp_ctx, best_block_height, &self.logger, &self.pending_events,
4791				|args| self.send_payment_along_path(args)
4792			)
4793	}
4794
4795	#[cfg(async_payments)]
4796	fn initiate_async_payment(
4797		&self, invoice: &StaticInvoice, payment_id: PaymentId
4798	) -> Result<(), Bolt12PaymentError> {
4799		let mut res = Ok(());
4800		PersistenceNotifierGuard::optionally_notify(self, || {
4801			let best_block_height = self.best_block.read().unwrap().height;
4802			let features = self.bolt12_invoice_features();
4803			let outbound_pmts_res = self.pending_outbound_payments.static_invoice_received(
4804				invoice, payment_id, features, best_block_height, &*self.entropy_source,
4805				&self.pending_events
4806			);
4807			match outbound_pmts_res {
4808				Ok(()) => {},
4809				Err(Bolt12PaymentError::UnexpectedInvoice) | Err(Bolt12PaymentError::DuplicateInvoice) => {
4810					res = outbound_pmts_res.map(|_| ());
4811					return NotifyOption::SkipPersistNoEvents
4812				},
4813				Err(e) => {
4814					res = Err(e);
4815					return NotifyOption::DoPersist
4816				}
4817			};
4818
4819			let nonce = Nonce::from_entropy_source(&*self.entropy_source);
4820			let hmac = payment_id.hmac_for_async_payment(nonce, &self.inbound_payment_key);
4821			let reply_paths = match self.create_blinded_paths(
4822				MessageContext::AsyncPayments(
4823					AsyncPaymentsContext::OutboundPayment { payment_id, nonce, hmac }
4824				)
4825			) {
4826				Ok(paths) => paths,
4827				Err(()) => {
4828					self.abandon_payment_with_reason(payment_id, PaymentFailureReason::BlindedPathCreationFailed);
4829					res = Err(Bolt12PaymentError::BlindedPathCreationFailed);
4830					return NotifyOption::DoPersist
4831				}
4832			};
4833
4834			let mut pending_async_payments_messages = self.pending_async_payments_messages.lock().unwrap();
4835			const HTLC_AVAILABLE_LIMIT: usize = 10;
4836			reply_paths
4837				.iter()
4838				.flat_map(|reply_path| invoice.message_paths().iter().map(move |invoice_path| (invoice_path, reply_path)))
4839				.take(HTLC_AVAILABLE_LIMIT)
4840				.for_each(|(invoice_path, reply_path)| {
4841					let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
4842						destination: Destination::BlindedPath(invoice_path.clone()),
4843						reply_path: reply_path.clone(),
4844					};
4845					let message = AsyncPaymentsMessage::HeldHtlcAvailable(HeldHtlcAvailable {});
4846					pending_async_payments_messages.push((message, instructions));
4847				});
4848
4849			NotifyOption::DoPersist
4850		});
4851
4852		res
4853	}
4854
4855	#[cfg(async_payments)]
4856	fn send_payment_for_static_invoice(
4857		&self, payment_id: PaymentId
4858	) -> Result<(), Bolt12PaymentError> {
4859		let best_block_height = self.best_block.read().unwrap().height;
4860		let mut res = Ok(());
4861		PersistenceNotifierGuard::optionally_notify(self, || {
4862			let outbound_pmts_res = self.pending_outbound_payments.send_payment_for_static_invoice(
4863				payment_id, &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
4864				&self.entropy_source, &self.node_signer, &self, &self.secp_ctx, best_block_height,
4865				&self.logger, &self.pending_events, |args| self.send_payment_along_path(args)
4866			);
4867			match outbound_pmts_res {
4868				Err(Bolt12PaymentError::UnexpectedInvoice) | Err(Bolt12PaymentError::DuplicateInvoice) => {
4869					res = outbound_pmts_res.map(|_| ());
4870					NotifyOption::SkipPersistNoEvents
4871				},
4872				other_res => {
4873					res = other_res;
4874					NotifyOption::DoPersist
4875				}
4876			}
4877		});
4878		res
4879	}
4880
4881	/// Signals that no further attempts for the given payment should occur. Useful if you have a
4882	/// pending outbound payment with retries remaining, but wish to stop retrying the payment before
4883	/// retries are exhausted.
4884	///
4885	/// # Event Generation
4886	///
4887	/// If no [`Event::PaymentFailed`] event had been generated before, one will be generated as soon
4888	/// as there are no remaining pending HTLCs for this payment.
4889	///
4890	/// Note that calling this method does *not* prevent a payment from succeeding. You must still
4891	/// wait until you receive either a [`Event::PaymentFailed`] or [`Event::PaymentSent`] event to
4892	/// determine the ultimate status of a payment.
4893	///
4894	/// # Requested Invoices
4895	///
4896	/// In the case of paying a [`Bolt12Invoice`] via [`ChannelManager::pay_for_offer`], abandoning
4897	/// the payment prior to receiving the invoice will result in an [`Event::PaymentFailed`] and
4898	/// prevent any attempts at paying it once received.
4899	///
4900	/// # Restart Behavior
4901	///
4902	/// If an [`Event::PaymentFailed`] is generated and we restart without first persisting the
4903	/// [`ChannelManager`], another [`Event::PaymentFailed`] may be generated.
4904	///
4905	/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
4906	pub fn abandon_payment(&self, payment_id: PaymentId) {
4907		self.abandon_payment_with_reason(payment_id, PaymentFailureReason::UserAbandoned)
4908	}
4909
4910	fn abandon_payment_with_reason(&self, payment_id: PaymentId, reason: PaymentFailureReason) {
4911		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4912		self.pending_outbound_payments.abandon_payment(payment_id, reason, &self.pending_events);
4913	}
4914
4915	/// Send a spontaneous payment, which is a payment that does not require the recipient to have
4916	/// generated an invoice. Optionally, you may specify the preimage. If you do choose to specify
4917	/// the preimage, it must be a cryptographically secure random value that no intermediate node
4918	/// would be able to guess -- otherwise, an intermediate node may claim the payment and it will
4919	/// never reach the recipient.
4920	///
4921	/// Similar to regular payments, you MUST NOT reuse a `payment_preimage` value. See
4922	/// [`send_payment`] for more information about the risks of duplicate preimage usage.
4923	///
4924	/// See [`send_payment`] documentation for more details on the idempotency guarantees provided by
4925	/// the [`PaymentId`] key.
4926	///
4927	/// See [`PaymentParameters::for_keysend`] for help in constructing `route_params` for spontaneous
4928	/// payments.
4929	///
4930	/// [`send_payment`]: Self::send_payment
4931	/// [`PaymentParameters::for_keysend`]: crate::routing::router::PaymentParameters::for_keysend
4932	pub fn send_spontaneous_payment(
4933		&self, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields,
4934		payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry
4935	) -> Result<PaymentHash, RetryableSendFailure> {
4936		let best_block_height = self.best_block.read().unwrap().height;
4937		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4938		self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, recipient_onion,
4939			payment_id, retry_strategy, route_params, &self.router, self.list_usable_channels(),
4940			|| self.compute_inflight_htlcs(),  &self.entropy_source, &self.node_signer, best_block_height,
4941			&self.logger, &self.pending_events, |args| self.send_payment_along_path(args))
4942	}
4943
4944	/// Send a payment that is probing the given route for liquidity. We calculate the
4945	/// [`PaymentHash`] of probes based on a static secret and a random [`PaymentId`], which allows
4946	/// us to easily discern them from real payments.
4947	pub fn send_probe(&self, path: Path) -> Result<(PaymentHash, PaymentId), ProbeSendFailure> {
4948		let best_block_height = self.best_block.read().unwrap().height;
4949		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4950		self.pending_outbound_payments.send_probe(path, self.probing_cookie_secret,
4951			&self.entropy_source, &self.node_signer, best_block_height,
4952			|args| self.send_payment_along_path(args))
4953	}
4954
4955	/// Returns whether a payment with the given [`PaymentHash`] and [`PaymentId`] is, in fact, a
4956	/// payment probe.
4957	#[cfg(test)]
4958	pub(crate) fn payment_is_probe(&self, payment_hash: &PaymentHash, payment_id: &PaymentId) -> bool {
4959		outbound_payment::payment_is_probe(payment_hash, payment_id, self.probing_cookie_secret)
4960	}
4961
4962	/// Sends payment probes over all paths of a route that would be used to pay the given
4963	/// amount to the given `node_id`.
4964	///
4965	/// See [`ChannelManager::send_preflight_probes`] for more information.
4966	pub fn send_spontaneous_preflight_probes(
4967		&self, node_id: PublicKey, amount_msat: u64, final_cltv_expiry_delta: u32,
4968		liquidity_limit_multiplier: Option<u64>,
4969	) -> Result<Vec<(PaymentHash, PaymentId)>, ProbeSendFailure> {
4970		let payment_params =
4971			PaymentParameters::from_node_id(node_id, final_cltv_expiry_delta);
4972
4973		let route_params = RouteParameters::from_payment_params_and_value(payment_params, amount_msat);
4974
4975		self.send_preflight_probes(route_params, liquidity_limit_multiplier)
4976	}
4977
4978	/// Sends payment probes over all paths of a route that would be used to pay a route found
4979	/// according to the given [`RouteParameters`].
4980	///
4981	/// This may be used to send "pre-flight" probes, i.e., to train our scorer before conducting
4982	/// the actual payment. Note this is only useful if there likely is sufficient time for the
4983	/// probe to settle before sending out the actual payment, e.g., when waiting for user
4984	/// confirmation in a wallet UI.
4985	///
4986	/// Otherwise, there is a chance the probe could take up some liquidity needed to complete the
4987	/// actual payment. Users should therefore be cautious and might avoid sending probes if
4988	/// liquidity is scarce and/or they don't expect the probe to return before they send the
4989	/// payment. To mitigate this issue, channels with available liquidity less than the required
4990	/// amount times the given `liquidity_limit_multiplier` won't be used to send pre-flight
4991	/// probes. If `None` is given as `liquidity_limit_multiplier`, it defaults to `3`.
4992	pub fn send_preflight_probes(
4993		&self, route_params: RouteParameters, liquidity_limit_multiplier: Option<u64>,
4994	) -> Result<Vec<(PaymentHash, PaymentId)>, ProbeSendFailure> {
4995		let liquidity_limit_multiplier = liquidity_limit_multiplier.unwrap_or(3);
4996
4997		let payer = self.get_our_node_id();
4998		let usable_channels = self.list_usable_channels();
4999		let first_hops = usable_channels.iter().collect::<Vec<_>>();
5000		let inflight_htlcs = self.compute_inflight_htlcs();
5001
5002		let route = self
5003			.router
5004			.find_route(&payer, &route_params, Some(&first_hops), inflight_htlcs)
5005			.map_err(|e| {
5006				log_error!(self.logger, "Failed to find path for payment probe: {:?}", e);
5007				ProbeSendFailure::RouteNotFound
5008			})?;
5009
5010		let mut used_liquidity_map = hash_map_with_capacity(first_hops.len());
5011
5012		let mut res = Vec::new();
5013
5014		for mut path in route.paths {
5015			// If the last hop is probably an unannounced channel we refrain from probing all the
5016			// way through to the end and instead probe up to the second-to-last channel.
5017			while let Some(last_path_hop) = path.hops.last() {
5018				if last_path_hop.maybe_announced_channel {
5019					// We found a potentially announced last hop.
5020					break;
5021				} else {
5022					// Drop the last hop, as it's likely unannounced.
5023					log_debug!(
5024						self.logger,
5025						"Avoided sending payment probe all the way to last hop {} as it is likely unannounced.",
5026						last_path_hop.short_channel_id
5027					);
5028					let final_value_msat = path.final_value_msat();
5029					path.hops.pop();
5030					if let Some(new_last) = path.hops.last_mut() {
5031						new_last.fee_msat += final_value_msat;
5032					}
5033				}
5034			}
5035
5036			if path.hops.len() < 2 {
5037				log_debug!(
5038					self.logger,
5039					"Skipped sending payment probe over path with less than two hops."
5040				);
5041				continue;
5042			}
5043
5044			if let Some(first_path_hop) = path.hops.first() {
5045				if let Some(first_hop) = first_hops.iter().find(|h| {
5046					h.get_outbound_payment_scid() == Some(first_path_hop.short_channel_id)
5047				}) {
5048					let path_value = path.final_value_msat() + path.fee_msat();
5049					let used_liquidity =
5050						used_liquidity_map.entry(first_path_hop.short_channel_id).or_insert(0);
5051
5052					if first_hop.next_outbound_htlc_limit_msat
5053						< (*used_liquidity + path_value) * liquidity_limit_multiplier
5054					{
5055						log_debug!(self.logger, "Skipped sending payment probe to avoid putting channel {} under the liquidity limit.", first_path_hop.short_channel_id);
5056						continue;
5057					} else {
5058						*used_liquidity += path_value;
5059					}
5060				}
5061			}
5062
5063			res.push(self.send_probe(path).map_err(|e| {
5064				log_error!(self.logger, "Failed to send pre-flight probe: {:?}", e);
5065				e
5066			})?);
5067		}
5068
5069		Ok(res)
5070	}
5071
5072	/// Handles the generation of a funding transaction, optionally (for tests) with a function
5073	/// which checks the correctness of the funding transaction given the associated channel.
5074	fn funding_transaction_generated_intern<FundingOutput: FnMut(&OutboundV1Channel<SP>) -> Result<OutPoint, &'static str>>(
5075		&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding_transaction: Transaction, is_batch_funding: bool,
5076		mut find_funding_output: FundingOutput, is_manual_broadcast: bool,
5077	) -> Result<(), APIError> {
5078		let per_peer_state = self.per_peer_state.read().unwrap();
5079		let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
5080			.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
5081
5082		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
5083		let peer_state = &mut *peer_state_lock;
5084		let funding_txo;
5085		let (mut chan, msg_opt) = match peer_state.channel_by_id.remove(&temporary_channel_id) {
5086			Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
5087				macro_rules! close_chan { ($err: expr, $api_err: expr, $chan: expr) => { {
5088					let counterparty;
5089					let err = if let ChannelError::Close((msg, reason)) = $err {
5090						let channel_id = $chan.context.channel_id();
5091						counterparty = chan.context.get_counterparty_node_id();
5092						let shutdown_res = $chan.context.force_shutdown(false, reason);
5093						MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None)
5094					} else { unreachable!(); };
5095
5096					mem::drop(peer_state_lock);
5097					mem::drop(per_peer_state);
5098					let _: Result<(), _> = handle_error!(self, Err(err), counterparty);
5099					Err($api_err)
5100				} } }
5101				match find_funding_output(&chan) {
5102					Ok(found_funding_txo) => funding_txo = found_funding_txo,
5103					Err(err) => {
5104						let chan_err = ChannelError::close(err.to_owned());
5105						let api_err = APIError::APIMisuseError { err: err.to_owned() };
5106						return close_chan!(chan_err, api_err, chan);
5107					},
5108				}
5109
5110				let logger = WithChannelContext::from(&self.logger, &chan.context, None);
5111				let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger);
5112				match funding_res {
5113					Ok(funding_msg) => (chan, funding_msg),
5114					Err((mut chan, chan_err)) => {
5115						let api_err = APIError::ChannelUnavailable { err: "Signer refused to sign the initial commitment transaction".to_owned() };
5116						return close_chan!(chan_err, api_err, chan);
5117					}
5118				}
5119			},
5120			Some(phase) => {
5121				peer_state.channel_by_id.insert(temporary_channel_id, phase);
5122				return Err(APIError::APIMisuseError {
5123					err: format!(
5124						"Channel with id {} for the passed counterparty node_id {} is not an unfunded, outbound V1 channel",
5125						temporary_channel_id, counterparty_node_id),
5126				})
5127			},
5128			None => return Err(APIError::ChannelUnavailable {err: format!(
5129				"Channel with id {} not found for the passed counterparty node_id {}",
5130				temporary_channel_id, counterparty_node_id),
5131				}),
5132		};
5133
5134		if let Some(msg) = msg_opt {
5135			peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
5136				node_id: chan.context.get_counterparty_node_id(),
5137				msg,
5138			});
5139		}
5140		if is_manual_broadcast {
5141			chan.context.set_manual_broadcast();
5142		}
5143		match peer_state.channel_by_id.entry(chan.context.channel_id()) {
5144			hash_map::Entry::Occupied(_) => {
5145				panic!("Generated duplicate funding txid?");
5146			},
5147			hash_map::Entry::Vacant(e) => {
5148				let mut outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
5149				match outpoint_to_peer.entry(funding_txo) {
5150					hash_map::Entry::Vacant(e) => { e.insert(chan.context.get_counterparty_node_id()); },
5151					hash_map::Entry::Occupied(o) => {
5152						let err = format!(
5153							"An existing channel using outpoint {} is open with peer {}",
5154							funding_txo, o.get()
5155						);
5156						mem::drop(outpoint_to_peer);
5157						mem::drop(peer_state_lock);
5158						mem::drop(per_peer_state);
5159						let reason = ClosureReason::ProcessingError { err: err.clone() };
5160						self.finish_close_channel(chan.context.force_shutdown(true, reason));
5161						return Err(APIError::ChannelUnavailable { err });
5162					}
5163				}
5164				e.insert(ChannelPhase::UnfundedOutboundV1(chan));
5165			}
5166		}
5167		Ok(())
5168	}
5169
5170	#[cfg(test)]
5171	pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> {
5172		let txid = funding_transaction.compute_txid();
5173		self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, false, |_| {
5174			Ok(OutPoint { txid, index: output_index })
5175		}, false)
5176	}
5177
5178	/// Call this upon creation of a funding transaction for the given channel.
5179	///
5180	/// Returns an [`APIError::APIMisuseError`] if the funding_transaction spent non-SegWit outputs
5181	/// or if no output was found which matches the parameters in [`Event::FundingGenerationReady`].
5182	///
5183	/// Returns [`APIError::APIMisuseError`] if the funding transaction is not final for propagation
5184	/// across the p2p network.
5185	///
5186	/// Returns [`APIError::ChannelUnavailable`] if a funding transaction has already been provided
5187	/// for the channel or if the channel has been closed as indicated by [`Event::ChannelClosed`].
5188	///
5189	/// May panic if the output found in the funding transaction is duplicative with some other
5190	/// channel (note that this should be trivially prevented by using unique funding transaction
5191	/// keys per-channel).
5192	///
5193	/// Do NOT broadcast the funding transaction yourself. When we have safely received our
5194	/// counterparty's signature the funding transaction will automatically be broadcast via the
5195	/// [`BroadcasterInterface`] provided when this `ChannelManager` was constructed.
5196	///
5197	/// Note that this includes RBF or similar transaction replacement strategies - lightning does
5198	/// not currently support replacing a funding transaction on an existing channel. Instead,
5199	/// create a new channel with a conflicting funding transaction.
5200	///
5201	/// Note to keep the miner incentives aligned in moving the blockchain forward, we recommend
5202	/// the wallet software generating the funding transaction to apply anti-fee sniping as
5203	/// implemented by Bitcoin Core wallet. See <https://bitcoinops.org/en/topics/fee-sniping/>
5204	/// for more details.
5205	///
5206	/// [`Event::FundingGenerationReady`]: crate::events::Event::FundingGenerationReady
5207	/// [`Event::ChannelClosed`]: crate::events::Event::ChannelClosed
5208	pub fn funding_transaction_generated(&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding_transaction: Transaction) -> Result<(), APIError> {
5209		self.batch_funding_transaction_generated(&[(&temporary_channel_id, &counterparty_node_id)], funding_transaction)
5210	}
5211
5212
5213	/// **Unsafe**: This method does not validate the spent output. It is the caller's
5214	/// responsibility to ensure the spent outputs are SegWit, as well as making sure the funding
5215	/// transaction has a final absolute locktime, i.e., its locktime is lower than the next block height.
5216	///
5217	/// For a safer method, please refer to [`ChannelManager::funding_transaction_generated`].
5218	///
5219	/// Call this in response to a [`Event::FundingGenerationReady`] event.
5220	///
5221	/// Note that if this method is called successfully, the funding transaction won't be
5222	/// broadcasted and you are expected to broadcast it manually when receiving the
5223	/// [`Event::FundingTxBroadcastSafe`] event.
5224	///
5225	/// Returns [`APIError::ChannelUnavailable`] if a funding transaction has already been provided
5226	/// for the channel or if the channel has been closed as indicated by [`Event::ChannelClosed`].
5227	///
5228	/// May panic if the funding output is duplicative with some other channel (note that this
5229	/// should be trivially prevented by using unique funding transaction keys per-channel).
5230	///
5231	/// Note to keep the miner incentives aligned in moving the blockchain forward, we recommend
5232	/// the wallet software generating the funding transaction to apply anti-fee sniping as
5233	/// implemented by Bitcoin Core wallet. See <https://bitcoinops.org/en/topics/fee-sniping/> for
5234	/// more details.
5235	///
5236	/// [`Event::FundingGenerationReady`]: crate::events::Event::FundingGenerationReady
5237	/// [`Event::FundingTxBroadcastSafe`]: crate::events::Event::FundingTxBroadcastSafe
5238	/// [`Event::ChannelClosed`]: crate::events::Event::ChannelClosed
5239	/// [`ChannelManager::funding_transaction_generated`]: crate::ln::channelmanager::ChannelManager::funding_transaction_generated
5240	pub fn unsafe_manual_funding_transaction_generated(&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding: OutPoint) -> Result<(), APIError> {
5241		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5242
5243		let temporary_channels = &[(&temporary_channel_id, &counterparty_node_id)];
5244		return self.batch_funding_transaction_generated_intern(temporary_channels, FundingType::Unchecked(funding));
5245
5246	}
5247
5248	/// Call this upon creation of a batch funding transaction for the given channels.
5249	///
5250	/// Return values are identical to [`Self::funding_transaction_generated`], respective to
5251	/// each individual channel and transaction output.
5252	///
5253	/// Do NOT broadcast the funding transaction yourself. This batch funding transaction
5254	/// will only be broadcast when we have safely received and persisted the counterparty's
5255	/// signature for each channel.
5256	///
5257	/// If there is an error, all channels in the batch are to be considered closed.
5258	pub fn batch_funding_transaction_generated(&self, temporary_channels: &[(&ChannelId, &PublicKey)], funding_transaction: Transaction) -> Result<(), APIError> {
5259		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5260		self.batch_funding_transaction_generated_intern(temporary_channels, FundingType::Checked(funding_transaction))
5261	}
5262
5263	fn batch_funding_transaction_generated_intern(&self, temporary_channels: &[(&ChannelId, &PublicKey)], funding: FundingType) -> Result<(), APIError> {
5264		let mut result = Ok(());
5265		if let FundingType::Checked(funding_transaction) = &funding {
5266			if !funding_transaction.is_coinbase() {
5267				for inp in funding_transaction.input.iter() {
5268					if inp.witness.is_empty() {
5269						result = result.and(Err(APIError::APIMisuseError {
5270							err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned()
5271						}));
5272					}
5273				}
5274			}
5275
5276			if funding_transaction.output.len() > u16::max_value() as usize {
5277				result = result.and(Err(APIError::APIMisuseError {
5278					err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
5279				}));
5280			}
5281			let height = self.best_block.read().unwrap().height;
5282			// Transactions are evaluated as final by network mempools if their locktime is strictly
5283			// lower than the next block height. However, the modules constituting our Lightning
5284			// node might not have perfect sync about their blockchain views. Thus, if the wallet
5285			// module is ahead of LDK, only allow one more block of headroom.
5286			if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) &&
5287				funding_transaction.lock_time.is_block_height() &&
5288					funding_transaction.lock_time.to_consensus_u32() > height + 1
5289			{
5290				result = result.and(Err(APIError::APIMisuseError {
5291					err: "Funding transaction absolute timelock is non-final".to_owned()
5292				}));
5293			}
5294		}
5295
5296		let txid = funding.txid();
5297		let is_batch_funding = temporary_channels.len() > 1;
5298		let mut funding_batch_states = if is_batch_funding {
5299			Some(self.funding_batch_states.lock().unwrap())
5300		} else {
5301			None
5302		};
5303		let mut funding_batch_state = funding_batch_states.as_mut().and_then(|states| {
5304			match states.entry(txid) {
5305				btree_map::Entry::Occupied(_) => {
5306					result = result.clone().and(Err(APIError::APIMisuseError {
5307						err: "Batch funding transaction with the same txid already exists".to_owned()
5308					}));
5309					None
5310				},
5311				btree_map::Entry::Vacant(vacant) => Some(vacant.insert(Vec::new())),
5312			}
5313		});
5314		let is_manual_broadcast = funding.is_manual_broadcast();
5315		for &(temporary_channel_id, counterparty_node_id) in temporary_channels {
5316			result = result.and_then(|_| self.funding_transaction_generated_intern(
5317				*temporary_channel_id,
5318				*counterparty_node_id,
5319				funding.transaction_or_dummy(),
5320				is_batch_funding,
5321				|chan| {
5322					let mut output_index = None;
5323					let expected_spk = chan.context.get_funding_redeemscript().to_p2wsh();
5324					let outpoint = match &funding {
5325						FundingType::Checked(tx) => {
5326							for (idx, outp) in tx.output.iter().enumerate() {
5327								if outp.script_pubkey == expected_spk && outp.value.to_sat() == chan.context.get_value_satoshis() {
5328									if output_index.is_some() {
5329										return Err("Multiple outputs matched the expected script and value");
5330									}
5331									output_index = Some(idx as u16);
5332								}
5333							}
5334							if output_index.is_none() {
5335								return Err("No output matched the script_pubkey and value in the FundingGenerationReady event");
5336							}
5337							OutPoint { txid, index: output_index.unwrap() }
5338						},
5339						FundingType::Unchecked(outpoint) => outpoint.clone(),
5340					};
5341					if let Some(funding_batch_state) = funding_batch_state.as_mut() {
5342						// TODO(dual_funding): We only do batch funding for V1 channels at the moment, but we'll probably
5343						// need to fix this somehow to not rely on using the outpoint for the channel ID if we
5344						// want to support V2 batching here as well.
5345						funding_batch_state.push((ChannelId::v1_from_funding_outpoint(outpoint), *counterparty_node_id, false));
5346					}
5347					Ok(outpoint)
5348				},
5349				is_manual_broadcast)
5350			);
5351		}
5352		if let Err(ref e) = result {
5353			// Remaining channels need to be removed on any error.
5354			let e = format!("Error in transaction funding: {:?}", e);
5355			let mut channels_to_remove = Vec::new();
5356			channels_to_remove.extend(funding_batch_states.as_mut()
5357				.and_then(|states| states.remove(&txid))
5358				.into_iter().flatten()
5359				.map(|(chan_id, node_id, _state)| (chan_id, node_id))
5360			);
5361			channels_to_remove.extend(temporary_channels.iter()
5362				.map(|(&chan_id, &node_id)| (chan_id, node_id))
5363			);
5364			let mut shutdown_results = Vec::new();
5365			{
5366				let per_peer_state = self.per_peer_state.read().unwrap();
5367				for (channel_id, counterparty_node_id) in channels_to_remove {
5368					per_peer_state.get(&counterparty_node_id)
5369						.map(|peer_state_mutex| peer_state_mutex.lock().unwrap())
5370						.and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id).map(|chan| (chan, peer_state)))
5371						.map(|(mut chan, mut peer_state)| {
5372							let closure_reason = ClosureReason::ProcessingError { err: e.clone() };
5373							let mut close_res = chan.context_mut().force_shutdown(false, closure_reason);
5374							locked_close_channel!(self, peer_state, chan.context(), close_res);
5375							shutdown_results.push(close_res);
5376							peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
5377								node_id: counterparty_node_id,
5378								action: msgs::ErrorAction::SendErrorMessage {
5379									msg: msgs::ErrorMessage {
5380										channel_id,
5381										data: "Failed to fund channel".to_owned(),
5382									}
5383								},
5384							});
5385						});
5386				}
5387			}
5388			mem::drop(funding_batch_states);
5389			for shutdown_result in shutdown_results.drain(..) {
5390				self.finish_close_channel(shutdown_result);
5391			}
5392		}
5393		result
5394	}
5395
5396	/// Atomically applies partial updates to the [`ChannelConfig`] of the given channels.
5397	///
5398	/// Once the updates are applied, each eligible channel (advertised with a known short channel
5399	/// ID and a change in [`forwarding_fee_proportional_millionths`], [`forwarding_fee_base_msat`],
5400	/// or [`cltv_expiry_delta`]) has a [`BroadcastChannelUpdate`] event message generated
5401	/// containing the new [`ChannelUpdate`] message which should be broadcast to the network.
5402	///
5403	/// Returns [`ChannelUnavailable`] when a channel is not found or an incorrect
5404	/// `counterparty_node_id` is provided.
5405	///
5406	/// Returns [`APIMisuseError`] when a [`cltv_expiry_delta`] update is to be applied with a value
5407	/// below [`MIN_CLTV_EXPIRY_DELTA`].
5408	///
5409	/// If an error is returned, none of the updates should be considered applied.
5410	///
5411	/// [`forwarding_fee_proportional_millionths`]: ChannelConfig::forwarding_fee_proportional_millionths
5412	/// [`forwarding_fee_base_msat`]: ChannelConfig::forwarding_fee_base_msat
5413	/// [`cltv_expiry_delta`]: ChannelConfig::cltv_expiry_delta
5414	/// [`BroadcastChannelUpdate`]: events::MessageSendEvent::BroadcastChannelUpdate
5415	/// [`ChannelUpdate`]: msgs::ChannelUpdate
5416	/// [`ChannelUnavailable`]: APIError::ChannelUnavailable
5417	/// [`APIMisuseError`]: APIError::APIMisuseError
5418	pub fn update_partial_channel_config(
5419		&self, counterparty_node_id: &PublicKey, channel_ids: &[ChannelId], config_update: &ChannelConfigUpdate,
5420	) -> Result<(), APIError> {
5421		if config_update.cltv_expiry_delta.map(|delta| delta < MIN_CLTV_EXPIRY_DELTA).unwrap_or(false) {
5422			return Err(APIError::APIMisuseError {
5423				err: format!("The chosen CLTV expiry delta is below the minimum of {}", MIN_CLTV_EXPIRY_DELTA),
5424			});
5425		}
5426
5427		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5428		let per_peer_state = self.per_peer_state.read().unwrap();
5429		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
5430			.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
5431		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
5432		let peer_state = &mut *peer_state_lock;
5433
5434		for channel_id in channel_ids {
5435			if !peer_state.has_channel(channel_id) {
5436				return Err(APIError::ChannelUnavailable {
5437					err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, counterparty_node_id),
5438				});
5439			};
5440		}
5441		for channel_id in channel_ids {
5442			if let Some(channel_phase) = peer_state.channel_by_id.get_mut(channel_id) {
5443				let mut config = channel_phase.context().config();
5444				config.apply(config_update);
5445				if !channel_phase.context_mut().update_config(&config) {
5446					continue;
5447				}
5448				if let ChannelPhase::Funded(channel) = channel_phase {
5449					if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
5450						let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
5451						pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
5452					} else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
5453						peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
5454							node_id: channel.context.get_counterparty_node_id(),
5455							msg,
5456						});
5457					}
5458				}
5459				continue;
5460			} else {
5461				// This should not be reachable as we've already checked for non-existence in the previous channel_id loop.
5462				debug_assert!(false);
5463				return Err(APIError::ChannelUnavailable {
5464					err: format!(
5465						"Channel with ID {} for passed counterparty_node_id {} disappeared after we confirmed its existence - this should not be reachable!",
5466						channel_id, counterparty_node_id),
5467				});
5468			};
5469		}
5470		Ok(())
5471	}
5472
5473	/// Atomically updates the [`ChannelConfig`] for the given channels.
5474	///
5475	/// Once the updates are applied, each eligible channel (advertised with a known short channel
5476	/// ID and a change in [`forwarding_fee_proportional_millionths`], [`forwarding_fee_base_msat`],
5477	/// or [`cltv_expiry_delta`]) has a [`BroadcastChannelUpdate`] event message generated
5478	/// containing the new [`ChannelUpdate`] message which should be broadcast to the network.
5479	///
5480	/// Returns [`ChannelUnavailable`] when a channel is not found or an incorrect
5481	/// `counterparty_node_id` is provided.
5482	///
5483	/// Returns [`APIMisuseError`] when a [`cltv_expiry_delta`] update is to be applied with a value
5484	/// below [`MIN_CLTV_EXPIRY_DELTA`].
5485	///
5486	/// If an error is returned, none of the updates should be considered applied.
5487	///
5488	/// [`forwarding_fee_proportional_millionths`]: ChannelConfig::forwarding_fee_proportional_millionths
5489	/// [`forwarding_fee_base_msat`]: ChannelConfig::forwarding_fee_base_msat
5490	/// [`cltv_expiry_delta`]: ChannelConfig::cltv_expiry_delta
5491	/// [`BroadcastChannelUpdate`]: events::MessageSendEvent::BroadcastChannelUpdate
5492	/// [`ChannelUpdate`]: msgs::ChannelUpdate
5493	/// [`ChannelUnavailable`]: APIError::ChannelUnavailable
5494	/// [`APIMisuseError`]: APIError::APIMisuseError
5495	pub fn update_channel_config(
5496		&self, counterparty_node_id: &PublicKey, channel_ids: &[ChannelId], config: &ChannelConfig,
5497	) -> Result<(), APIError> {
5498		return self.update_partial_channel_config(counterparty_node_id, channel_ids, &(*config).into());
5499	}
5500
5501	/// Attempts to forward an intercepted HTLC over the provided channel id and with the provided
5502	/// amount to forward. Should only be called in response to an [`HTLCIntercepted`] event.
5503	///
5504	/// Intercepted HTLCs can be useful for Lightning Service Providers (LSPs) to open a just-in-time
5505	/// channel to a receiving node if the node lacks sufficient inbound liquidity.
5506	///
5507	/// To make use of intercepted HTLCs, set [`UserConfig::accept_intercept_htlcs`] and use
5508	/// [`ChannelManager::get_intercept_scid`] to generate short channel id(s) to put in the
5509	/// receiver's invoice route hints. These route hints will signal to LDK to generate an
5510	/// [`HTLCIntercepted`] event when it receives the forwarded HTLC, and this method or
5511	/// [`ChannelManager::fail_intercepted_htlc`] MUST be called in response to the event.
5512	///
5513	/// Note that LDK does not enforce fee requirements in `amt_to_forward_msat`, and will not stop
5514	/// you from forwarding more than you received. See
5515	/// [`HTLCIntercepted::expected_outbound_amount_msat`] for more on forwarding a different amount
5516	/// than expected.
5517	///
5518	/// Errors if the event was not handled in time, in which case the HTLC was automatically failed
5519	/// backwards.
5520	///
5521	/// [`UserConfig::accept_intercept_htlcs`]: crate::util::config::UserConfig::accept_intercept_htlcs
5522	/// [`HTLCIntercepted`]: events::Event::HTLCIntercepted
5523	/// [`HTLCIntercepted::expected_outbound_amount_msat`]: events::Event::HTLCIntercepted::expected_outbound_amount_msat
5524	// TODO: when we move to deciding the best outbound channel at forward time, only take
5525	// `next_node_id` and not `next_hop_channel_id`
5526	pub fn forward_intercepted_htlc(&self, intercept_id: InterceptId, next_hop_channel_id: &ChannelId, next_node_id: PublicKey, amt_to_forward_msat: u64) -> Result<(), APIError> {
5527		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5528
5529		let next_hop_scid = {
5530			let peer_state_lock = self.per_peer_state.read().unwrap();
5531			let peer_state_mutex = peer_state_lock.get(&next_node_id)
5532				.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", next_node_id) })?;
5533			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
5534			let peer_state = &mut *peer_state_lock;
5535			match peer_state.channel_by_id.get(next_hop_channel_id) {
5536				Some(ChannelPhase::Funded(chan)) => {
5537					if !chan.context.is_usable() {
5538						return Err(APIError::ChannelUnavailable {
5539							err: format!("Channel with id {} not fully established", next_hop_channel_id)
5540						})
5541					}
5542					chan.context.get_short_channel_id().unwrap_or(chan.context.outbound_scid_alias())
5543				},
5544				Some(_) => return Err(APIError::ChannelUnavailable {
5545					err: format!("Channel with id {} for the passed counterparty node_id {} is still opening.",
5546						next_hop_channel_id, next_node_id)
5547				}),
5548				None => {
5549					let error = format!("Channel with id {} not found for the passed counterparty node_id {}",
5550						next_hop_channel_id, next_node_id);
5551					let logger = WithContext::from(&self.logger, Some(next_node_id), Some(*next_hop_channel_id), None);
5552					log_error!(logger, "{} when attempting to forward intercepted HTLC", error);
5553					return Err(APIError::ChannelUnavailable {
5554						err: error
5555					})
5556				}
5557			}
5558		};
5559
5560		let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id)
5561			.ok_or_else(|| APIError::APIMisuseError {
5562				err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0))
5563			})?;
5564
5565		let routing = match payment.forward_info.routing {
5566			PendingHTLCRouting::Forward { onion_packet, blinded, incoming_cltv_expiry, .. } => {
5567				PendingHTLCRouting::Forward {
5568					onion_packet, blinded, incoming_cltv_expiry, short_channel_id: next_hop_scid,
5569				}
5570			},
5571			_ => unreachable!() // Only `PendingHTLCRouting::Forward`s are intercepted
5572		};
5573		let skimmed_fee_msat =
5574			payment.forward_info.outgoing_amt_msat.saturating_sub(amt_to_forward_msat);
5575		let pending_htlc_info = PendingHTLCInfo {
5576			skimmed_fee_msat: if skimmed_fee_msat == 0 { None } else { Some(skimmed_fee_msat) },
5577			outgoing_amt_msat: amt_to_forward_msat, routing, ..payment.forward_info
5578		};
5579
5580		let mut per_source_pending_forward = [(
5581			payment.prev_short_channel_id,
5582			payment.prev_counterparty_node_id,
5583			payment.prev_funding_outpoint,
5584			payment.prev_channel_id,
5585			payment.prev_user_channel_id,
5586			vec![(pending_htlc_info, payment.prev_htlc_id)]
5587		)];
5588		self.forward_htlcs(&mut per_source_pending_forward);
5589		Ok(())
5590	}
5591
5592	/// Fails the intercepted HTLC indicated by intercept_id. Should only be called in response to
5593	/// an [`HTLCIntercepted`] event. See [`ChannelManager::forward_intercepted_htlc`].
5594	///
5595	/// Errors if the event was not handled in time, in which case the HTLC was automatically failed
5596	/// backwards.
5597	///
5598	/// [`HTLCIntercepted`]: events::Event::HTLCIntercepted
5599	pub fn fail_intercepted_htlc(&self, intercept_id: InterceptId) -> Result<(), APIError> {
5600		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5601
5602		let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id)
5603			.ok_or_else(|| APIError::APIMisuseError {
5604				err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0))
5605			})?;
5606
5607		if let PendingHTLCRouting::Forward { short_channel_id, incoming_cltv_expiry, .. } = payment.forward_info.routing {
5608			let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
5609				short_channel_id: payment.prev_short_channel_id,
5610				user_channel_id: Some(payment.prev_user_channel_id),
5611				outpoint: payment.prev_funding_outpoint,
5612				channel_id: payment.prev_channel_id,
5613				counterparty_node_id: payment.prev_counterparty_node_id,
5614				htlc_id: payment.prev_htlc_id,
5615				incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret,
5616				phantom_shared_secret: None,
5617				blinded_failure: payment.forward_info.routing.blinded_failure(),
5618				cltv_expiry: incoming_cltv_expiry,
5619			});
5620
5621			let failure_reason = HTLCFailReason::from_failure_code(0x4000 | 10);
5622			let destination = HTLCDestination::UnknownNextHop { requested_forward_scid: short_channel_id };
5623			self.fail_htlc_backwards_internal(&htlc_source, &payment.forward_info.payment_hash, &failure_reason, destination);
5624		} else { unreachable!() } // Only `PendingHTLCRouting::Forward`s are intercepted
5625
5626		Ok(())
5627	}
5628
5629	fn process_pending_update_add_htlcs(&self) {
5630		let mut decode_update_add_htlcs = new_hash_map();
5631		mem::swap(&mut decode_update_add_htlcs, &mut self.decode_update_add_htlcs.lock().unwrap());
5632
5633		let get_failed_htlc_destination = |outgoing_scid_opt: Option<u64>, payment_hash: PaymentHash| {
5634			if let Some(outgoing_scid) = outgoing_scid_opt {
5635				match self.short_to_chan_info.read().unwrap().get(&outgoing_scid) {
5636					Some((outgoing_counterparty_node_id, outgoing_channel_id)) =>
5637						HTLCDestination::NextHopChannel {
5638							node_id: Some(*outgoing_counterparty_node_id),
5639							channel_id: *outgoing_channel_id,
5640						},
5641					None => HTLCDestination::UnknownNextHop {
5642						requested_forward_scid: outgoing_scid,
5643					},
5644				}
5645			} else {
5646				HTLCDestination::FailedPayment { payment_hash }
5647			}
5648		};
5649
5650		'outer_loop: for (incoming_scid, update_add_htlcs) in decode_update_add_htlcs {
5651			let incoming_channel_details_opt = self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
5652				let counterparty_node_id = chan.context.get_counterparty_node_id();
5653				let channel_id = chan.context.channel_id();
5654				let funding_txo = chan.context.get_funding_txo().unwrap();
5655				let user_channel_id = chan.context.get_user_id();
5656				let accept_underpaying_htlcs = chan.context.config().accept_underpaying_htlcs;
5657				(counterparty_node_id, channel_id, funding_txo, user_channel_id, accept_underpaying_htlcs)
5658			});
5659			let (
5660				incoming_counterparty_node_id, incoming_channel_id, incoming_funding_txo,
5661				incoming_user_channel_id, incoming_accept_underpaying_htlcs
5662			 ) = if let Some(incoming_channel_details) = incoming_channel_details_opt {
5663				incoming_channel_details
5664			} else {
5665				// The incoming channel no longer exists, HTLCs should be resolved onchain instead.
5666				continue;
5667			};
5668
5669			let mut htlc_forwards = Vec::new();
5670			let mut htlc_fails = Vec::new();
5671			for update_add_htlc in &update_add_htlcs {
5672				let (next_hop, shared_secret, next_packet_details_opt) = match decode_incoming_update_add_htlc_onion(
5673					&update_add_htlc, &*self.node_signer, &*self.logger, &self.secp_ctx
5674				) {
5675					Ok(decoded_onion) => decoded_onion,
5676					Err(htlc_fail) => {
5677						htlc_fails.push((htlc_fail, HTLCDestination::InvalidOnion));
5678						continue;
5679					},
5680				};
5681
5682				let is_intro_node_blinded_forward = next_hop.is_intro_node_blinded_forward();
5683				let outgoing_scid_opt = next_packet_details_opt.as_ref().map(|d| d.outgoing_scid);
5684
5685				// Process the HTLC on the incoming channel.
5686				match self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
5687					let logger = WithChannelContext::from(&self.logger, &chan.context, Some(update_add_htlc.payment_hash));
5688					chan.can_accept_incoming_htlc(
5689						update_add_htlc, &self.fee_estimator, &logger,
5690					)
5691				}) {
5692					Some(Ok(_)) => {},
5693					Some(Err((err, code))) => {
5694						let htlc_fail = self.htlc_failure_from_update_add_err(
5695							&update_add_htlc, &incoming_counterparty_node_id, err, code,
5696							is_intro_node_blinded_forward, &shared_secret,
5697						);
5698						let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
5699						htlc_fails.push((htlc_fail, htlc_destination));
5700						continue;
5701					},
5702					// The incoming channel no longer exists, HTLCs should be resolved onchain instead.
5703					None => continue 'outer_loop,
5704				}
5705
5706				// Now process the HTLC on the outgoing channel if it's a forward.
5707				if let Some(next_packet_details) = next_packet_details_opt.as_ref() {
5708					if let Err((err, code)) = self.can_forward_htlc(
5709						&update_add_htlc, next_packet_details
5710					) {
5711						let htlc_fail = self.htlc_failure_from_update_add_err(
5712							&update_add_htlc, &incoming_counterparty_node_id, err, code,
5713							is_intro_node_blinded_forward, &shared_secret,
5714						);
5715						let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
5716						htlc_fails.push((htlc_fail, htlc_destination));
5717						continue;
5718					}
5719				}
5720
5721				match self.construct_pending_htlc_status(
5722					&update_add_htlc, &incoming_counterparty_node_id, shared_secret, next_hop,
5723					incoming_accept_underpaying_htlcs, next_packet_details_opt.map(|d| d.next_packet_pubkey),
5724				) {
5725					PendingHTLCStatus::Forward(htlc_forward) => {
5726						htlc_forwards.push((htlc_forward, update_add_htlc.htlc_id));
5727					},
5728					PendingHTLCStatus::Fail(htlc_fail) => {
5729						let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
5730						htlc_fails.push((htlc_fail, htlc_destination));
5731					},
5732				}
5733			}
5734
5735			// Process all of the forwards and failures for the channel in which the HTLCs were
5736			// proposed to as a batch.
5737			let pending_forwards = (
5738				incoming_scid, Some(incoming_counterparty_node_id), incoming_funding_txo,
5739				incoming_channel_id, incoming_user_channel_id, htlc_forwards.drain(..).collect()
5740			);
5741			self.forward_htlcs_without_forward_event(&mut [pending_forwards]);
5742			for (htlc_fail, htlc_destination) in htlc_fails.drain(..) {
5743				let failure = match htlc_fail {
5744					HTLCFailureMsg::Relay(fail_htlc) => HTLCForwardInfo::FailHTLC {
5745						htlc_id: fail_htlc.htlc_id,
5746						err_packet: fail_htlc.reason,
5747					},
5748					HTLCFailureMsg::Malformed(fail_malformed_htlc) => HTLCForwardInfo::FailMalformedHTLC {
5749						htlc_id: fail_malformed_htlc.htlc_id,
5750						sha256_of_onion: fail_malformed_htlc.sha256_of_onion,
5751						failure_code: fail_malformed_htlc.failure_code,
5752					},
5753				};
5754				self.forward_htlcs.lock().unwrap().entry(incoming_scid).or_default().push(failure);
5755				self.pending_events.lock().unwrap().push_back((events::Event::HTLCHandlingFailed {
5756					prev_channel_id: incoming_channel_id,
5757					failed_next_destination: htlc_destination,
5758				}, None));
5759			}
5760		}
5761	}
5762
5763	/// Processes HTLCs which are pending waiting on random forward delay.
5764	///
5765	/// Should only really ever be called in response to a PendingHTLCsForwardable event.
5766	/// Will likely generate further events.
5767	pub fn process_pending_htlc_forwards(&self) {
5768		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5769
5770		self.process_pending_update_add_htlcs();
5771
5772		let mut new_events = VecDeque::new();
5773		let mut failed_forwards = Vec::new();
5774		let mut phantom_receives: Vec<(u64, Option<PublicKey>, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
5775		{
5776			let mut forward_htlcs = new_hash_map();
5777			mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
5778
5779			for (short_chan_id, mut pending_forwards) in forward_htlcs {
5780				if short_chan_id != 0 {
5781					let mut forwarding_counterparty = None;
5782					macro_rules! forwarding_channel_not_found {
5783						($forward_infos: expr) => {
5784							for forward_info in $forward_infos {
5785								match forward_info {
5786									HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
5787										prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
5788										prev_user_channel_id, prev_counterparty_node_id, forward_info: PendingHTLCInfo {
5789											routing, incoming_shared_secret, payment_hash, outgoing_amt_msat,
5790											outgoing_cltv_value, ..
5791										}
5792									}) => {
5793										let cltv_expiry = routing.incoming_cltv_expiry();
5794										macro_rules! failure_handler {
5795											($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
5796												let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id), Some(payment_hash));
5797												log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
5798
5799												let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
5800													short_channel_id: prev_short_channel_id,
5801													user_channel_id: Some(prev_user_channel_id),
5802													channel_id: prev_channel_id,
5803													outpoint: prev_funding_outpoint,
5804													counterparty_node_id: prev_counterparty_node_id,
5805													htlc_id: prev_htlc_id,
5806													incoming_packet_shared_secret: incoming_shared_secret,
5807													phantom_shared_secret: $phantom_ss,
5808													blinded_failure: routing.blinded_failure(),
5809													cltv_expiry,
5810												});
5811
5812												let reason = if $next_hop_unknown {
5813													HTLCDestination::UnknownNextHop { requested_forward_scid: short_chan_id }
5814												} else {
5815													HTLCDestination::FailedPayment{ payment_hash }
5816												};
5817
5818												failed_forwards.push((htlc_source, payment_hash,
5819													HTLCFailReason::reason($err_code, $err_data),
5820													reason
5821												));
5822												continue;
5823											}
5824										}
5825										macro_rules! fail_forward {
5826											($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
5827												{
5828													failure_handler!($msg, $err_code, $err_data, $phantom_ss, true);
5829												}
5830											}
5831										}
5832										macro_rules! failed_payment {
5833											($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
5834												{
5835													failure_handler!($msg, $err_code, $err_data, $phantom_ss, false);
5836												}
5837											}
5838										}
5839										if let PendingHTLCRouting::Forward { ref onion_packet, .. } = routing {
5840											let phantom_pubkey_res = self.node_signer.get_node_id(Recipient::PhantomNode);
5841											if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.chain_hash) {
5842												let phantom_shared_secret = self.node_signer.ecdh(Recipient::PhantomNode, &onion_packet.public_key.unwrap(), None).unwrap().secret_bytes();
5843												let next_hop = match onion_utils::decode_next_payment_hop(
5844													phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac,
5845													payment_hash, None, &*self.node_signer
5846												) {
5847													Ok(res) => res,
5848													Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
5849														let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).to_byte_array();
5850														// In this scenario, the phantom would have sent us an
5851														// `update_fail_malformed_htlc`, meaning here we encrypt the error as
5852														// if it came from us (the second-to-last hop) but contains the sha256
5853														// of the onion.
5854														failed_payment!(err_msg, err_code, sha256_of_onion.to_vec(), None);
5855													},
5856													Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
5857														failed_payment!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
5858													},
5859												};
5860												match next_hop {
5861													onion_utils::Hop::Receive(hop_data) => {
5862														let current_height: u32 = self.best_block.read().unwrap().height;
5863														match create_recv_pending_htlc_info(hop_data,
5864															incoming_shared_secret, payment_hash, outgoing_amt_msat,
5865															outgoing_cltv_value, Some(phantom_shared_secret), false, None,
5866															current_height)
5867														{
5868															Ok(info) => phantom_receives.push((
5869																prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint,
5870																prev_channel_id, prev_user_channel_id, vec![(info, prev_htlc_id)]
5871															)),
5872															Err(InboundHTLCErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
5873														}
5874													},
5875													_ => panic!(),
5876												}
5877											} else {
5878												fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None);
5879											}
5880										} else {
5881											fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None);
5882										}
5883									},
5884									HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. } => {
5885										// Channel went away before we could fail it. This implies
5886										// the channel is now on chain and our counterparty is
5887										// trying to broadcast the HTLC-Timeout, but that's their
5888										// problem, not ours.
5889									}
5890								}
5891							}
5892						}
5893					}
5894					let chan_info_opt = self.short_to_chan_info.read().unwrap().get(&short_chan_id).cloned();
5895					let (counterparty_node_id, forward_chan_id) = match chan_info_opt {
5896						Some((cp_id, chan_id)) => (cp_id, chan_id),
5897						None => {
5898							forwarding_channel_not_found!(pending_forwards.drain(..));
5899							continue;
5900						}
5901					};
5902					forwarding_counterparty = Some(counterparty_node_id);
5903					let per_peer_state = self.per_peer_state.read().unwrap();
5904					let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
5905					if peer_state_mutex_opt.is_none() {
5906						forwarding_channel_not_found!(pending_forwards.drain(..));
5907						continue;
5908					}
5909					let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
5910					let peer_state = &mut *peer_state_lock;
5911					let mut draining_pending_forwards = pending_forwards.drain(..);
5912					while let Some(forward_info) = draining_pending_forwards.next() {
5913						let queue_fail_htlc_res = match forward_info {
5914							HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
5915								prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
5916								prev_user_channel_id, prev_counterparty_node_id, forward_info: PendingHTLCInfo {
5917									incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
5918									routing: PendingHTLCRouting::Forward {
5919										ref onion_packet, blinded, incoming_cltv_expiry, ..
5920									}, skimmed_fee_msat, ..
5921								},
5922							}) => {
5923								let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
5924									short_channel_id: prev_short_channel_id,
5925									user_channel_id: Some(prev_user_channel_id),
5926									counterparty_node_id: prev_counterparty_node_id,
5927									channel_id: prev_channel_id,
5928									outpoint: prev_funding_outpoint,
5929									htlc_id: prev_htlc_id,
5930									incoming_packet_shared_secret: incoming_shared_secret,
5931									// Phantom payments are only PendingHTLCRouting::Receive.
5932									phantom_shared_secret: None,
5933									blinded_failure: blinded.map(|b| b.failure),
5934									cltv_expiry: incoming_cltv_expiry,
5935								});
5936								let next_blinding_point = blinded.and_then(|b| {
5937									b.next_blinding_override.or_else(|| {
5938										let encrypted_tlvs_ss = self.node_signer.ecdh(
5939											Recipient::Node, &b.inbound_blinding_point, None
5940										).unwrap().secret_bytes();
5941										onion_utils::next_hop_pubkey(
5942											&self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss
5943										).ok()
5944									})
5945								});
5946
5947								// Forward the HTLC over the most appropriate channel with the corresponding peer,
5948								// applying non-strict forwarding.
5949								// The channel with the least amount of outbound liquidity will be used to maximize the
5950								// probability of being able to successfully forward a subsequent HTLC.
5951								let maybe_optimal_channel = peer_state.channel_by_id.values_mut().filter_map(|phase| match phase {
5952									ChannelPhase::Funded(chan) => {
5953										let balances = chan.context.get_available_balances(&self.fee_estimator);
5954										if outgoing_amt_msat <= balances.next_outbound_htlc_limit_msat &&
5955											outgoing_amt_msat >= balances.next_outbound_htlc_minimum_msat &&
5956											chan.context.is_usable() {
5957											Some((chan, balances))
5958										} else {
5959											None
5960										}
5961									},
5962									_ => None,
5963								}).min_by_key(|(_, balances)| balances.next_outbound_htlc_limit_msat).map(|(c, _)| c);
5964								let optimal_channel = match maybe_optimal_channel {
5965									Some(chan) => chan,
5966									None => {
5967										// Fall back to the specified channel to return an appropriate error.
5968										if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
5969											chan
5970										} else {
5971											forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
5972											break;
5973										}
5974									}
5975								};
5976
5977								let logger = WithChannelContext::from(&self.logger, &optimal_channel.context, Some(payment_hash));
5978								let channel_description = if optimal_channel.context.get_short_channel_id() == Some(short_chan_id) {
5979									"specified"
5980								} else {
5981									"alternate"
5982								};
5983								log_trace!(logger, "Forwarding HTLC from SCID {} with payment_hash {} and next hop SCID {} over {} channel {} with corresponding peer {}",
5984									prev_short_channel_id, &payment_hash, short_chan_id, channel_description, optimal_channel.context.channel_id(), &counterparty_node_id);
5985								if let Err(e) = optimal_channel.queue_add_htlc(outgoing_amt_msat,
5986										payment_hash, outgoing_cltv_value, htlc_source.clone(),
5987										onion_packet.clone(), skimmed_fee_msat, next_blinding_point, &self.fee_estimator,
5988										&&logger)
5989								{
5990									if let ChannelError::Ignore(msg) = e {
5991										log_trace!(logger, "Failed to forward HTLC with payment_hash {} to peer {}: {}", &payment_hash, &counterparty_node_id, msg);
5992									} else {
5993										panic!("Stated return value requirements in send_htlc() were not met");
5994									}
5995
5996									if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
5997										let failure_code = 0x1000|7;
5998										let data = self.get_htlc_inbound_temp_fail_data(failure_code);
5999										failed_forwards.push((htlc_source, payment_hash,
6000											HTLCFailReason::reason(failure_code, data),
6001											HTLCDestination::NextHopChannel { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id }
6002										));
6003									} else {
6004										forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
6005										break;
6006									}
6007								}
6008								None
6009							},
6010							HTLCForwardInfo::AddHTLC { .. } => {
6011								panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
6012							},
6013							HTLCForwardInfo::FailHTLC { htlc_id, ref err_packet } => {
6014								if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
6015									let logger = WithChannelContext::from(&self.logger, &chan.context, None);
6016									log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
6017									Some((chan.queue_fail_htlc(htlc_id, err_packet.clone(), &&logger), htlc_id))
6018								} else {
6019									forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
6020									break;
6021								}
6022							},
6023							HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
6024								if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
6025									let logger = WithChannelContext::from(&self.logger, &chan.context, None);
6026									log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
6027									let res = chan.queue_fail_malformed_htlc(
6028										htlc_id, failure_code, sha256_of_onion, &&logger
6029									);
6030									Some((res, htlc_id))
6031								} else {
6032									forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
6033									break;
6034								}
6035							},
6036						};
6037						if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
6038							if let Err(e) = queue_fail_htlc_res {
6039								if let ChannelError::Ignore(msg) = e {
6040									if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
6041										let logger = WithChannelContext::from(&self.logger, &chan.context, None);
6042										log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
6043									}
6044								} else {
6045									panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
6046								}
6047								// fail-backs are best-effort, we probably already have one
6048								// pending, and if not that's OK, if not, the channel is on
6049								// the chain and sending the HTLC-Timeout is their problem.
6050							}
6051						}
6052					}
6053				} else {
6054					'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
6055						match forward_info {
6056							HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
6057								prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
6058								prev_user_channel_id, prev_counterparty_node_id, forward_info: PendingHTLCInfo {
6059									routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat,
6060									skimmed_fee_msat, ..
6061								}
6062							}) => {
6063								let blinded_failure = routing.blinded_failure();
6064								let (
6065									cltv_expiry, onion_payload, payment_data, payment_context, phantom_shared_secret,
6066									mut onion_fields, has_recipient_created_payment_secret
6067								) = match routing {
6068									PendingHTLCRouting::Receive {
6069										payment_data, payment_metadata, payment_context,
6070										incoming_cltv_expiry, phantom_shared_secret, custom_tlvs,
6071										requires_blinded_error: _
6072									} => {
6073										let _legacy_hop_data = Some(payment_data.clone());
6074										let onion_fields = RecipientOnionFields { payment_secret: Some(payment_data.payment_secret),
6075												payment_metadata, custom_tlvs };
6076										(incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data },
6077											Some(payment_data), payment_context, phantom_shared_secret, onion_fields,
6078											true)
6079									},
6080									PendingHTLCRouting::ReceiveKeysend {
6081										payment_data, payment_preimage, payment_metadata,
6082										incoming_cltv_expiry, custom_tlvs, requires_blinded_error: _,
6083										has_recipient_created_payment_secret,
6084									} => {
6085										let onion_fields = RecipientOnionFields {
6086											payment_secret: payment_data.as_ref().map(|data| data.payment_secret),
6087											payment_metadata,
6088											custom_tlvs,
6089										};
6090										(incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage),
6091											payment_data, None, None, onion_fields, has_recipient_created_payment_secret)
6092									},
6093									_ => {
6094										panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
6095									}
6096								};
6097								let claimable_htlc = ClaimableHTLC {
6098									prev_hop: HTLCPreviousHopData {
6099										short_channel_id: prev_short_channel_id,
6100										user_channel_id: Some(prev_user_channel_id),
6101										counterparty_node_id: prev_counterparty_node_id,
6102										channel_id: prev_channel_id,
6103										outpoint: prev_funding_outpoint,
6104										htlc_id: prev_htlc_id,
6105										incoming_packet_shared_secret: incoming_shared_secret,
6106										phantom_shared_secret,
6107										blinded_failure,
6108										cltv_expiry: Some(cltv_expiry),
6109									},
6110									// We differentiate the received value from the sender intended value
6111									// if possible so that we don't prematurely mark MPP payments complete
6112									// if routing nodes overpay
6113									value: incoming_amt_msat.unwrap_or(outgoing_amt_msat),
6114									sender_intended_value: outgoing_amt_msat,
6115									timer_ticks: 0,
6116									total_value_received: None,
6117									total_msat: if let Some(data) = &payment_data { data.total_msat } else { outgoing_amt_msat },
6118									cltv_expiry,
6119									onion_payload,
6120									counterparty_skimmed_fee_msat: skimmed_fee_msat,
6121								};
6122
6123								let mut committed_to_claimable = false;
6124
6125								macro_rules! fail_htlc {
6126									($htlc: expr, $payment_hash: expr) => {
6127										debug_assert!(!committed_to_claimable);
6128										let mut htlc_msat_height_data = $htlc.value.to_be_bytes().to_vec();
6129										htlc_msat_height_data.extend_from_slice(
6130											&self.best_block.read().unwrap().height.to_be_bytes(),
6131										);
6132										failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
6133												short_channel_id: $htlc.prev_hop.short_channel_id,
6134												user_channel_id: $htlc.prev_hop.user_channel_id,
6135												counterparty_node_id: $htlc.prev_hop.counterparty_node_id,
6136												channel_id: prev_channel_id,
6137												outpoint: prev_funding_outpoint,
6138												htlc_id: $htlc.prev_hop.htlc_id,
6139												incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
6140												phantom_shared_secret,
6141												blinded_failure,
6142												cltv_expiry: Some(cltv_expiry),
6143											}), payment_hash,
6144											HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
6145											HTLCDestination::FailedPayment { payment_hash: $payment_hash },
6146										));
6147										continue 'next_forwardable_htlc;
6148									}
6149								}
6150								let phantom_shared_secret = claimable_htlc.prev_hop.phantom_shared_secret;
6151								let mut receiver_node_id = self.our_network_pubkey;
6152								if phantom_shared_secret.is_some() {
6153									receiver_node_id = self.node_signer.get_node_id(Recipient::PhantomNode)
6154										.expect("Failed to get node_id for phantom node recipient");
6155								}
6156
6157								macro_rules! check_total_value {
6158									($purpose: expr) => {{
6159										let mut payment_claimable_generated = false;
6160										let is_keysend = $purpose.is_keysend();
6161										let mut claimable_payments = self.claimable_payments.lock().unwrap();
6162										if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
6163											fail_htlc!(claimable_htlc, payment_hash);
6164										}
6165										let ref mut claimable_payment = claimable_payments.claimable_payments
6166											.entry(payment_hash)
6167											// Note that if we insert here we MUST NOT fail_htlc!()
6168											.or_insert_with(|| {
6169												committed_to_claimable = true;
6170												ClaimablePayment {
6171													purpose: $purpose.clone(), htlcs: Vec::new(), onion_fields: None,
6172												}
6173											});
6174										if $purpose != claimable_payment.purpose {
6175											let log_keysend = |keysend| if keysend { "keysend" } else { "non-keysend" };
6176											log_trace!(self.logger, "Failing new {} HTLC with payment_hash {} as we already had an existing {} HTLC with the same payment hash", log_keysend(is_keysend), &payment_hash, log_keysend(!is_keysend));
6177											fail_htlc!(claimable_htlc, payment_hash);
6178										}
6179										if let Some(earlier_fields) = &mut claimable_payment.onion_fields {
6180											if earlier_fields.check_merge(&mut onion_fields).is_err() {
6181												fail_htlc!(claimable_htlc, payment_hash);
6182											}
6183										} else {
6184											claimable_payment.onion_fields = Some(onion_fields);
6185										}
6186										let mut total_value = claimable_htlc.sender_intended_value;
6187										let mut earliest_expiry = claimable_htlc.cltv_expiry;
6188										for htlc in claimable_payment.htlcs.iter() {
6189											total_value += htlc.sender_intended_value;
6190											earliest_expiry = cmp::min(earliest_expiry, htlc.cltv_expiry);
6191											if htlc.total_msat != claimable_htlc.total_msat {
6192												log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
6193													&payment_hash, claimable_htlc.total_msat, htlc.total_msat);
6194												total_value = msgs::MAX_VALUE_MSAT;
6195											}
6196											if total_value >= msgs::MAX_VALUE_MSAT { break; }
6197										}
6198										// The condition determining whether an MPP is complete must
6199										// match exactly the condition used in `timer_tick_occurred`
6200										if total_value >= msgs::MAX_VALUE_MSAT {
6201											fail_htlc!(claimable_htlc, payment_hash);
6202										} else if total_value - claimable_htlc.sender_intended_value >= claimable_htlc.total_msat {
6203											log_trace!(self.logger, "Failing HTLC with payment_hash {} as payment is already claimable",
6204												&payment_hash);
6205											fail_htlc!(claimable_htlc, payment_hash);
6206										} else if total_value >= claimable_htlc.total_msat {
6207											#[allow(unused_assignments)] {
6208												committed_to_claimable = true;
6209											}
6210											claimable_payment.htlcs.push(claimable_htlc);
6211											let amount_msat =
6212												claimable_payment.htlcs.iter().map(|htlc| htlc.value).sum();
6213											claimable_payment.htlcs.iter_mut()
6214												.for_each(|htlc| htlc.total_value_received = Some(amount_msat));
6215											let counterparty_skimmed_fee_msat = claimable_payment.htlcs.iter()
6216												.map(|htlc| htlc.counterparty_skimmed_fee_msat.unwrap_or(0)).sum();
6217											debug_assert!(total_value.saturating_sub(amount_msat) <=
6218												counterparty_skimmed_fee_msat);
6219											claimable_payment.htlcs.sort();
6220											let payment_id =
6221												claimable_payment.inbound_payment_id(&self.inbound_payment_id_secret);
6222											new_events.push_back((events::Event::PaymentClaimable {
6223												receiver_node_id: Some(receiver_node_id),
6224												payment_hash,
6225												purpose: $purpose,
6226												amount_msat,
6227												counterparty_skimmed_fee_msat,
6228												via_channel_id: Some(prev_channel_id),
6229												via_user_channel_id: Some(prev_user_channel_id),
6230												claim_deadline: Some(earliest_expiry - HTLC_FAIL_BACK_BUFFER),
6231												onion_fields: claimable_payment.onion_fields.clone(),
6232												payment_id: Some(payment_id),
6233											}, None));
6234											payment_claimable_generated = true;
6235										} else {
6236											// Nothing to do - we haven't reached the total
6237											// payment value yet, wait until we receive more
6238											// MPP parts.
6239											claimable_payment.htlcs.push(claimable_htlc);
6240											#[allow(unused_assignments)] {
6241												committed_to_claimable = true;
6242											}
6243										}
6244										payment_claimable_generated
6245									}}
6246								}
6247
6248								// Check that the payment hash and secret are known. Note that we
6249								// MUST take care to handle the "unknown payment hash" and
6250								// "incorrect payment secret" cases here identically or we'd expose
6251								// that we are the ultimate recipient of the given payment hash.
6252								// Further, we must not expose whether we have any other HTLCs
6253								// associated with the same payment_hash pending or not.
6254								let payment_preimage = if has_recipient_created_payment_secret {
6255									if let Some(ref payment_data) = payment_data {
6256										let (payment_preimage, min_final_cltv_expiry_delta) = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
6257											Ok(result) => result,
6258											Err(()) => {
6259												log_trace!(self.logger, "Failing new HTLC with payment_hash {} as payment verification failed", &payment_hash);
6260												fail_htlc!(claimable_htlc, payment_hash);
6261											}
6262										};
6263										if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta {
6264											let expected_min_expiry_height = (self.current_best_block().height + min_final_cltv_expiry_delta as u32) as u64;
6265											if (cltv_expiry as u64) < expected_min_expiry_height {
6266												log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})",
6267												&payment_hash, cltv_expiry, expected_min_expiry_height);
6268												fail_htlc!(claimable_htlc, payment_hash);
6269											}
6270										}
6271										payment_preimage
6272									} else { fail_htlc!(claimable_htlc, payment_hash); }
6273								} else { None };
6274								match claimable_htlc.onion_payload {
6275									OnionPayload::Invoice { .. } => {
6276										let payment_data = payment_data.unwrap();
6277										let purpose = events::PaymentPurpose::from_parts(
6278											payment_preimage,
6279											payment_data.payment_secret,
6280											payment_context,
6281										);
6282										check_total_value!(purpose);
6283									},
6284									OnionPayload::Spontaneous(preimage) => {
6285										let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
6286										check_total_value!(purpose);
6287									}
6288								}
6289							},
6290							HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. } => {
6291								panic!("Got pending fail of our own HTLC");
6292							}
6293						}
6294					}
6295				}
6296			}
6297		}
6298
6299		let best_block_height = self.best_block.read().unwrap().height;
6300		self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(),
6301			|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
6302			&self.pending_events, &self.logger, |args| self.send_payment_along_path(args));
6303
6304		for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
6305			self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
6306		}
6307		self.forward_htlcs(&mut phantom_receives);
6308
6309		// Freeing the holding cell here is relatively redundant - in practice we'll do it when we
6310		// next get a `get_and_clear_pending_msg_events` call, but some tests rely on it, and it's
6311		// nice to do the work now if we can rather than while we're trying to get messages in the
6312		// network stack.
6313		self.check_free_holding_cells();
6314
6315		if new_events.is_empty() { return }
6316		let mut events = self.pending_events.lock().unwrap();
6317		events.append(&mut new_events);
6318	}
6319
6320	/// Free the background events, generally called from [`PersistenceNotifierGuard`] constructors.
6321	///
6322	/// Expects the caller to have a total_consistency_lock read lock.
6323	fn process_background_events(&self) -> NotifyOption {
6324		debug_assert_ne!(self.total_consistency_lock.held_by_thread(), LockHeldState::NotHeldByThread);
6325
6326		self.background_events_processed_since_startup.store(true, Ordering::Release);
6327
6328		let mut background_events = Vec::new();
6329		mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
6330		if background_events.is_empty() {
6331			return NotifyOption::SkipPersistNoEvents;
6332		}
6333
6334		for event in background_events.drain(..) {
6335			match event {
6336				BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, _channel_id, update)) => {
6337					// The channel has already been closed, so no use bothering to care about the
6338					// monitor updating completing.
6339					let _ = self.chain_monitor.update_channel(funding_txo, &update);
6340				},
6341				BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
6342					self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
6343				},
6344				BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
6345					let per_peer_state = self.per_peer_state.read().unwrap();
6346					if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
6347						let mut peer_state_lock = peer_state_mutex.lock().unwrap();
6348						let peer_state = &mut *peer_state_lock;
6349						if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
6350							handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
6351						} else {
6352							let update_actions = peer_state.monitor_update_blocked_actions
6353								.remove(&channel_id).unwrap_or(Vec::new());
6354							mem::drop(peer_state_lock);
6355							mem::drop(per_peer_state);
6356							self.handle_monitor_update_completion_actions(update_actions);
6357						}
6358					}
6359				},
6360			}
6361		}
6362		NotifyOption::DoPersist
6363	}
6364
6365	#[cfg(any(test, feature = "_test_utils"))]
6366	/// Process background events, for functional testing
6367	pub fn test_process_background_events(&self) {
6368		let _lck = self.total_consistency_lock.read().unwrap();
6369		let _ = self.process_background_events();
6370	}
6371
6372	fn update_channel_fee(&self, chan_id: &ChannelId, chan: &mut Channel<SP>, new_feerate: u32) -> NotifyOption {
6373		if !chan.context.is_outbound() { return NotifyOption::SkipPersistNoEvents; }
6374
6375		let logger = WithChannelContext::from(&self.logger, &chan.context, None);
6376
6377		// If the feerate has decreased by less than half, don't bother
6378		if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
6379			return NotifyOption::SkipPersistNoEvents;
6380		}
6381		if !chan.context.is_live() {
6382			log_trace!(logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
6383				chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
6384			return NotifyOption::SkipPersistNoEvents;
6385		}
6386		log_trace!(logger, "Channel {} qualifies for a feerate change from {} to {}.",
6387			&chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
6388
6389		chan.queue_update_fee(new_feerate, &self.fee_estimator, &&logger);
6390		NotifyOption::DoPersist
6391	}
6392
6393	#[cfg(fuzzing)]
6394	/// In chanmon_consistency we want to sometimes do the channel fee updates done in
6395	/// timer_tick_occurred, but we can't generate the disabled channel updates as it considers
6396	/// these a fuzz failure (as they usually indicate a channel force-close, which is exactly what
6397	/// it wants to detect). Thus, we have a variant exposed here for its benefit.
6398	pub fn maybe_update_chan_fees(&self) {
6399		PersistenceNotifierGuard::optionally_notify(self, || {
6400			let mut should_persist = NotifyOption::SkipPersistNoEvents;
6401
6402			let non_anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6403			let anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee);
6404
6405			let per_peer_state = self.per_peer_state.read().unwrap();
6406			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
6407				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
6408				let peer_state = &mut *peer_state_lock;
6409				for (chan_id, chan) in peer_state.channel_by_id.iter_mut().filter_map(
6410					|(chan_id, phase)| if let ChannelPhase::Funded(chan) = phase { Some((chan_id, chan)) } else { None }
6411				) {
6412					let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
6413						anchor_feerate
6414					} else {
6415						non_anchor_feerate
6416					};
6417					let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
6418					if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
6419				}
6420			}
6421
6422			should_persist
6423		});
6424	}
6425
6426	/// Performs actions which should happen on startup and roughly once per minute thereafter.
6427	///
6428	/// This currently includes:
6429	///  * Increasing or decreasing the on-chain feerate estimates for our outbound channels,
6430	///  * Broadcasting [`ChannelUpdate`] messages if we've been disconnected from our peer for more
6431	///    than a minute, informing the network that they should no longer attempt to route over
6432	///    the channel.
6433	///  * Expiring a channel's previous [`ChannelConfig`] if necessary to only allow forwarding HTLCs
6434	///    with the current [`ChannelConfig`].
6435	///  * Removing peers which have disconnected but and no longer have any channels.
6436	///  * Force-closing and removing channels which have not completed establishment in a timely manner.
6437	///  * Forgetting about stale outbound payments, either those that have already been fulfilled
6438	///    or those awaiting an invoice that hasn't been delivered in the necessary amount of time.
6439	///    The latter is determined using the system clock in `std` and the highest seen block time
6440	///    minus two hours in non-`std`.
6441	///
6442	/// Note that this may cause reentrancy through [`chain::Watch::update_channel`] calls or feerate
6443	/// estimate fetches.
6444	///
6445	/// [`ChannelUpdate`]: msgs::ChannelUpdate
6446	/// [`ChannelConfig`]: crate::util::config::ChannelConfig
6447	pub fn timer_tick_occurred(&self) {
6448		PersistenceNotifierGuard::optionally_notify(self, || {
6449			let mut should_persist = NotifyOption::SkipPersistNoEvents;
6450
6451			let non_anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6452			let anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee);
6453
6454			let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
6455			let mut timed_out_mpp_htlcs = Vec::new();
6456			let mut pending_peers_awaiting_removal = Vec::new();
6457			let mut shutdown_channels = Vec::new();
6458
6459			macro_rules! process_unfunded_channel_tick {
6460				($peer_state: expr, $chan: expr, $pending_msg_events: expr) => { {
6461					let context = &mut $chan.context;
6462					context.maybe_expire_prev_config();
6463					if $chan.unfunded_context.should_expire_unfunded_channel() {
6464						let logger = WithChannelContext::from(&self.logger, context, None);
6465						log_error!(logger,
6466							"Force-closing pending channel with ID {} for not establishing in a timely manner",
6467							context.channel_id());
6468						let mut close_res = context.force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) });
6469						locked_close_channel!(self, $peer_state, context, close_res);
6470						shutdown_channels.push(close_res);
6471						$pending_msg_events.push(MessageSendEvent::HandleError {
6472							node_id: context.get_counterparty_node_id(),
6473							action: msgs::ErrorAction::SendErrorMessage {
6474								msg: msgs::ErrorMessage {
6475									channel_id: context.channel_id(),
6476									data: "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(),
6477								},
6478							},
6479						});
6480						false
6481					} else {
6482						true
6483					}
6484				} }
6485			}
6486
6487			{
6488				let per_peer_state = self.per_peer_state.read().unwrap();
6489				for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
6490					let mut peer_state_lock = peer_state_mutex.lock().unwrap();
6491					let peer_state = &mut *peer_state_lock;
6492					let pending_msg_events = &mut peer_state.pending_msg_events;
6493					let counterparty_node_id = *counterparty_node_id;
6494					peer_state.channel_by_id.retain(|chan_id, phase| {
6495						match phase {
6496							ChannelPhase::Funded(chan) => {
6497								let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
6498									anchor_feerate
6499								} else {
6500									non_anchor_feerate
6501								};
6502								let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
6503								if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
6504
6505								if let Err(e) = chan.timer_check_closing_negotiation_progress() {
6506									let (needs_close, err) = convert_chan_phase_err!(self, peer_state, e, chan, chan_id, FUNDED_CHANNEL);
6507									handle_errors.push((Err(err), counterparty_node_id));
6508									if needs_close { return false; }
6509								}
6510
6511								match chan.channel_update_status() {
6512									ChannelUpdateStatus::Enabled if !chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)),
6513									ChannelUpdateStatus::Disabled if chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)),
6514									ChannelUpdateStatus::DisabledStaged(_) if chan.context.is_live()
6515										=> chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
6516									ChannelUpdateStatus::EnabledStaged(_) if !chan.context.is_live()
6517										=> chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
6518									ChannelUpdateStatus::DisabledStaged(mut n) if !chan.context.is_live() => {
6519										n += 1;
6520										if n >= DISABLE_GOSSIP_TICKS {
6521											chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
6522											if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
6523												let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
6524												pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
6525													msg: update
6526												});
6527											}
6528											should_persist = NotifyOption::DoPersist;
6529										} else {
6530											chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(n));
6531										}
6532									},
6533									ChannelUpdateStatus::EnabledStaged(mut n) if chan.context.is_live() => {
6534										n += 1;
6535										if n >= ENABLE_GOSSIP_TICKS {
6536											chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
6537											if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
6538												let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
6539												pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
6540													msg: update
6541												});
6542											}
6543											should_persist = NotifyOption::DoPersist;
6544										} else {
6545											chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(n));
6546										}
6547									},
6548									_ => {},
6549								}
6550
6551								chan.context.maybe_expire_prev_config();
6552
6553								if chan.should_disconnect_peer_awaiting_response() {
6554									let logger = WithChannelContext::from(&self.logger, &chan.context, None);
6555									log_debug!(logger, "Disconnecting peer {} due to not making any progress on channel {}",
6556											counterparty_node_id, chan_id);
6557									pending_msg_events.push(MessageSendEvent::HandleError {
6558										node_id: counterparty_node_id,
6559										action: msgs::ErrorAction::DisconnectPeerWithWarning {
6560											msg: msgs::WarningMessage {
6561												channel_id: *chan_id,
6562												data: "Disconnecting due to timeout awaiting response".to_owned(),
6563											},
6564										},
6565									});
6566								}
6567
6568								true
6569							},
6570							ChannelPhase::UnfundedInboundV1(chan) => {
6571								process_unfunded_channel_tick!(peer_state, chan, pending_msg_events)
6572							},
6573							ChannelPhase::UnfundedOutboundV1(chan) => {
6574								process_unfunded_channel_tick!(peer_state, chan, pending_msg_events)
6575							},
6576							ChannelPhase::UnfundedInboundV2(chan) => {
6577								process_unfunded_channel_tick!(peer_state, chan, pending_msg_events)
6578							},
6579							ChannelPhase::UnfundedOutboundV2(chan) => {
6580								process_unfunded_channel_tick!(peer_state, chan, pending_msg_events)
6581							},
6582						}
6583					});
6584
6585					for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() {
6586						if { req.ticks_remaining -= 1 ; req.ticks_remaining } <= 0 {
6587							let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*chan_id), None);
6588							log_error!(logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id);
6589							peer_state.pending_msg_events.push(
6590								events::MessageSendEvent::HandleError {
6591									node_id: counterparty_node_id,
6592									action: msgs::ErrorAction::SendErrorMessage {
6593										msg: msgs::ErrorMessage { channel_id: chan_id.clone(), data: "Channel force-closed".to_owned() }
6594									},
6595								}
6596							);
6597						}
6598					}
6599					peer_state.inbound_channel_request_by_id.retain(|_, req| req.ticks_remaining > 0);
6600
6601					if peer_state.ok_to_remove(true) {
6602						pending_peers_awaiting_removal.push(counterparty_node_id);
6603					}
6604				}
6605			}
6606
6607			// When a peer disconnects but still has channels, the peer's `peer_state` entry in the
6608			// `per_peer_state` is not removed by the `peer_disconnected` function. If the channels
6609			// of to that peer is later closed while still being disconnected (i.e. force closed),
6610			// we therefore need to remove the peer from `peer_state` separately.
6611			// To avoid having to take the `per_peer_state` `write` lock once the channels are
6612			// closed, we instead remove such peers awaiting removal here on a timer, to limit the
6613			// negative effects on parallelism as much as possible.
6614			if pending_peers_awaiting_removal.len() > 0 {
6615				let mut per_peer_state = self.per_peer_state.write().unwrap();
6616				for counterparty_node_id in pending_peers_awaiting_removal {
6617					match per_peer_state.entry(counterparty_node_id) {
6618						hash_map::Entry::Occupied(entry) => {
6619							// Remove the entry if the peer is still disconnected and we still
6620							// have no channels to the peer.
6621							let remove_entry = {
6622								let peer_state = entry.get().lock().unwrap();
6623								peer_state.ok_to_remove(true)
6624							};
6625							if remove_entry {
6626								entry.remove_entry();
6627							}
6628						},
6629						hash_map::Entry::Vacant(_) => { /* The PeerState has already been removed */ }
6630					}
6631				}
6632			}
6633
6634			self.claimable_payments.lock().unwrap().claimable_payments.retain(|payment_hash, payment| {
6635				if payment.htlcs.is_empty() {
6636					// This should be unreachable
6637					debug_assert!(false);
6638					return false;
6639				}
6640				if let OnionPayload::Invoice { .. } = payment.htlcs[0].onion_payload {
6641					// Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
6642					// In this case we're not going to handle any timeouts of the parts here.
6643					// This condition determining whether the MPP is complete here must match
6644					// exactly the condition used in `process_pending_htlc_forwards`.
6645					if payment.htlcs[0].total_msat <= payment.htlcs.iter()
6646						.fold(0, |total, htlc| total + htlc.sender_intended_value)
6647					{
6648						return true;
6649					} else if payment.htlcs.iter_mut().any(|htlc| {
6650						htlc.timer_ticks += 1;
6651						return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
6652					}) {
6653						timed_out_mpp_htlcs.extend(payment.htlcs.drain(..)
6654							.map(|htlc: ClaimableHTLC| (htlc.prev_hop, *payment_hash)));
6655						return false;
6656					}
6657				}
6658				true
6659			});
6660
6661			for htlc_source in timed_out_mpp_htlcs.drain(..) {
6662				let source = HTLCSource::PreviousHopData(htlc_source.0.clone());
6663				let reason = HTLCFailReason::from_failure_code(23);
6664				let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
6665				self.fail_htlc_backwards_internal(&source, &htlc_source.1, &reason, receiver);
6666			}
6667
6668			for (err, counterparty_node_id) in handle_errors.drain(..) {
6669				let _ = handle_error!(self, err, counterparty_node_id);
6670			}
6671
6672			for shutdown_res in shutdown_channels {
6673				self.finish_close_channel(shutdown_res);
6674			}
6675
6676			#[cfg(feature = "std")]
6677			let duration_since_epoch = std::time::SystemTime::now()
6678				.duration_since(std::time::SystemTime::UNIX_EPOCH)
6679				.expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
6680			#[cfg(not(feature = "std"))]
6681			let duration_since_epoch = Duration::from_secs(
6682				self.highest_seen_timestamp.load(Ordering::Acquire).saturating_sub(7200) as u64
6683			);
6684
6685			self.pending_outbound_payments.remove_stale_payments(
6686				duration_since_epoch, &self.pending_events
6687			);
6688
6689			// Technically we don't need to do this here, but if we have holding cell entries in a
6690			// channel that need freeing, it's better to do that here and block a background task
6691			// than block the message queueing pipeline.
6692			if self.check_free_holding_cells() {
6693				should_persist = NotifyOption::DoPersist;
6694			}
6695
6696			should_persist
6697		});
6698	}
6699
6700	/// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect
6701	/// after a PaymentClaimable event, failing the HTLC back to its origin and freeing resources
6702	/// along the path (including in our own channel on which we received it).
6703	///
6704	/// Note that in some cases around unclean shutdown, it is possible the payment may have
6705	/// already been claimed by you via [`ChannelManager::claim_funds`] prior to you seeing (a
6706	/// second copy of) the [`events::Event::PaymentClaimable`] event. Alternatively, the payment
6707	/// may have already been failed automatically by LDK if it was nearing its expiration time.
6708	///
6709	/// While LDK will never claim a payment automatically on your behalf (i.e. without you calling
6710	/// [`ChannelManager::claim_funds`]), you should still monitor for
6711	/// [`events::Event::PaymentClaimed`] events even for payments you intend to fail, especially on
6712	/// startup during which time claims that were in-progress at shutdown may be replayed.
6713	pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
6714		self.fail_htlc_backwards_with_reason(payment_hash, FailureCode::IncorrectOrUnknownPaymentDetails);
6715	}
6716
6717	/// This is a variant of [`ChannelManager::fail_htlc_backwards`] that allows you to specify the
6718	/// reason for the failure.
6719	///
6720	/// See [`FailureCode`] for valid failure codes.
6721	pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: FailureCode) {
6722		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
6723
6724		let removed_source = self.claimable_payments.lock().unwrap().claimable_payments.remove(payment_hash);
6725		if let Some(payment) = removed_source {
6726			for htlc in payment.htlcs {
6727				let reason = self.get_htlc_fail_reason_from_failure_code(failure_code, &htlc);
6728				let source = HTLCSource::PreviousHopData(htlc.prev_hop);
6729				let receiver = HTLCDestination::FailedPayment { payment_hash: *payment_hash };
6730				self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
6731			}
6732		}
6733	}
6734
6735	/// Gets error data to form an [`HTLCFailReason`] given a [`FailureCode`] and [`ClaimableHTLC`].
6736	fn get_htlc_fail_reason_from_failure_code(&self, failure_code: FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason {
6737		match failure_code {
6738			FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(failure_code.into()),
6739			FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(failure_code.into()),
6740			FailureCode::IncorrectOrUnknownPaymentDetails => {
6741				let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
6742				htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes());
6743				HTLCFailReason::reason(failure_code.into(), htlc_msat_height_data)
6744			},
6745			FailureCode::InvalidOnionPayload(data) => {
6746				let fail_data = match data {
6747					Some((typ, offset)) => [BigSize(typ).encode(), offset.encode()].concat(),
6748					None => Vec::new(),
6749				};
6750				HTLCFailReason::reason(failure_code.into(), fail_data)
6751			}
6752		}
6753	}
6754
6755	/// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code
6756	/// that we want to return and a channel.
6757	///
6758	/// This is for failures on the channel on which the HTLC was *received*, not failures
6759	/// forwarding
6760	fn get_htlc_inbound_temp_fail_data(&self, err_code: u16) -> Vec<u8> {
6761		debug_assert_eq!(err_code & 0x1000, 0x1000);
6762		debug_assert_ne!(err_code, 0x1000|11);
6763		debug_assert_ne!(err_code, 0x1000|12);
6764		debug_assert_ne!(err_code, 0x1000|13);
6765		// at capacity, we write fields `disabled_flags` and `len`
6766		let mut enc = VecWriter(Vec::with_capacity(4));
6767		if err_code == 0x1000 | 20 {
6768			// No flags for `disabled_flags` are currently defined so they're always two zero bytes.
6769			// See https://github.com/lightning/bolts/blob/341ec84/04-onion-routing.md?plain=1#L1008
6770			0u16.write(&mut enc).expect("Writes cannot fail");
6771		}
6772		// See https://github.com/lightning/bolts/blob/247e83d/04-onion-routing.md?plain=1#L1414-L1415
6773		(0u16).write(&mut enc).expect("Writes cannot fail");
6774		enc.0
6775	}
6776
6777	// Fail a list of HTLCs that were just freed from the holding cell. The HTLCs need to be
6778	// failed backwards or, if they were one of our outgoing HTLCs, then their failure needs to
6779	// be surfaced to the user.
6780	fn fail_holding_cell_htlcs(
6781		&self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: ChannelId,
6782		counterparty_node_id: &PublicKey
6783	) {
6784		let (failure_code, onion_failure_data) = {
6785			let per_peer_state = self.per_peer_state.read().unwrap();
6786			if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
6787				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
6788				let peer_state = &mut *peer_state_lock;
6789				match peer_state.channel_by_id.entry(channel_id) {
6790					hash_map::Entry::Occupied(chan_phase_entry) => {
6791						if let ChannelPhase::Funded(_chan) = chan_phase_entry.get() {
6792							let failure_code = 0x1000|7;
6793							let data = self.get_htlc_inbound_temp_fail_data(failure_code);
6794							(failure_code, data)
6795						} else {
6796							// We shouldn't be trying to fail holding cell HTLCs on an unfunded channel.
6797							debug_assert!(false);
6798							(0x4000|10, Vec::new())
6799						}
6800					},
6801					hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
6802				}
6803			} else { (0x4000|10, Vec::new()) }
6804		};
6805
6806		for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
6807			let reason = HTLCFailReason::reason(failure_code, onion_failure_data.clone());
6808			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id };
6809			self.fail_htlc_backwards_internal(&htlc_src, &payment_hash, &reason, receiver);
6810		}
6811	}
6812
6813	fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
6814		let push_forward_event = self.fail_htlc_backwards_internal_without_forward_event(source, payment_hash, onion_error, destination);
6815		if push_forward_event { self.push_pending_forwards_ev(); }
6816	}
6817
6818	/// Fails an HTLC backwards to the sender of it to us.
6819	/// Note that we do not assume that channels corresponding to failed HTLCs are still available.
6820	fn fail_htlc_backwards_internal_without_forward_event(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) -> bool {
6821		// Ensure that no peer state channel storage lock is held when calling this function.
6822		// This ensures that future code doesn't introduce a lock-order requirement for
6823		// `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
6824		// this function with any `per_peer_state` peer lock acquired would.
6825		#[cfg(debug_assertions)]
6826		for (_, peer) in self.per_peer_state.read().unwrap().iter() {
6827			debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
6828		}
6829
6830		//TODO: There is a timing attack here where if a node fails an HTLC back to us they can
6831		//identify whether we sent it or not based on the (I presume) very different runtime
6832		//between the branches here. We should make this async and move it into the forward HTLCs
6833		//timer handling.
6834
6835		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
6836		// from block_connected which may run during initialization prior to the chain_monitor
6837		// being fully configured. See the docs for `ChannelManagerReadArgs` for more.
6838		let mut push_forward_event;
6839		match source {
6840			HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => {
6841				push_forward_event = self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path,
6842					session_priv, payment_id, self.probing_cookie_secret, &self.secp_ctx,
6843					&self.pending_events, &self.logger);
6844			},
6845			HTLCSource::PreviousHopData(HTLCPreviousHopData {
6846				ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret,
6847				ref phantom_shared_secret, outpoint: _, ref blinded_failure, ref channel_id, ..
6848			}) => {
6849				log_trace!(
6850					WithContext::from(&self.logger, None, Some(*channel_id), Some(*payment_hash)),
6851					"Failing {}HTLC with payment_hash {} backwards from us: {:?}",
6852					if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error
6853				);
6854				let failure = match blinded_failure {
6855					Some(BlindedFailure::FromIntroductionNode) => {
6856						let blinded_onion_error = HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32]);
6857						let err_packet = blinded_onion_error.get_encrypted_failure_packet(
6858							incoming_packet_shared_secret, phantom_shared_secret
6859						);
6860						HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
6861					},
6862					Some(BlindedFailure::FromBlindedNode) => {
6863						HTLCForwardInfo::FailMalformedHTLC {
6864							htlc_id: *htlc_id,
6865							failure_code: INVALID_ONION_BLINDING,
6866							sha256_of_onion: [0; 32]
6867						}
6868					},
6869					None => {
6870						let err_packet = onion_error.get_encrypted_failure_packet(
6871							incoming_packet_shared_secret, phantom_shared_secret
6872						);
6873						HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
6874					}
6875				};
6876
6877				push_forward_event = self.decode_update_add_htlcs.lock().unwrap().is_empty();
6878				let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
6879				push_forward_event &= forward_htlcs.is_empty();
6880				match forward_htlcs.entry(*short_channel_id) {
6881					hash_map::Entry::Occupied(mut entry) => {
6882						entry.get_mut().push(failure);
6883					},
6884					hash_map::Entry::Vacant(entry) => {
6885						entry.insert(vec!(failure));
6886					}
6887				}
6888				mem::drop(forward_htlcs);
6889				let mut pending_events = self.pending_events.lock().unwrap();
6890				pending_events.push_back((events::Event::HTLCHandlingFailed {
6891					prev_channel_id: *channel_id,
6892					failed_next_destination: destination,
6893				}, None));
6894			},
6895		}
6896		push_forward_event
6897	}
6898
6899	/// Provides a payment preimage in response to [`Event::PaymentClaimable`], generating any
6900	/// [`MessageSendEvent`]s needed to claim the payment.
6901	///
6902	/// This method is guaranteed to ensure the payment has been claimed but only if the current
6903	/// height is strictly below [`Event::PaymentClaimable::claim_deadline`]. To avoid race
6904	/// conditions, you should wait for an [`Event::PaymentClaimed`] before considering the payment
6905	/// successful. It will generally be available in the next [`process_pending_events`] call.
6906	///
6907	/// Note that if you did not set an `amount_msat` when calling [`create_inbound_payment`] or
6908	/// [`create_inbound_payment_for_hash`] you must check that the amount in the `PaymentClaimable`
6909	/// event matches your expectation. If you fail to do so and call this method, you may provide
6910	/// the sender "proof-of-payment" when they did not fulfill the full expected payment.
6911	///
6912	/// This function will fail the payment if it has custom TLVs with even type numbers, as we
6913	/// will assume they are unknown. If you intend to accept even custom TLVs, you should use
6914	/// [`claim_funds_with_known_custom_tlvs`].
6915	///
6916	/// [`Event::PaymentClaimable`]: crate::events::Event::PaymentClaimable
6917	/// [`Event::PaymentClaimable::claim_deadline`]: crate::events::Event::PaymentClaimable::claim_deadline
6918	/// [`Event::PaymentClaimed`]: crate::events::Event::PaymentClaimed
6919	/// [`process_pending_events`]: EventsProvider::process_pending_events
6920	/// [`create_inbound_payment`]: Self::create_inbound_payment
6921	/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
6922	/// [`claim_funds_with_known_custom_tlvs`]: Self::claim_funds_with_known_custom_tlvs
6923	pub fn claim_funds(&self, payment_preimage: PaymentPreimage) {
6924		self.claim_payment_internal(payment_preimage, false);
6925	}
6926
6927	/// This is a variant of [`claim_funds`] that allows accepting a payment with custom TLVs with
6928	/// even type numbers.
6929	///
6930	/// # Note
6931	///
6932	/// You MUST check you've understood all even TLVs before using this to
6933	/// claim, otherwise you may unintentionally agree to some protocol you do not understand.
6934	///
6935	/// [`claim_funds`]: Self::claim_funds
6936	pub fn claim_funds_with_known_custom_tlvs(&self, payment_preimage: PaymentPreimage) {
6937		self.claim_payment_internal(payment_preimage, true);
6938	}
6939
6940	fn claim_payment_internal(&self, payment_preimage: PaymentPreimage, custom_tlvs_known: bool) {
6941		let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).to_byte_array());
6942
6943		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
6944
6945		let (sources, claiming_payment) = {
6946			let res = self.claimable_payments.lock().unwrap().begin_claiming_payment(
6947				payment_hash, &self.node_signer, &self.logger, &self.inbound_payment_id_secret,
6948				custom_tlvs_known,
6949			);
6950
6951			match res {
6952				Ok((htlcs, payment_info)) => (htlcs, payment_info),
6953				Err(htlcs) => {
6954					for htlc in htlcs {
6955						let reason = self.get_htlc_fail_reason_from_failure_code(FailureCode::InvalidOnionPayload(None), &htlc);
6956						let source = HTLCSource::PreviousHopData(htlc.prev_hop);
6957						let receiver = HTLCDestination::FailedPayment { payment_hash };
6958						self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
6959					}
6960					return;
6961				}
6962			}
6963		};
6964		debug_assert!(!sources.is_empty());
6965
6966		// Just in case one HTLC has been failed between when we generated the `PaymentClaimable`
6967		// and when we got here we need to check that the amount we're about to claim matches the
6968		// amount we told the user in the last `PaymentClaimable`. We also do a sanity-check that
6969		// the MPP parts all have the same `total_msat`.
6970		let mut claimable_amt_msat = 0;
6971		let mut prev_total_msat = None;
6972		let mut expected_amt_msat = None;
6973		let mut valid_mpp = true;
6974		let mut errs = Vec::new();
6975		let per_peer_state = self.per_peer_state.read().unwrap();
6976		for htlc in sources.iter() {
6977			if prev_total_msat.is_some() && prev_total_msat != Some(htlc.total_msat) {
6978				log_error!(self.logger, "Somehow ended up with an MPP payment with different expected total amounts - this should not be reachable!");
6979				debug_assert!(false);
6980				valid_mpp = false;
6981				break;
6982			}
6983			prev_total_msat = Some(htlc.total_msat);
6984
6985			if expected_amt_msat.is_some() && expected_amt_msat != htlc.total_value_received {
6986				log_error!(self.logger, "Somehow ended up with an MPP payment with different received total amounts - this should not be reachable!");
6987				debug_assert!(false);
6988				valid_mpp = false;
6989				break;
6990			}
6991			expected_amt_msat = htlc.total_value_received;
6992			claimable_amt_msat += htlc.value;
6993		}
6994		mem::drop(per_peer_state);
6995		if sources.is_empty() || expected_amt_msat.is_none() {
6996			self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
6997			log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!");
6998			return;
6999		}
7000		if claimable_amt_msat != expected_amt_msat.unwrap() {
7001			self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
7002			log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.",
7003				expected_amt_msat.unwrap(), claimable_amt_msat);
7004			return;
7005		}
7006		if valid_mpp {
7007			let mpp_parts: Vec<_> = sources.iter().filter_map(|htlc| {
7008				if let Some(cp_id) = htlc.prev_hop.counterparty_node_id {
7009					Some(MPPClaimHTLCSource {
7010						counterparty_node_id: cp_id,
7011						funding_txo: htlc.prev_hop.outpoint,
7012						channel_id: htlc.prev_hop.channel_id,
7013						htlc_id: htlc.prev_hop.htlc_id,
7014					})
7015				} else {
7016					None
7017				}
7018			}).collect();
7019			let pending_mpp_claim_ptr_opt = if sources.len() > 1 {
7020				Some(Arc::new(Mutex::new(PendingMPPClaim {
7021					channels_without_preimage: mpp_parts.clone(),
7022					channels_with_preimage: Vec::new(),
7023				})))
7024			} else {
7025				None
7026			};
7027			let payment_info = Some(PaymentClaimDetails { mpp_parts, claiming_payment });
7028			for htlc in sources {
7029				let this_mpp_claim = pending_mpp_claim_ptr_opt.as_ref().and_then(|pending_mpp_claim|
7030					if let Some(cp_id) = htlc.prev_hop.counterparty_node_id {
7031						let claim_ptr = PendingMPPClaimPointer(Arc::clone(pending_mpp_claim));
7032						Some((cp_id, htlc.prev_hop.channel_id, htlc.prev_hop.htlc_id, claim_ptr))
7033					} else {
7034						None
7035					}
7036				);
7037				let raa_blocker = pending_mpp_claim_ptr_opt.as_ref().map(|pending_claim| {
7038					RAAMonitorUpdateBlockingAction::ClaimedMPPPayment {
7039						pending_claim: PendingMPPClaimPointer(Arc::clone(pending_claim)),
7040					}
7041				});
7042				self.claim_funds_from_hop(
7043					htlc.prev_hop, payment_preimage, payment_info.clone(),
7044					|_, definitely_duplicate| {
7045						debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment");
7046						(Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim: this_mpp_claim }), raa_blocker)
7047					}
7048				);
7049			}
7050		} else {
7051			for htlc in sources {
7052				let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
7053				htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes());
7054				let source = HTLCSource::PreviousHopData(htlc.prev_hop);
7055				let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data);
7056				let receiver = HTLCDestination::FailedPayment { payment_hash };
7057				self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
7058			}
7059			self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
7060		}
7061
7062		// Now we can handle any errors which were generated.
7063		for (counterparty_node_id, err) in errs.drain(..) {
7064			let res: Result<(), _> = Err(err);
7065			let _ = handle_error!(self, res, counterparty_node_id);
7066		}
7067	}
7068
7069	fn claim_funds_from_hop<
7070		ComplFunc: FnOnce(Option<u64>, bool) -> (Option<MonitorUpdateCompletionAction>, Option<RAAMonitorUpdateBlockingAction>)
7071	>(
7072		&self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage,
7073		payment_info: Option<PaymentClaimDetails>, completion_action: ComplFunc,
7074	) {
7075		let counterparty_node_id = prev_hop.counterparty_node_id.or_else(|| {
7076			let short_to_chan_info = self.short_to_chan_info.read().unwrap();
7077			short_to_chan_info.get(&prev_hop.short_channel_id).map(|(cp_id, _)| *cp_id)
7078		});
7079
7080		let htlc_source = HTLCClaimSource {
7081			counterparty_node_id,
7082			funding_txo: prev_hop.outpoint,
7083			channel_id: prev_hop.channel_id,
7084			htlc_id: prev_hop.htlc_id,
7085		};
7086		self.claim_mpp_part(htlc_source, payment_preimage, payment_info, completion_action)
7087	}
7088
7089	fn claim_mpp_part<
7090		ComplFunc: FnOnce(Option<u64>, bool) -> (Option<MonitorUpdateCompletionAction>, Option<RAAMonitorUpdateBlockingAction>)
7091	>(
7092		&self, prev_hop: HTLCClaimSource, payment_preimage: PaymentPreimage,
7093		payment_info: Option<PaymentClaimDetails>, completion_action: ComplFunc,
7094	) {
7095		//TODO: Delay the claimed_funds relaying just like we do outbound relay!
7096
7097		// If we haven't yet run background events assume we're still deserializing and shouldn't
7098		// actually pass `ChannelMonitorUpdate`s to users yet. Instead, queue them up as
7099		// `BackgroundEvent`s.
7100		let during_init = !self.background_events_processed_since_startup.load(Ordering::Acquire);
7101
7102		// As we may call handle_monitor_update_completion_actions in rather rare cases, check that
7103		// the required mutexes are not held before we start.
7104		debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
7105		debug_assert_ne!(self.claimable_payments.held_by_thread(), LockHeldState::HeldByThread);
7106
7107		let per_peer_state = self.per_peer_state.read().unwrap();
7108		let chan_id = prev_hop.channel_id;
7109
7110		const MISSING_MON_ERROR: &'static str =
7111			"If we're going to claim an HTLC against a channel, we should always have *some* state for the channel, even if just the latest ChannelMonitor update_id. This failure indicates we need to claim an HTLC from a channel for which we did not have a ChannelMonitor at startup and didn't create one while running.";
7112
7113		// Note here that `peer_state_opt` is always `Some` if `prev_hop.counterparty_node_id` is
7114		// `Some`. This is relied on in the closed-channel case below.
7115		let mut peer_state_opt = prev_hop.counterparty_node_id.as_ref().map(
7116			|counterparty_node_id| per_peer_state.get(counterparty_node_id)
7117				.map(|peer_mutex| peer_mutex.lock().unwrap())
7118				.expect(MISSING_MON_ERROR)
7119		);
7120
7121		if let Some(peer_state_lock) = peer_state_opt.as_mut() {
7122			let peer_state = &mut **peer_state_lock;
7123			if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(chan_id) {
7124				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
7125					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
7126					let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, payment_info, &&logger);
7127
7128					match fulfill_res {
7129						UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } => {
7130							let (action_opt, raa_blocker_opt) = completion_action(Some(htlc_value_msat), false);
7131							if let Some(action) = action_opt {
7132								log_trace!(logger, "Tracking monitor update completion action for channel {}: {:?}",
7133									chan_id, action);
7134								peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
7135							}
7136							if let Some(raa_blocker) = raa_blocker_opt {
7137								peer_state.actions_blocking_raa_monitor_updates.entry(chan_id).or_insert_with(Vec::new).push(raa_blocker);
7138							}
7139							handle_new_monitor_update!(self, prev_hop.funding_txo, monitor_update, peer_state_opt,
7140								peer_state, per_peer_state, chan);
7141						}
7142						UpdateFulfillCommitFetch::DuplicateClaim {} => {
7143							let (action_opt, raa_blocker_opt) = completion_action(None, true);
7144							if let Some(raa_blocker) = raa_blocker_opt {
7145								// If we're making a claim during startup, its a replay of a
7146								// payment claim from a `ChannelMonitor`. In some cases (MPP or
7147								// if the HTLC was only recently removed) we make such claims
7148								// after an HTLC has been removed from a channel entirely, and
7149								// thus the RAA blocker has long since completed.
7150								//
7151								// In any other case, the RAA blocker must still be present and
7152								// blocking RAAs.
7153								debug_assert!(during_init ||
7154									peer_state.actions_blocking_raa_monitor_updates.get(&chan_id).unwrap().contains(&raa_blocker));
7155							}
7156							let action = if let Some(action) = action_opt {
7157								action
7158							} else {
7159								return;
7160							};
7161
7162							mem::drop(peer_state_opt);
7163
7164							log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}",
7165								chan_id, action);
7166							if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
7167								downstream_counterparty_node_id: node_id,
7168								downstream_funding_outpoint: _,
7169								blocking_action: blocker, downstream_channel_id: channel_id,
7170							} = action {
7171								if let Some(peer_state_mtx) = per_peer_state.get(&node_id) {
7172									let mut peer_state = peer_state_mtx.lock().unwrap();
7173									if let Some(blockers) = peer_state
7174										.actions_blocking_raa_monitor_updates
7175										.get_mut(&channel_id)
7176									{
7177										let mut found_blocker = false;
7178										blockers.retain(|iter| {
7179											// Note that we could actually be blocked, in
7180											// which case we need to only remove the one
7181											// blocker which was added duplicatively.
7182											let first_blocker = !found_blocker;
7183											if *iter == blocker { found_blocker = true; }
7184											*iter != blocker || !first_blocker
7185										});
7186										debug_assert!(found_blocker);
7187									}
7188								} else {
7189									debug_assert!(false);
7190								}
7191							} else if matches!(action, MonitorUpdateCompletionAction::PaymentClaimed { .. }) {
7192								debug_assert!(during_init,
7193									"Duplicate claims should always either be for forwarded payments(freeing another channel immediately) or during init (for claim replay)");
7194								mem::drop(per_peer_state);
7195								self.handle_monitor_update_completion_actions([action]);
7196							} else {
7197								debug_assert!(false,
7198									"Duplicate claims should always either be for forwarded payments(freeing another channel immediately) or during init (for claim replay)");
7199								return;
7200							};
7201						}
7202					}
7203				}
7204				return;
7205			}
7206		}
7207
7208		if prev_hop.counterparty_node_id.is_none() {
7209			let payment_hash: PaymentHash = payment_preimage.into();
7210			panic!(
7211				"Prior to upgrading to LDK 0.1, all pending HTLCs forwarded by LDK 0.0.123 or before must be resolved. It appears at least the HTLC with payment_hash {} (preimage {}) was not resolved. Please downgrade to LDK 0.0.125 and resolve the HTLC prior to upgrading.",
7212				payment_hash,
7213				payment_preimage,
7214			);
7215		}
7216		let counterparty_node_id = prev_hop.counterparty_node_id.expect("Checked immediately above");
7217		let mut peer_state = peer_state_opt.expect("peer_state_opt is always Some when the counterparty_node_id is Some");
7218
7219		let update_id = if let Some(latest_update_id) = peer_state.closed_channel_monitor_update_ids.get_mut(&chan_id) {
7220			*latest_update_id = latest_update_id.saturating_add(1);
7221			*latest_update_id
7222		} else {
7223			let err = "We need the latest ChannelMonitorUpdate ID to build a new update.
7224This should have been checked for availability on startup but somehow it is no longer available.
7225This indicates a bug inside LDK. Please report this error at https://github.com/lightningdevkit/rust-lightning/issues/new";
7226			log_error!(self.logger, "{}", err);
7227			panic!("{}", err);
7228		};
7229
7230		let preimage_update = ChannelMonitorUpdate {
7231			update_id,
7232			counterparty_node_id: Some(counterparty_node_id),
7233			updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
7234				payment_preimage,
7235				payment_info,
7236			}],
7237			channel_id: Some(prev_hop.channel_id),
7238		};
7239
7240		// Note that we do process the completion action here. This totally could be a
7241		// duplicate claim, but we have no way of knowing without interrogating the
7242		// `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
7243		// generally always allowed to be duplicative (and it's specifically noted in
7244		// `PaymentForwarded`).
7245		let (action_opt, raa_blocker_opt) = completion_action(None, false);
7246
7247		if let Some(raa_blocker) = raa_blocker_opt {
7248			peer_state.actions_blocking_raa_monitor_updates
7249				.entry(prev_hop.channel_id)
7250				.or_default()
7251				.push(raa_blocker);
7252		}
7253
7254		// Given the fact that we're in a bit of a weird edge case, its worth hashing the preimage
7255		// to include the `payment_hash` in the log metadata here.
7256		let payment_hash = payment_preimage.into();
7257		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(chan_id), Some(payment_hash));
7258
7259		if let Some(action) = action_opt {
7260			log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
7261				chan_id, action);
7262			peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
7263		}
7264
7265		handle_new_monitor_update!(
7266			self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state,
7267			counterparty_node_id, chan_id, POST_CHANNEL_CLOSE
7268		);
7269	}
7270
7271	fn finalize_claims(&self, sources: Vec<HTLCSource>) {
7272		self.pending_outbound_payments.finalize_claims(sources, &self.pending_events);
7273	}
7274
7275	fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
7276		forwarded_htlc_value_msat: Option<u64>, skimmed_fee_msat: Option<u64>, from_onchain: bool,
7277		startup_replay: bool, next_channel_counterparty_node_id: Option<PublicKey>,
7278		next_channel_outpoint: OutPoint, next_channel_id: ChannelId, next_user_channel_id: Option<u128>,
7279	) {
7280		match source {
7281			HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
7282				debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire),
7283					"We don't support claim_htlc claims during startup - monitors may not be available yet");
7284				if let Some(pubkey) = next_channel_counterparty_node_id {
7285					debug_assert_eq!(pubkey, path.hops[0].pubkey);
7286				}
7287				let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
7288					channel_funding_outpoint: next_channel_outpoint, channel_id: next_channel_id,
7289					counterparty_node_id: path.hops[0].pubkey,
7290				};
7291				self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage,
7292					session_priv, path, from_onchain, ev_completion_action, &self.pending_events,
7293					&self.logger);
7294			},
7295			HTLCSource::PreviousHopData(hop_data) => {
7296				let prev_channel_id = hop_data.channel_id;
7297				let prev_user_channel_id = hop_data.user_channel_id;
7298				let prev_node_id = hop_data.counterparty_node_id;
7299				let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
7300				self.claim_funds_from_hop(hop_data, payment_preimage, None,
7301					|htlc_claim_value_msat, definitely_duplicate| {
7302						let chan_to_release =
7303							if let Some(node_id) = next_channel_counterparty_node_id {
7304								Some(EventUnblockedChannel {
7305									counterparty_node_id: node_id,
7306									funding_txo: next_channel_outpoint,
7307									channel_id: next_channel_id,
7308									blocking_action: completed_blocker
7309								})
7310							} else {
7311								// We can only get `None` here if we are processing a
7312								// `ChannelMonitor`-originated event, in which case we
7313								// don't care about ensuring we wake the downstream
7314								// channel's monitor updating - the channel is already
7315								// closed.
7316								None
7317							};
7318
7319						if definitely_duplicate && startup_replay {
7320							// On startup we may get redundant claims which are related to
7321							// monitor updates still in flight. In that case, we shouldn't
7322							// immediately free, but instead let that monitor update complete
7323							// in the background.
7324							(None, None)
7325						} else if definitely_duplicate {
7326							if let Some(other_chan) = chan_to_release {
7327								(Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
7328									downstream_counterparty_node_id: other_chan.counterparty_node_id,
7329									downstream_funding_outpoint: other_chan.funding_txo,
7330									downstream_channel_id: other_chan.channel_id,
7331									blocking_action: other_chan.blocking_action,
7332								}), None)
7333							} else { (None, None) }
7334						} else {
7335							let total_fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
7336								if let Some(claimed_htlc_value) = htlc_claim_value_msat {
7337									Some(claimed_htlc_value - forwarded_htlc_value)
7338								} else { None }
7339							} else { None };
7340							debug_assert!(skimmed_fee_msat <= total_fee_earned_msat,
7341								"skimmed_fee_msat must always be included in total_fee_earned_msat");
7342							(Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
7343								event: events::Event::PaymentForwarded {
7344									prev_channel_id: Some(prev_channel_id),
7345									next_channel_id: Some(next_channel_id),
7346									prev_user_channel_id,
7347									next_user_channel_id,
7348									prev_node_id,
7349									next_node_id: next_channel_counterparty_node_id,
7350									total_fee_earned_msat,
7351									skimmed_fee_msat,
7352									claim_from_onchain_tx: from_onchain,
7353									outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
7354								},
7355								downstream_counterparty_and_funding_outpoint: chan_to_release,
7356							}), None)
7357						}
7358					});
7359			},
7360		}
7361	}
7362
7363	/// Gets the node_id held by this ChannelManager
7364	pub fn get_our_node_id(&self) -> PublicKey {
7365		self.our_network_pubkey
7366	}
7367
7368	fn handle_monitor_update_completion_actions<I: IntoIterator<Item=MonitorUpdateCompletionAction>>(&self, actions: I) {
7369		debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
7370		debug_assert_ne!(self.claimable_payments.held_by_thread(), LockHeldState::HeldByThread);
7371		debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
7372
7373		let mut freed_channels = Vec::new();
7374
7375		for action in actions.into_iter() {
7376			match action {
7377				MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim } => {
7378					if let Some((counterparty_node_id, chan_id, htlc_id, claim_ptr)) = pending_mpp_claim {
7379						let per_peer_state = self.per_peer_state.read().unwrap();
7380						per_peer_state.get(&counterparty_node_id).map(|peer_state_mutex| {
7381							let mut peer_state = peer_state_mutex.lock().unwrap();
7382							let blockers_entry = peer_state.actions_blocking_raa_monitor_updates.entry(chan_id);
7383							if let btree_map::Entry::Occupied(mut blockers) = blockers_entry {
7384								blockers.get_mut().retain(|blocker|
7385									if let &RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { pending_claim } = &blocker {
7386										if *pending_claim == claim_ptr {
7387											let mut pending_claim_state_lock = pending_claim.0.lock().unwrap();
7388											let pending_claim_state = &mut *pending_claim_state_lock;
7389											pending_claim_state.channels_without_preimage.retain(|htlc_info| {
7390												let this_claim =
7391													htlc_info.counterparty_node_id == counterparty_node_id
7392													&& htlc_info.channel_id == chan_id
7393													&& htlc_info.htlc_id == htlc_id;
7394												if this_claim {
7395													pending_claim_state.channels_with_preimage.push(htlc_info.clone());
7396													false
7397												} else { true }
7398											});
7399											if pending_claim_state.channels_without_preimage.is_empty() {
7400												for htlc_info in pending_claim_state.channels_with_preimage.iter() {
7401													let freed_chan = (
7402														htlc_info.counterparty_node_id,
7403														htlc_info.funding_txo,
7404														htlc_info.channel_id,
7405														blocker.clone()
7406													);
7407													freed_channels.push(freed_chan);
7408												}
7409											}
7410											!pending_claim_state.channels_without_preimage.is_empty()
7411										} else { true }
7412									} else { true }
7413								);
7414								if blockers.get().is_empty() {
7415									blockers.remove();
7416								}
7417							}
7418						});
7419					}
7420
7421					let payment = self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
7422					if let Some(ClaimingPayment {
7423						amount_msat,
7424						payment_purpose: purpose,
7425						receiver_node_id,
7426						htlcs,
7427						sender_intended_value: sender_intended_total_msat,
7428						onion_fields,
7429						payment_id,
7430					}) = payment {
7431						let event = events::Event::PaymentClaimed {
7432							payment_hash,
7433							purpose,
7434							amount_msat,
7435							receiver_node_id: Some(receiver_node_id),
7436							htlcs,
7437							sender_intended_total_msat,
7438							onion_fields,
7439							payment_id,
7440						};
7441						let event_action = (event, None);
7442						let mut pending_events = self.pending_events.lock().unwrap();
7443						// If we're replaying a claim on startup we may end up duplicating an event
7444						// that's already in our queue, so check before we push another one. The
7445						// `payment_id` should suffice to ensure we never spuriously drop a second
7446						// event for a duplicate payment.
7447						if !pending_events.contains(&event_action) {
7448							pending_events.push_back(event_action);
7449						}
7450					}
7451				},
7452				MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
7453					event, downstream_counterparty_and_funding_outpoint
7454				} => {
7455					self.pending_events.lock().unwrap().push_back((event, None));
7456					if let Some(unblocked) = downstream_counterparty_and_funding_outpoint {
7457						self.handle_monitor_update_release(
7458							unblocked.counterparty_node_id, unblocked.funding_txo,
7459							unblocked.channel_id, Some(unblocked.blocking_action),
7460						);
7461					}
7462				},
7463				MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
7464					downstream_counterparty_node_id, downstream_funding_outpoint, downstream_channel_id, blocking_action,
7465				} => {
7466					self.handle_monitor_update_release(
7467						downstream_counterparty_node_id,
7468						downstream_funding_outpoint,
7469						downstream_channel_id,
7470						Some(blocking_action),
7471					);
7472				},
7473			}
7474		}
7475
7476		for (node_id, funding_outpoint, channel_id, blocker) in freed_channels {
7477			self.handle_monitor_update_release(node_id, funding_outpoint, channel_id, Some(blocker));
7478		}
7479	}
7480
7481	/// Handles a channel reentering a functional state, either due to reconnect or a monitor
7482	/// update completion.
7483	fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
7484		channel: &mut Channel<SP>, raa: Option<msgs::RevokeAndACK>,
7485		commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
7486		pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_update_adds: Vec<msgs::UpdateAddHTLC>,
7487		funding_broadcastable: Option<Transaction>,
7488		channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>,
7489		tx_signatures: Option<msgs::TxSignatures>
7490	) -> (Option<(u64, Option<PublicKey>, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec<msgs::UpdateAddHTLC>)>) {
7491		let logger = WithChannelContext::from(&self.logger, &channel.context, None);
7492		log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures",
7493			&channel.context.channel_id(),
7494			if raa.is_some() { "an" } else { "no" },
7495			if commitment_update.is_some() { "a" } else { "no" },
7496			pending_forwards.len(), pending_update_adds.len(),
7497			if funding_broadcastable.is_some() { "" } else { "not " },
7498			if channel_ready.is_some() { "sending" } else { "without" },
7499			if announcement_sigs.is_some() { "sending" } else { "without" },
7500			if tx_signatures.is_some() { "sending" } else { "without" },
7501		);
7502
7503		let counterparty_node_id = channel.context.get_counterparty_node_id();
7504		let short_channel_id = channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias());
7505
7506		let mut htlc_forwards = None;
7507		if !pending_forwards.is_empty() {
7508			htlc_forwards = Some((
7509				short_channel_id, Some(channel.context.get_counterparty_node_id()),
7510				channel.context.get_funding_txo().unwrap(), channel.context.channel_id(),
7511				channel.context.get_user_id(), pending_forwards
7512			));
7513		}
7514		let mut decode_update_add_htlcs = None;
7515		if !pending_update_adds.is_empty() {
7516			decode_update_add_htlcs = Some((short_channel_id, pending_update_adds));
7517		}
7518
7519		if let Some(msg) = channel_ready {
7520			send_channel_ready!(self, pending_msg_events, channel, msg);
7521		}
7522		if let Some(msg) = announcement_sigs {
7523			pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
7524				node_id: counterparty_node_id,
7525				msg,
7526			});
7527		}
7528		if let Some(msg) = tx_signatures {
7529			pending_msg_events.push(events::MessageSendEvent::SendTxSignatures {
7530				node_id: counterparty_node_id,
7531				msg,
7532			});
7533		}
7534
7535		macro_rules! handle_cs { () => {
7536			if let Some(update) = commitment_update {
7537				pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
7538					node_id: counterparty_node_id,
7539					updates: update,
7540				});
7541			}
7542		} }
7543		macro_rules! handle_raa { () => {
7544			if let Some(revoke_and_ack) = raa {
7545				pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
7546					node_id: counterparty_node_id,
7547					msg: revoke_and_ack,
7548				});
7549			}
7550		} }
7551		match order {
7552			RAACommitmentOrder::CommitmentFirst => {
7553				handle_cs!();
7554				handle_raa!();
7555			},
7556			RAACommitmentOrder::RevokeAndACKFirst => {
7557				handle_raa!();
7558				handle_cs!();
7559			},
7560		}
7561
7562		if let Some(tx) = funding_broadcastable {
7563			if channel.context.is_manual_broadcast() {
7564				log_info!(logger, "Not broadcasting funding transaction with txid {} as it is manually managed", tx.compute_txid());
7565				let mut pending_events = self.pending_events.lock().unwrap();
7566				match channel.context.get_funding_txo() {
7567					Some(funding_txo) => {
7568						emit_funding_tx_broadcast_safe_event!(pending_events, channel, funding_txo.into_bitcoin_outpoint())
7569					},
7570					None => {
7571						debug_assert!(false, "Channel resumed without a funding txo, this should never happen!");
7572						return (htlc_forwards, decode_update_add_htlcs);
7573					}
7574				};
7575			} else {
7576				log_info!(logger, "Broadcasting funding transaction with txid {}", tx.compute_txid());
7577				self.tx_broadcaster.broadcast_transactions(&[&tx]);
7578			}
7579		}
7580
7581		{
7582			let mut pending_events = self.pending_events.lock().unwrap();
7583			emit_channel_pending_event!(pending_events, channel);
7584			emit_channel_ready_event!(pending_events, channel);
7585		}
7586
7587		(htlc_forwards, decode_update_add_htlcs)
7588	}
7589
7590	fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
7591		debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
7592
7593		let counterparty_node_id = match counterparty_node_id {
7594			Some(cp_id) => cp_id.clone(),
7595			None => {
7596				// TODO: Once we can rely on the counterparty_node_id from the
7597				// monitor event, this and the outpoint_to_peer map should be removed.
7598				let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
7599				match outpoint_to_peer.get(funding_txo) {
7600					Some(cp_id) => cp_id.clone(),
7601					None => return,
7602				}
7603			}
7604		};
7605		let per_peer_state = self.per_peer_state.read().unwrap();
7606		let mut peer_state_lock;
7607		let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
7608		if peer_state_mutex_opt.is_none() { return }
7609		peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
7610		let peer_state = &mut *peer_state_lock;
7611
7612		let remaining_in_flight =
7613			if let Some(pending) = peer_state.in_flight_monitor_updates.get_mut(funding_txo) {
7614				pending.retain(|upd| upd.update_id > highest_applied_update_id);
7615				pending.len()
7616			} else { 0 };
7617
7618		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*channel_id), None);
7619		log_trace!(logger, "ChannelMonitor updated to {}. {} pending in-flight updates.",
7620			highest_applied_update_id, remaining_in_flight);
7621
7622		if remaining_in_flight != 0 {
7623			return;
7624		}
7625
7626		if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) {
7627			if chan.is_awaiting_monitor_update() {
7628				log_trace!(logger, "Channel is open and awaiting update, resuming it");
7629				handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
7630			} else {
7631				log_trace!(logger, "Channel is open but not awaiting update");
7632			}
7633		} else {
7634			let update_actions = peer_state.monitor_update_blocked_actions
7635				.remove(channel_id).unwrap_or(Vec::new());
7636			log_trace!(logger, "Channel is closed, applying {} post-update actions", update_actions.len());
7637			mem::drop(peer_state_lock);
7638			mem::drop(per_peer_state);
7639			self.handle_monitor_update_completion_actions(update_actions);
7640		}
7641	}
7642
7643	/// Accepts a request to open a channel after a [`Event::OpenChannelRequest`].
7644	///
7645	/// The `temporary_channel_id` parameter indicates which inbound channel should be accepted,
7646	/// and the `counterparty_node_id` parameter is the id of the peer which has requested to open
7647	/// the channel.
7648	///
7649	/// The `user_channel_id` parameter will be provided back in
7650	/// [`Event::ChannelClosed::user_channel_id`] to allow tracking of which events correspond
7651	/// with which `accept_inbound_channel`/`accept_inbound_channel_from_trusted_peer_0conf` call.
7652	///
7653	/// Note that this method will return an error and reject the channel, if it requires support
7654	/// for zero confirmations. Instead, `accept_inbound_channel_from_trusted_peer_0conf` must be
7655	/// used to accept such channels.
7656	///
7657	/// NOTE: LDK makes no attempt to prevent the counterparty from using non-standard inputs which
7658	/// will prevent the funding transaction from being relayed on the bitcoin network and hence being
7659	/// confirmed.
7660	///
7661	/// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
7662	/// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
7663	pub fn accept_inbound_channel(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> {
7664		self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, false, user_channel_id, vec![], Weight::from_wu(0))
7665	}
7666
7667	/// Accepts a request to open a channel after a [`events::Event::OpenChannelRequest`], treating
7668	/// it as confirmed immediately.
7669	///
7670	/// The `user_channel_id` parameter will be provided back in
7671	/// [`Event::ChannelClosed::user_channel_id`] to allow tracking of which events correspond
7672	/// with which `accept_inbound_channel`/`accept_inbound_channel_from_trusted_peer_0conf` call.
7673	///
7674	/// Unlike [`ChannelManager::accept_inbound_channel`], this method accepts the incoming channel
7675	/// and (if the counterparty agrees), enables forwarding of payments immediately.
7676	///
7677	/// This fully trusts that the counterparty has honestly and correctly constructed the funding
7678	/// transaction and blindly assumes that it will eventually confirm.
7679	///
7680	/// If it does not confirm before we decide to close the channel, or if the funding transaction
7681	/// does not pay to the correct script the correct amount, *you will lose funds*.
7682	///
7683	/// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
7684	/// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
7685	pub fn accept_inbound_channel_from_trusted_peer_0conf(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> {
7686		self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, true, user_channel_id, vec![], Weight::from_wu(0))
7687	}
7688
7689	fn do_accept_inbound_channel(
7690		&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, accept_0conf: bool,
7691		user_channel_id: u128, _funding_inputs: Vec<(TxIn, TransactionU16LenLimited)>,
7692		_total_witness_weight: Weight,
7693	) -> Result<(), APIError> {
7694		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id), None);
7695		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
7696
7697		let peers_without_funded_channels =
7698			self.peers_without_funded_channels(|peer| { peer.total_channel_count() > 0 });
7699		let per_peer_state = self.per_peer_state.read().unwrap();
7700		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
7701		.ok_or_else(|| {
7702			let err_str = format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id);
7703			log_error!(logger, "{}", err_str);
7704
7705			APIError::ChannelUnavailable { err: err_str }
7706		})?;
7707		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
7708		let peer_state = &mut *peer_state_lock;
7709		let is_only_peer_channel = peer_state.total_channel_count() == 1;
7710
7711		// Find (and remove) the channel in the unaccepted table. If it's not there, something weird is
7712		// happening and return an error. N.B. that we create channel with an outbound SCID of zero so
7713		// that we can delay allocating the SCID until after we're sure that the checks below will
7714		// succeed.
7715		let res = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) {
7716			Some(unaccepted_channel) => {
7717				let best_block_height = self.best_block.read().unwrap().height;
7718				match unaccepted_channel.open_channel_msg {
7719					OpenChannelMessage::V1(open_channel_msg) => {
7720						InboundV1Channel::new(
7721							&self.fee_estimator, &self.entropy_source, &self.signer_provider, *counterparty_node_id,
7722							&self.channel_type_features(), &peer_state.latest_features, &open_channel_msg,
7723							user_channel_id, &self.default_configuration, best_block_height, &self.logger, accept_0conf
7724						).map_err(|err| MsgHandleErrInternal::from_chan_no_close(err, *temporary_channel_id)
7725						).map(|mut channel| {
7726							let logger = WithChannelContext::from(&self.logger, &channel.context, None);
7727							let message_send_event = channel.accept_inbound_channel(&&logger).map(|msg| {
7728								events::MessageSendEvent::SendAcceptChannel {
7729									node_id: *counterparty_node_id,
7730									msg,
7731								}
7732							});
7733							(*temporary_channel_id, ChannelPhase::UnfundedInboundV1(channel), message_send_event)
7734						})
7735					},
7736					#[cfg(dual_funding)]
7737					OpenChannelMessage::V2(open_channel_msg) => {
7738						InboundV2Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
7739							self.get_our_node_id(), *counterparty_node_id, &self.channel_type_features(), &peer_state.latest_features,
7740							&open_channel_msg, _funding_inputs, _total_witness_weight, user_channel_id,
7741							&self.default_configuration, best_block_height, &self.logger
7742						).map_err(|_| MsgHandleErrInternal::from_chan_no_close(
7743							ChannelError::Close(
7744								(
7745									"V2 channel rejected due to sender error".into(),
7746									ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) },
7747								)
7748							), *temporary_channel_id)
7749						).map(|channel| {
7750							let message_send_event =  events::MessageSendEvent::SendAcceptChannelV2 {
7751								node_id: channel.context.get_counterparty_node_id(),
7752								msg: channel.accept_inbound_dual_funded_channel()
7753							};
7754							(channel.context.channel_id(), ChannelPhase::UnfundedInboundV2(channel), Some(message_send_event))
7755						})
7756					},
7757				}
7758			},
7759			None => {
7760				let err_str = "No such channel awaiting to be accepted.".to_owned();
7761				log_error!(logger, "{}", err_str);
7762
7763				return Err(APIError::APIMisuseError { err: err_str });
7764			}
7765		};
7766
7767		// We have to match below instead of map_err on the above as in the map_err closure the borrow checker
7768		// would consider peer_state moved even though we would bail out with the `?` operator.
7769		let (channel_id, mut channel_phase, message_send_event) = match res {
7770			Ok(res) => res,
7771			Err(err) => {
7772				mem::drop(peer_state_lock);
7773				mem::drop(per_peer_state);
7774				// TODO(dunxen): Find/make less icky way to do this.
7775				match handle_error!(self, Result::<(), MsgHandleErrInternal>::Err(err), *counterparty_node_id) {
7776					Ok(_) => unreachable!("`handle_error` only returns Err as we've passed in an Err"),
7777					Err(e) => {
7778						return Err(APIError::ChannelUnavailable { err: e.err });
7779					},
7780				}
7781			}
7782		};
7783
7784		if accept_0conf {
7785			// This should have been correctly configured by the call to Inbound(V1/V2)Channel::new.
7786			debug_assert!(channel_phase.context().minimum_depth().unwrap() == 0);
7787		} else if channel_phase.context().get_channel_type().requires_zero_conf() {
7788			let send_msg_err_event = events::MessageSendEvent::HandleError {
7789				node_id: channel_phase.context().get_counterparty_node_id(),
7790				action: msgs::ErrorAction::SendErrorMessage{
7791					msg: msgs::ErrorMessage { channel_id: *temporary_channel_id, data: "No zero confirmation channels accepted".to_owned(), }
7792				}
7793			};
7794			peer_state.pending_msg_events.push(send_msg_err_event);
7795			let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
7796			log_error!(logger, "{}", err_str);
7797
7798			return Err(APIError::APIMisuseError { err: err_str });
7799		} else {
7800			// If this peer already has some channels, a new channel won't increase our number of peers
7801			// with unfunded channels, so as long as we aren't over the maximum number of unfunded
7802			// channels per-peer we can accept channels from a peer with existing ones.
7803			if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
7804				let send_msg_err_event = events::MessageSendEvent::HandleError {
7805					node_id: channel_phase.context().get_counterparty_node_id(),
7806					action: msgs::ErrorAction::SendErrorMessage{
7807						msg: msgs::ErrorMessage { channel_id: *temporary_channel_id, data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
7808					}
7809				};
7810				peer_state.pending_msg_events.push(send_msg_err_event);
7811				let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
7812				log_error!(logger, "{}", err_str);
7813
7814				return Err(APIError::APIMisuseError { err: err_str });
7815			}
7816		}
7817
7818		// Now that we know we have a channel, assign an outbound SCID alias.
7819		let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
7820		channel_phase.context_mut().set_outbound_scid_alias(outbound_scid_alias);
7821
7822		if let Some(message_send_event) = message_send_event {
7823			peer_state.pending_msg_events.push(message_send_event);
7824		}
7825		peer_state.channel_by_id.insert(channel_id, channel_phase);
7826
7827		Ok(())
7828	}
7829
7830	/// Gets the number of peers which match the given filter and do not have any funded, outbound,
7831	/// or 0-conf channels.
7832	///
7833	/// The filter is called for each peer and provided with the number of unfunded, inbound, and
7834	/// non-0-conf channels we have with the peer.
7835	fn peers_without_funded_channels<Filter>(&self, maybe_count_peer: Filter) -> usize
7836	where Filter: Fn(&PeerState<SP>) -> bool {
7837		let mut peers_without_funded_channels = 0;
7838		let best_block_height = self.best_block.read().unwrap().height;
7839		{
7840			let peer_state_lock = self.per_peer_state.read().unwrap();
7841			for (_, peer_mtx) in peer_state_lock.iter() {
7842				let peer = peer_mtx.lock().unwrap();
7843				if !maybe_count_peer(&*peer) { continue; }
7844				let num_unfunded_channels = Self::unfunded_channel_count(&peer, best_block_height);
7845				if num_unfunded_channels == peer.total_channel_count() {
7846					peers_without_funded_channels += 1;
7847				}
7848			}
7849		}
7850		return peers_without_funded_channels;
7851	}
7852
7853	fn unfunded_channel_count(
7854		peer: &PeerState<SP>, best_block_height: u32
7855	) -> usize {
7856		let mut num_unfunded_channels = 0;
7857		for (_, phase) in peer.channel_by_id.iter() {
7858			match phase {
7859				ChannelPhase::Funded(chan) => {
7860					// This covers non-zero-conf inbound `Channel`s that we are currently monitoring, but those
7861					// which have not yet had any confirmations on-chain.
7862					if !chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 &&
7863						chan.context.get_funding_tx_confirmations(best_block_height) == 0
7864					{
7865						num_unfunded_channels += 1;
7866					}
7867				},
7868				ChannelPhase::UnfundedInboundV1(chan) => {
7869					if chan.context.minimum_depth().unwrap_or(1) != 0 {
7870						num_unfunded_channels += 1;
7871					}
7872				},
7873				ChannelPhase::UnfundedInboundV2(chan) => {
7874					// Only inbound V2 channels that are not 0conf and that we do not contribute to will be
7875					// included in the unfunded count.
7876					if chan.context.minimum_depth().unwrap_or(1) != 0 &&
7877						chan.dual_funding_context.our_funding_satoshis == 0 {
7878						num_unfunded_channels += 1;
7879					}
7880				},
7881				ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedOutboundV2(_) => {
7882					// Outbound channels don't contribute to the unfunded count in the DoS context.
7883					continue;
7884				},
7885			}
7886		}
7887		num_unfunded_channels + peer.inbound_channel_request_by_id.len()
7888	}
7889
7890	fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: OpenChannelMessageRef<'_>) -> Result<(), MsgHandleErrInternal> {
7891		let common_fields = match msg {
7892			OpenChannelMessageRef::V1(msg) => &msg.common_fields,
7893			#[cfg(dual_funding)]
7894			OpenChannelMessageRef::V2(msg) => &msg.common_fields,
7895		};
7896
7897		// Do common open_channel(2) checks
7898
7899		// Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
7900		// likely to be lost on restart!
7901		if common_fields.chain_hash != self.chain_hash {
7902			return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(),
7903				 common_fields.temporary_channel_id));
7904		}
7905
7906		if !self.default_configuration.accept_inbound_channels {
7907			return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(),
7908				 common_fields.temporary_channel_id));
7909		}
7910
7911		// Get the number of peers with channels, but without funded ones. We don't care too much
7912		// about peers that never open a channel, so we filter by peers that have at least one
7913		// channel, and then limit the number of those with unfunded channels.
7914		let channeled_peers_without_funding =
7915			self.peers_without_funded_channels(|node| node.total_channel_count() > 0);
7916
7917		let per_peer_state = self.per_peer_state.read().unwrap();
7918		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
7919		    .ok_or_else(|| {
7920				debug_assert!(false);
7921				MsgHandleErrInternal::send_err_msg_no_close(
7922					format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
7923					common_fields.temporary_channel_id)
7924			})?;
7925		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
7926		let peer_state = &mut *peer_state_lock;
7927
7928		// If this peer already has some channels, a new channel won't increase our number of peers
7929		// with unfunded channels, so as long as we aren't over the maximum number of unfunded
7930		// channels per-peer we can accept channels from a peer with existing ones.
7931		if peer_state.total_channel_count() == 0 &&
7932			channeled_peers_without_funding >= MAX_UNFUNDED_CHANNEL_PEERS &&
7933			!self.default_configuration.manually_accept_inbound_channels
7934		{
7935			return Err(MsgHandleErrInternal::send_err_msg_no_close(
7936				"Have too many peers with unfunded channels, not accepting new ones".to_owned(),
7937				common_fields.temporary_channel_id));
7938		}
7939
7940		let best_block_height = self.best_block.read().unwrap().height;
7941		if Self::unfunded_channel_count(peer_state, best_block_height) >= MAX_UNFUNDED_CHANS_PER_PEER {
7942			return Err(MsgHandleErrInternal::send_err_msg_no_close(
7943				format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER),
7944				common_fields.temporary_channel_id));
7945		}
7946
7947		let channel_id = common_fields.temporary_channel_id;
7948		let channel_exists = peer_state.has_channel(&channel_id);
7949		if channel_exists {
7950			return Err(MsgHandleErrInternal::send_err_msg_no_close(
7951				"temporary_channel_id collision for the same peer!".to_owned(),
7952				common_fields.temporary_channel_id));
7953		}
7954
7955		// We can get the channel type at this point already as we'll need it immediately in both the
7956		// manual and the automatic acceptance cases.
7957		let channel_type = channel::channel_type_from_open_channel(
7958			common_fields, &peer_state.latest_features, &self.channel_type_features()
7959		).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, common_fields.temporary_channel_id))?;
7960
7961		// If we're doing manual acceptance checks on the channel, then defer creation until we're sure we want to accept.
7962		if self.default_configuration.manually_accept_inbound_channels {
7963			let mut pending_events = self.pending_events.lock().unwrap();
7964			let is_announced = (common_fields.channel_flags & 1) == 1;
7965			pending_events.push_back((events::Event::OpenChannelRequest {
7966				temporary_channel_id: common_fields.temporary_channel_id,
7967				counterparty_node_id: *counterparty_node_id,
7968				funding_satoshis: common_fields.funding_satoshis,
7969				channel_negotiation_type: match msg {
7970					OpenChannelMessageRef::V1(msg) => InboundChannelFunds::PushMsat(msg.push_msat),
7971					#[cfg(dual_funding)]
7972					OpenChannelMessageRef::V2(_) => InboundChannelFunds::DualFunded,
7973				},
7974				channel_type,
7975				is_announced,
7976				params: common_fields.channel_parameters(),
7977			}, None));
7978			peer_state.inbound_channel_request_by_id.insert(channel_id, InboundChannelRequest {
7979				open_channel_msg: match msg {
7980					OpenChannelMessageRef::V1(msg) => OpenChannelMessage::V1(msg.clone()),
7981					#[cfg(dual_funding)]
7982					OpenChannelMessageRef::V2(msg) => OpenChannelMessage::V2(msg.clone()),
7983				},
7984				ticks_remaining: UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS,
7985			});
7986			return Ok(());
7987		}
7988
7989		// Otherwise create the channel right now.
7990		let mut random_bytes = [0u8; 16];
7991		random_bytes.copy_from_slice(&self.entropy_source.get_secure_random_bytes()[..16]);
7992		let user_channel_id = u128::from_be_bytes(random_bytes);
7993
7994		if channel_type.requires_zero_conf() {
7995			return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), common_fields.temporary_channel_id));
7996		}
7997		if channel_type.requires_anchors_zero_fee_htlc_tx() {
7998			return Err(MsgHandleErrInternal::send_err_msg_no_close("No channels with anchor outputs accepted".to_owned(), common_fields.temporary_channel_id));
7999		}
8000
8001		let (mut channel_phase, message_send_event) = match msg {
8002			OpenChannelMessageRef::V1(msg) => {
8003				let mut channel = InboundV1Channel::new(
8004					&self.fee_estimator, &self.entropy_source, &self.signer_provider, *counterparty_node_id,
8005					&self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id,
8006					&self.default_configuration, best_block_height, &self.logger, /*is_0conf=*/false
8007				).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id))?;
8008				let logger = WithChannelContext::from(&self.logger, &channel.context, None);
8009				let message_send_event = channel.accept_inbound_channel(&&logger).map(|msg| {
8010					events::MessageSendEvent::SendAcceptChannel {
8011						node_id: *counterparty_node_id,
8012						msg,
8013					}
8014				});
8015				(ChannelPhase::UnfundedInboundV1(channel), message_send_event)
8016			},
8017			#[cfg(dual_funding)]
8018			OpenChannelMessageRef::V2(msg) => {
8019				let channel = InboundV2Channel::new(&self.fee_estimator, &self.entropy_source,
8020					&self.signer_provider, self.get_our_node_id(), *counterparty_node_id,
8021					&self.channel_type_features(), &peer_state.latest_features, msg, vec![], Weight::from_wu(0),
8022					user_channel_id, &self.default_configuration, best_block_height, &self.logger
8023				).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id))?;
8024				let message_send_event = events::MessageSendEvent::SendAcceptChannelV2 {
8025					node_id: *counterparty_node_id,
8026					msg: channel.accept_inbound_dual_funded_channel(),
8027				};
8028				(ChannelPhase::UnfundedInboundV2(channel), Some(message_send_event))
8029			},
8030		};
8031
8032		let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
8033		channel_phase.context_mut().set_outbound_scid_alias(outbound_scid_alias);
8034
8035		if let Some(message_send_event) = message_send_event {
8036			peer_state.pending_msg_events.push(message_send_event);
8037		}
8038		peer_state.channel_by_id.insert(channel_phase.context().channel_id(), channel_phase);
8039
8040		Ok(())
8041	}
8042
8043	fn internal_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
8044		// Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
8045		// likely to be lost on restart!
8046		let (value, output_script, user_id) = {
8047			let per_peer_state = self.per_peer_state.read().unwrap();
8048			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8049				.ok_or_else(|| {
8050					debug_assert!(false);
8051					MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id)
8052				})?;
8053			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8054			let peer_state = &mut *peer_state_lock;
8055			match peer_state.channel_by_id.entry(msg.common_fields.temporary_channel_id) {
8056				hash_map::Entry::Occupied(mut phase) => {
8057					match phase.get_mut() {
8058						ChannelPhase::UnfundedOutboundV1(chan) => {
8059							try_chan_phase_entry!(self, peer_state, chan.accept_channel(msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), phase);
8060							(chan.context.get_value_satoshis(), chan.context.get_funding_redeemscript().to_p2wsh(), chan.context.get_user_id())
8061						},
8062						_ => {
8063							return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id));
8064						}
8065					}
8066				},
8067				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id))
8068			}
8069		};
8070		let mut pending_events = self.pending_events.lock().unwrap();
8071		pending_events.push_back((events::Event::FundingGenerationReady {
8072			temporary_channel_id: msg.common_fields.temporary_channel_id,
8073			counterparty_node_id: *counterparty_node_id,
8074			channel_value_satoshis: value,
8075			output_script,
8076			user_channel_id: user_id,
8077		}, None));
8078		Ok(())
8079	}
8080
8081	fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
8082		let best_block = *self.best_block.read().unwrap();
8083
8084		let per_peer_state = self.per_peer_state.read().unwrap();
8085		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8086			.ok_or_else(|| {
8087				debug_assert!(false);
8088				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id)
8089			})?;
8090
8091		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8092		let peer_state = &mut *peer_state_lock;
8093		let (mut chan, funding_msg_opt, monitor) =
8094			match peer_state.channel_by_id.remove(&msg.temporary_channel_id) {
8095				Some(ChannelPhase::UnfundedInboundV1(inbound_chan)) => {
8096					let logger = WithChannelContext::from(&self.logger, &inbound_chan.context, None);
8097					match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) {
8098						Ok(res) => res,
8099						Err((inbound_chan, err)) => {
8100							// We've already removed this inbound channel from the map in `PeerState`
8101							// above so at this point we just need to clean up any lingering entries
8102							// concerning this channel as it is safe to do so.
8103							debug_assert!(matches!(err, ChannelError::Close(_)));
8104							// Really we should be returning the channel_id the peer expects based
8105							// on their funding info here, but they're horribly confused anyway, so
8106							// there's not a lot we can do to save them.
8107							return Err(convert_chan_phase_err!(self, peer_state, err, &mut ChannelPhase::UnfundedInboundV1(inbound_chan), &msg.temporary_channel_id).1);
8108						},
8109					}
8110				},
8111				Some(mut phase) => {
8112					let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id);
8113					let err = ChannelError::close(err_msg);
8114					return Err(convert_chan_phase_err!(self, peer_state, err, &mut phase, &msg.temporary_channel_id).1);
8115				},
8116				None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
8117			};
8118
8119		let funded_channel_id = chan.context.channel_id();
8120
8121		macro_rules! fail_chan { ($err: expr) => { {
8122			// Note that at this point we've filled in the funding outpoint on our
8123			// channel, but its actually in conflict with another channel. Thus, if
8124			// we call `convert_chan_phase_err` immediately (thus calling
8125			// `locked_close_channel`), we'll remove the existing channel from `outpoint_to_peer`.
8126			// Thus, we must first unset the funding outpoint on the channel.
8127			let err = ChannelError::close($err.to_owned());
8128			chan.unset_funding_info(msg.temporary_channel_id);
8129			return Err(convert_chan_phase_err!(self, peer_state, err, chan, &funded_channel_id, UNFUNDED_CHANNEL).1);
8130		} } }
8131
8132		match peer_state.channel_by_id.entry(funded_channel_id) {
8133			hash_map::Entry::Occupied(_) => {
8134				fail_chan!("Already had channel with the new channel_id");
8135			},
8136			hash_map::Entry::Vacant(e) => {
8137				let mut outpoint_to_peer_lock = self.outpoint_to_peer.lock().unwrap();
8138				match outpoint_to_peer_lock.entry(monitor.get_funding_txo().0) {
8139					hash_map::Entry::Occupied(_) => {
8140						fail_chan!("The funding_created message had the same funding_txid as an existing channel - funding is not possible");
8141					},
8142					hash_map::Entry::Vacant(i_e) => {
8143						let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
8144						if let Ok(persist_state) = monitor_res {
8145							i_e.insert(chan.context.get_counterparty_node_id());
8146							mem::drop(outpoint_to_peer_lock);
8147
8148							// There's no problem signing a counterparty's funding transaction if our monitor
8149							// hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
8150							// accepted payment from yet. We do, however, need to wait to send our channel_ready
8151							// until we have persisted our monitor.
8152							if let Some(msg) = funding_msg_opt {
8153								peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
8154									node_id: counterparty_node_id.clone(),
8155									msg,
8156								});
8157							}
8158
8159							if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan)) {
8160								handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
8161									per_peer_state, chan, INITIAL_MONITOR);
8162							} else {
8163								unreachable!("This must be a funded channel as we just inserted it.");
8164							}
8165							Ok(())
8166						} else {
8167							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
8168							log_error!(logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
8169							fail_chan!("Duplicate funding outpoint");
8170						}
8171					}
8172				}
8173			}
8174		}
8175	}
8176
8177	fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
8178		let best_block = *self.best_block.read().unwrap();
8179		let per_peer_state = self.per_peer_state.read().unwrap();
8180		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8181			.ok_or_else(|| {
8182				debug_assert!(false);
8183				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
8184			})?;
8185
8186		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8187		let peer_state = &mut *peer_state_lock;
8188		match peer_state.channel_by_id.entry(msg.channel_id) {
8189			hash_map::Entry::Occupied(chan_phase_entry) => {
8190				if matches!(chan_phase_entry.get(), ChannelPhase::UnfundedOutboundV1(_)) {
8191					let chan = if let ChannelPhase::UnfundedOutboundV1(chan) = chan_phase_entry.remove() { chan } else { unreachable!() };
8192					let logger = WithContext::from(
8193						&self.logger,
8194						Some(chan.context.get_counterparty_node_id()),
8195						Some(chan.context.channel_id()),
8196						None
8197					);
8198					let res =
8199						chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger);
8200					match res {
8201						Ok((mut chan, monitor)) => {
8202							if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
8203								// We really should be able to insert here without doing a second
8204								// lookup, but sadly rust stdlib doesn't currently allow keeping
8205								// the original Entry around with the value removed.
8206								let mut chan = peer_state.channel_by_id.entry(msg.channel_id).or_insert(ChannelPhase::Funded(chan));
8207								if let ChannelPhase::Funded(ref mut chan) = &mut chan {
8208									handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
8209								} else { unreachable!(); }
8210								Ok(())
8211							} else {
8212								let e = ChannelError::close("Channel funding outpoint was a duplicate".to_owned());
8213								// We weren't able to watch the channel to begin with, so no
8214								// updates should be made on it. Previously, full_stack_target
8215								// found an (unreachable) panic when the monitor update contained
8216								// within `shutdown_finish` was applied.
8217								chan.unset_funding_info(msg.channel_id);
8218								return Err(convert_chan_phase_err!(self, peer_state, e, &mut ChannelPhase::Funded(chan), &msg.channel_id).1);
8219							}
8220						},
8221						Err((chan, e)) => {
8222							debug_assert!(matches!(e, ChannelError::Close(_)),
8223								"We don't have a channel anymore, so the error better have expected close");
8224							// We've already removed this outbound channel from the map in
8225							// `PeerState` above so at this point we just need to clean up any
8226							// lingering entries concerning this channel as it is safe to do so.
8227							return Err(convert_chan_phase_err!(self, peer_state, e, &mut ChannelPhase::UnfundedOutboundV1(chan), &msg.channel_id).1);
8228						}
8229					}
8230				} else {
8231					return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id));
8232				}
8233			},
8234			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
8235		}
8236	}
8237
8238	fn internal_tx_msg<HandleTxMsgFn: Fn(&mut ChannelPhase<SP>) -> Result<MessageSendEvent, &'static str>>(
8239		&self, counterparty_node_id: &PublicKey, channel_id: ChannelId, tx_msg_handler: HandleTxMsgFn
8240	) -> Result<(), MsgHandleErrInternal> {
8241		let per_peer_state = self.per_peer_state.read().unwrap();
8242		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8243			.ok_or_else(|| {
8244				debug_assert!(false);
8245				MsgHandleErrInternal::send_err_msg_no_close(
8246					format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
8247					channel_id)
8248			})?;
8249		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8250		let peer_state = &mut *peer_state_lock;
8251		match peer_state.channel_by_id.entry(channel_id) {
8252			hash_map::Entry::Occupied(mut chan_phase_entry) => {
8253				let channel_phase = chan_phase_entry.get_mut();
8254				let msg_send_event = match tx_msg_handler(channel_phase) {
8255					Ok(msg_send_event) => msg_send_event,
8256					Err(tx_msg_str) =>  return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(
8257						format!("Got a {tx_msg_str} message with no interactive transaction construction expected or in-progress")
8258					), channel_id)),
8259				};
8260				peer_state.pending_msg_events.push(msg_send_event);
8261				Ok(())
8262			},
8263			hash_map::Entry::Vacant(_) => {
8264				Err(MsgHandleErrInternal::send_err_msg_no_close(format!(
8265					"Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
8266					counterparty_node_id), channel_id)
8267				)
8268			}
8269		}
8270	}
8271
8272	fn internal_tx_add_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddInput) -> Result<(), MsgHandleErrInternal> {
8273		self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut ChannelPhase<SP>| {
8274			match channel_phase {
8275				ChannelPhase::UnfundedInboundV2(ref mut channel) => {
8276					Ok(channel.tx_add_input(msg).into_msg_send_event(counterparty_node_id))
8277				},
8278				ChannelPhase::UnfundedOutboundV2(ref mut channel) => {
8279					Ok(channel.tx_add_input(msg).into_msg_send_event(counterparty_node_id))
8280				},
8281				_ => Err("tx_add_input"),
8282			}
8283		})
8284	}
8285
8286	fn internal_tx_add_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddOutput) -> Result<(), MsgHandleErrInternal> {
8287		self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut ChannelPhase<SP>| {
8288			match channel_phase {
8289				ChannelPhase::UnfundedInboundV2(ref mut channel) => {
8290					Ok(channel.tx_add_output(msg).into_msg_send_event(counterparty_node_id))
8291				},
8292				ChannelPhase::UnfundedOutboundV2(ref mut channel) => {
8293					Ok(channel.tx_add_output(msg).into_msg_send_event(counterparty_node_id))
8294				},
8295				_ => Err("tx_add_output"),
8296			}
8297		})
8298	}
8299
8300	fn internal_tx_remove_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveInput) -> Result<(), MsgHandleErrInternal> {
8301		self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut ChannelPhase<SP>| {
8302			match channel_phase {
8303				ChannelPhase::UnfundedInboundV2(ref mut channel) => {
8304					Ok(channel.tx_remove_input(msg).into_msg_send_event(counterparty_node_id))
8305				},
8306				ChannelPhase::UnfundedOutboundV2(ref mut channel) => {
8307					Ok(channel.tx_remove_input(msg).into_msg_send_event(counterparty_node_id))
8308				},
8309				_ => Err("tx_remove_input"),
8310			}
8311		})
8312	}
8313
8314	fn internal_tx_remove_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveOutput) -> Result<(), MsgHandleErrInternal> {
8315		self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut ChannelPhase<SP>| {
8316			match channel_phase {
8317				ChannelPhase::UnfundedInboundV2(ref mut channel) => {
8318					Ok(channel.tx_remove_output(msg).into_msg_send_event(counterparty_node_id))
8319				},
8320				ChannelPhase::UnfundedOutboundV2(ref mut channel) => {
8321					Ok(channel.tx_remove_output(msg).into_msg_send_event(counterparty_node_id))
8322				},
8323				_ => Err("tx_remove_output"),
8324			}
8325		})
8326	}
8327
8328	fn internal_tx_complete(&self, counterparty_node_id: PublicKey, msg: &msgs::TxComplete) -> Result<(), MsgHandleErrInternal> {
8329		let per_peer_state = self.per_peer_state.read().unwrap();
8330		let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
8331			.ok_or_else(|| {
8332				debug_assert!(false);
8333				MsgHandleErrInternal::send_err_msg_no_close(
8334					format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
8335					msg.channel_id)
8336			})?;
8337		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8338		let peer_state = &mut *peer_state_lock;
8339		match peer_state.channel_by_id.entry(msg.channel_id) {
8340			hash_map::Entry::Occupied(mut chan_phase_entry) => {
8341				let channel_phase = chan_phase_entry.get_mut();
8342				let (msg_send_event_opt, signing_session_opt) = match channel_phase {
8343					ChannelPhase::UnfundedInboundV2(channel) => channel.tx_complete(msg)
8344						.into_msg_send_event_or_signing_session(counterparty_node_id),
8345					ChannelPhase::UnfundedOutboundV2(channel) => channel.tx_complete(msg)
8346						.into_msg_send_event_or_signing_session(counterparty_node_id),
8347					_ => try_chan_phase_entry!(self, peer_state, Err(ChannelError::Close(
8348						(
8349							"Got a tx_complete message with no interactive transaction construction expected or in-progress".into(),
8350							ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) },
8351						))), chan_phase_entry)
8352				};
8353				if let Some(msg_send_event) = msg_send_event_opt {
8354					peer_state.pending_msg_events.push(msg_send_event);
8355				};
8356				if let Some(mut signing_session) = signing_session_opt {
8357					let (commitment_signed, funding_ready_for_sig_event_opt) = match chan_phase_entry.get_mut() {
8358						ChannelPhase::UnfundedOutboundV2(chan) => {
8359							chan.funding_tx_constructed(&mut signing_session, &self.logger)
8360						},
8361						ChannelPhase::UnfundedInboundV2(chan) => {
8362							chan.funding_tx_constructed(&mut signing_session, &self.logger)
8363						},
8364						_ => Err(ChannelError::Warn(
8365							"Got a tx_complete message with no interactive transaction construction expected or in-progress"
8366							.into())),
8367					}.map_err(|err| MsgHandleErrInternal::send_err_msg_no_close(format!("{}", err), msg.channel_id))?;
8368					let (channel_id, channel_phase) = chan_phase_entry.remove_entry();
8369					let channel = match channel_phase {
8370						ChannelPhase::UnfundedOutboundV2(chan) => chan.into_channel(signing_session),
8371						ChannelPhase::UnfundedInboundV2(chan) => chan.into_channel(signing_session),
8372						_ => {
8373							debug_assert!(false); // It cannot be another variant as we are in the `Ok` branch of the above match.
8374							Err(ChannelError::Warn(
8375								"Got a tx_complete message with no interactive transaction construction expected or in-progress"
8376									.into()))
8377						},
8378					}.map_err(|err| MsgHandleErrInternal::send_err_msg_no_close(format!("{}", err), msg.channel_id))?;
8379					peer_state.channel_by_id.insert(channel_id, ChannelPhase::Funded(channel));
8380					if let Some(funding_ready_for_sig_event) = funding_ready_for_sig_event_opt {
8381						let mut pending_events = self.pending_events.lock().unwrap();
8382						pending_events.push_back((funding_ready_for_sig_event, None));
8383					}
8384					peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
8385						node_id: counterparty_node_id,
8386						updates: CommitmentUpdate {
8387							commitment_signed,
8388							update_add_htlcs: vec![],
8389							update_fulfill_htlcs: vec![],
8390							update_fail_htlcs: vec![],
8391							update_fail_malformed_htlcs: vec![],
8392							update_fee: None,
8393						},
8394					});
8395				}
8396				Ok(())
8397			},
8398			hash_map::Entry::Vacant(_) => {
8399				Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8400			}
8401		}
8402	}
8403
8404	fn internal_tx_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxSignatures)
8405	-> Result<(), MsgHandleErrInternal> {
8406		let per_peer_state = self.per_peer_state.read().unwrap();
8407		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8408			.ok_or_else(|| {
8409				debug_assert!(false);
8410				MsgHandleErrInternal::send_err_msg_no_close(
8411					format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
8412					msg.channel_id)
8413			})?;
8414		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8415		let peer_state = &mut *peer_state_lock;
8416		match peer_state.channel_by_id.entry(msg.channel_id) {
8417			hash_map::Entry::Occupied(mut chan_phase_entry) => {
8418				let channel_phase = chan_phase_entry.get_mut();
8419				match channel_phase {
8420					ChannelPhase::Funded(chan) => {
8421						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
8422						let (tx_signatures_opt, funding_tx_opt) = try_chan_phase_entry!(self, peer_state, chan.tx_signatures(msg, &&logger), chan_phase_entry);
8423						if let Some(tx_signatures) = tx_signatures_opt {
8424							peer_state.pending_msg_events.push(events::MessageSendEvent::SendTxSignatures {
8425								node_id: *counterparty_node_id,
8426								msg: tx_signatures,
8427							});
8428						}
8429						if let Some(ref funding_tx) = funding_tx_opt {
8430							self.tx_broadcaster.broadcast_transactions(&[funding_tx]);
8431							{
8432								let mut pending_events = self.pending_events.lock().unwrap();
8433								emit_channel_pending_event!(pending_events, chan);
8434							}
8435						}
8436					},
8437					_ => try_chan_phase_entry!(self, peer_state, Err(ChannelError::Close(
8438						(
8439							"Got an unexpected tx_signatures message".into(),
8440							ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) },
8441						))), chan_phase_entry)
8442				}
8443				Ok(())
8444			},
8445			hash_map::Entry::Vacant(_) => {
8446				Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8447			}
8448		}
8449	}
8450
8451	fn internal_tx_abort(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAbort)
8452	-> Result<(), MsgHandleErrInternal> {
8453		let per_peer_state = self.per_peer_state.read().unwrap();
8454		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8455			.ok_or_else(|| {
8456				debug_assert!(false);
8457				MsgHandleErrInternal::send_err_msg_no_close(
8458					format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
8459					msg.channel_id)
8460			})?;
8461		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8462		let peer_state = &mut *peer_state_lock;
8463		match peer_state.channel_by_id.entry(msg.channel_id) {
8464			hash_map::Entry::Occupied(mut chan_phase_entry) => {
8465				let channel_phase = chan_phase_entry.get_mut();
8466				let tx_constructor = match channel_phase {
8467					ChannelPhase::UnfundedInboundV2(chan) => chan.interactive_tx_constructor_mut(),
8468					ChannelPhase::UnfundedOutboundV2(chan) => chan.interactive_tx_constructor_mut(),
8469					ChannelPhase::Funded(_) => {
8470						// TODO(splicing)/TODO(RBF): We'll also be doing interactive tx construction
8471						// for a "ChannelPhase::Funded" when we want to bump the fee on an interactively
8472						// constructed funding tx or during splicing. For now we send an error as we would
8473						// never ack an RBF attempt or a splice for now:
8474						try_chan_phase_entry!(self, peer_state, Err(ChannelError::Warn(
8475							"Got an unexpected tx_abort message: After initial funding transaction is signed, \
8476							splicing and RBF attempts of interactive funding transactions are not supported yet so \
8477							we don't have any negotiation in progress".into(),
8478						)), chan_phase_entry)
8479					}
8480					ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
8481						try_chan_phase_entry!(self, peer_state, Err(ChannelError::Warn(
8482							"Got an unexpected tx_abort message: This is an unfunded channel created with V1 channel \
8483							establishment".into(),
8484						)), chan_phase_entry)
8485					},
8486				};
8487				// This checks for and resets the interactive negotiation state by `take()`ing it from the channel.
8488				// The existence of the `tx_constructor` indicates that we have not moved into the signing
8489				// phase for this interactively constructed transaction and hence we have not exchanged
8490				// `tx_signatures`. Either way, we never close the channel upon receiving a `tx_abort`:
8491				//   https://github.com/lightning/bolts/blob/247e83d/02-peer-protocol.md?plain=1#L574-L576
8492				if tx_constructor.take().is_some() {
8493					let msg = msgs::TxAbort {
8494						channel_id: msg.channel_id,
8495						data: "Acknowledged tx_abort".to_string().into_bytes(),
8496					};
8497					// NOTE: Since at this point we have not sent a `tx_abort` message for this negotiation
8498					// previously (tx_constructor was `Some`), we need to echo back a tx_abort message according
8499					// to the spec:
8500					//   https://github.com/lightning/bolts/blob/247e83d/02-peer-protocol.md?plain=1#L560-L561
8501					// For rationale why we echo back `tx_abort`:
8502					//   https://github.com/lightning/bolts/blob/247e83d/02-peer-protocol.md?plain=1#L578-L580
8503					peer_state.pending_msg_events.push(events::MessageSendEvent::SendTxAbort {
8504						node_id: *counterparty_node_id,
8505						msg,
8506					});
8507				}
8508				Ok(())
8509			},
8510			hash_map::Entry::Vacant(_) => {
8511				Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8512			}
8513		}
8514	}
8515
8516	fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {
8517		// Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
8518		// closing a channel), so any changes are likely to be lost on restart!
8519		let per_peer_state = self.per_peer_state.read().unwrap();
8520		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8521			.ok_or_else(|| {
8522				debug_assert!(false);
8523				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
8524			})?;
8525		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8526		let peer_state = &mut *peer_state_lock;
8527		match peer_state.channel_by_id.entry(msg.channel_id) {
8528			hash_map::Entry::Occupied(mut chan_phase_entry) => {
8529				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
8530					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
8531					let announcement_sigs_opt = try_chan_phase_entry!(self, peer_state, chan.channel_ready(&msg, &self.node_signer,
8532						self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &&logger), chan_phase_entry);
8533					if let Some(announcement_sigs) = announcement_sigs_opt {
8534						log_trace!(logger, "Sending announcement_signatures for channel {}", chan.context.channel_id());
8535						peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
8536							node_id: counterparty_node_id.clone(),
8537							msg: announcement_sigs,
8538						});
8539					} else if chan.context.is_usable() {
8540						// If we're sending an announcement_signatures, we'll send the (public)
8541						// channel_update after sending a channel_announcement when we receive our
8542						// counterparty's announcement_signatures. Thus, we only bother to send a
8543						// channel_update here if the channel is not public, i.e. we're not sending an
8544						// announcement_signatures.
8545						log_trace!(logger, "Sending private initial channel_update for our counterparty on channel {}", chan.context.channel_id());
8546						if let Ok(msg) = self.get_channel_update_for_unicast(chan) {
8547							peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
8548								node_id: counterparty_node_id.clone(),
8549								msg,
8550							});
8551						}
8552					}
8553
8554					{
8555						let mut pending_events = self.pending_events.lock().unwrap();
8556						emit_channel_ready_event!(pending_events, chan);
8557					}
8558
8559					Ok(())
8560				} else {
8561					try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
8562						"Got a channel_ready message for an unfunded channel!".into())), chan_phase_entry)
8563				}
8564			},
8565			hash_map::Entry::Vacant(_) => {
8566				Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8567			}
8568		}
8569	}
8570
8571	fn internal_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
8572		let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new();
8573		let mut finish_shutdown = None;
8574		{
8575			let per_peer_state = self.per_peer_state.read().unwrap();
8576			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8577				.ok_or_else(|| {
8578					debug_assert!(false);
8579					MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
8580				})?;
8581			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8582			let peer_state = &mut *peer_state_lock;
8583			if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) {
8584				let phase = chan_phase_entry.get_mut();
8585				match phase {
8586					ChannelPhase::Funded(chan) => {
8587						if !chan.received_shutdown() {
8588							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
8589							log_info!(logger, "Received a shutdown message from our counterparty for channel {}{}.",
8590								msg.channel_id,
8591								if chan.sent_shutdown() { " after we initiated shutdown" } else { "" });
8592						}
8593
8594						let funding_txo_opt = chan.context.get_funding_txo();
8595						let (shutdown, monitor_update_opt, htlcs) = try_chan_phase_entry!(self, peer_state,
8596							chan.shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_phase_entry);
8597						dropped_htlcs = htlcs;
8598
8599						if let Some(msg) = shutdown {
8600							// We can send the `shutdown` message before updating the `ChannelMonitor`
8601							// here as we don't need the monitor update to complete until we send a
8602							// `shutdown_signed`, which we'll delay if we're pending a monitor update.
8603							peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
8604								node_id: *counterparty_node_id,
8605								msg,
8606							});
8607						}
8608						// Update the monitor with the shutdown script if necessary.
8609						if let Some(monitor_update) = monitor_update_opt {
8610							handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
8611								peer_state_lock, peer_state, per_peer_state, chan);
8612						}
8613					},
8614					ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) |
8615					ChannelPhase::UnfundedInboundV2(_) | ChannelPhase::UnfundedOutboundV2(_) => {
8616						let context = phase.context_mut();
8617						let logger = WithChannelContext::from(&self.logger, context, None);
8618						log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
8619						let mut close_res = phase.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
8620						remove_channel_phase!(self, peer_state, chan_phase_entry, close_res);
8621						finish_shutdown = Some(close_res);
8622					},
8623				}
8624			} else {
8625				return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8626			}
8627		}
8628		for htlc_source in dropped_htlcs.drain(..) {
8629			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id };
8630			let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
8631			self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
8632		}
8633		if let Some(shutdown_res) = finish_shutdown {
8634			self.finish_close_channel(shutdown_res);
8635		}
8636
8637		Ok(())
8638	}
8639
8640	fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
8641		let per_peer_state = self.per_peer_state.read().unwrap();
8642		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8643			.ok_or_else(|| {
8644				debug_assert!(false);
8645				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
8646			})?;
8647		let (tx, chan_option, shutdown_result) = {
8648			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8649			let peer_state = &mut *peer_state_lock;
8650			match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
8651				hash_map::Entry::Occupied(mut chan_phase_entry) => {
8652					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
8653						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
8654						let (closing_signed, tx, shutdown_result) = try_chan_phase_entry!(self, peer_state, chan.closing_signed(&self.fee_estimator, &msg, &&logger), chan_phase_entry);
8655						debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());
8656						if let Some(msg) = closing_signed {
8657							peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
8658								node_id: counterparty_node_id.clone(),
8659								msg,
8660							});
8661						}
8662						if let Some(mut close_res) = shutdown_result {
8663							// We're done with this channel, we've got a signed closing transaction and
8664							// will send the closing_signed back to the remote peer upon return. This
8665							// also implies there are no pending HTLCs left on the channel, so we can
8666							// fully delete it from tracking (the channel monitor is still around to
8667							// watch for old state broadcasts)!
8668							debug_assert!(tx.is_some());
8669							let channel_phase = remove_channel_phase!(self, peer_state, chan_phase_entry, close_res);
8670							(tx, Some(channel_phase), Some(close_res))
8671						} else {
8672							debug_assert!(tx.is_none());
8673							(tx, None, None)
8674						}
8675					} else {
8676						return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
8677							"Got a closing_signed message for an unfunded channel!".into())), chan_phase_entry);
8678					}
8679				},
8680				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8681			}
8682		};
8683		if let Some(broadcast_tx) = tx {
8684			let channel_id = chan_option.as_ref().map(|channel| channel.context().channel_id());
8685			log_info!(WithContext::from(&self.logger, Some(*counterparty_node_id), channel_id, None), "Broadcasting {}", log_tx!(broadcast_tx));
8686			self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
8687		}
8688		if let Some(ChannelPhase::Funded(chan)) = chan_option {
8689			if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
8690				let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
8691				pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
8692					msg: update
8693				});
8694			}
8695		}
8696		mem::drop(per_peer_state);
8697		if let Some(shutdown_result) = shutdown_result {
8698			self.finish_close_channel(shutdown_result);
8699		}
8700		Ok(())
8701	}
8702
8703	fn internal_update_add_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), MsgHandleErrInternal> {
8704		//TODO: BOLT 4 points out a specific attack where a peer may re-send an onion packet and
8705		//determine the state of the payment based on our response/if we forward anything/the time
8706		//we take to respond. We should take care to avoid allowing such an attack.
8707		//
8708		//TODO: There exists a further attack where a node may garble the onion data, forward it to
8709		//us repeatedly garbled in different ways, and compare our error messages, which are
8710		//encrypted with the same key. It's not immediately obvious how to usefully exploit that,
8711		//but we should prevent it anyway.
8712
8713		// Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
8714		// closing a channel), so any changes are likely to be lost on restart!
8715
8716		let decoded_hop_res = self.decode_update_add_htlc_onion(msg, counterparty_node_id);
8717		let per_peer_state = self.per_peer_state.read().unwrap();
8718		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8719			.ok_or_else(|| {
8720				debug_assert!(false);
8721				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
8722			})?;
8723		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8724		let peer_state = &mut *peer_state_lock;
8725		match peer_state.channel_by_id.entry(msg.channel_id) {
8726			hash_map::Entry::Occupied(mut chan_phase_entry) => {
8727				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
8728					let mut pending_forward_info = match decoded_hop_res {
8729						Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
8730							self.construct_pending_htlc_status(
8731								msg, counterparty_node_id, shared_secret, next_hop,
8732								chan.context.config().accept_underpaying_htlcs, next_packet_pk_opt,
8733							),
8734						Err(e) => PendingHTLCStatus::Fail(e)
8735					};
8736					let logger = WithChannelContext::from(&self.logger, &chan.context, Some(msg.payment_hash));
8737					// If the update_add is completely bogus, the call will Err and we will close,
8738					// but if we've sent a shutdown and they haven't acknowledged it yet, we just
8739					// want to reject the new HTLC and fail it backwards instead of forwarding.
8740					if let Err((_, error_code)) = chan.can_accept_incoming_htlc(&msg, &self.fee_estimator, &logger) {
8741						if msg.blinding_point.is_some() {
8742							pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
8743								msgs::UpdateFailMalformedHTLC {
8744									channel_id: msg.channel_id,
8745									htlc_id: msg.htlc_id,
8746									sha256_of_onion: [0; 32],
8747									failure_code: INVALID_ONION_BLINDING,
8748								}
8749							))
8750						} else {
8751							match pending_forward_info {
8752								PendingHTLCStatus::Forward(PendingHTLCInfo {
8753									ref incoming_shared_secret, ref routing, ..
8754								}) => {
8755									let reason = if routing.blinded_failure().is_some() {
8756										HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
8757									} else if (error_code & 0x1000) != 0 {
8758										let error_data = self.get_htlc_inbound_temp_fail_data(error_code);
8759										HTLCFailReason::reason(error_code, error_data)
8760									} else {
8761										HTLCFailReason::from_failure_code(error_code)
8762									}.get_encrypted_failure_packet(incoming_shared_secret, &None);
8763									let msg = msgs::UpdateFailHTLC {
8764										channel_id: msg.channel_id,
8765										htlc_id: msg.htlc_id,
8766										reason
8767									};
8768									pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg));
8769								},
8770								_ => {},
8771							}
8772						}
8773					}
8774					try_chan_phase_entry!(self, peer_state, chan.update_add_htlc(&msg, pending_forward_info, &self.fee_estimator), chan_phase_entry);
8775				} else {
8776					return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
8777						"Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry);
8778				}
8779			},
8780			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8781		}
8782		Ok(())
8783	}
8784
8785	fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
8786		let funding_txo;
8787		let next_user_channel_id;
8788		let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = {
8789			let per_peer_state = self.per_peer_state.read().unwrap();
8790			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8791				.ok_or_else(|| {
8792					debug_assert!(false);
8793					MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
8794				})?;
8795			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8796			let peer_state = &mut *peer_state_lock;
8797			match peer_state.channel_by_id.entry(msg.channel_id) {
8798				hash_map::Entry::Occupied(mut chan_phase_entry) => {
8799					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
8800						let res = try_chan_phase_entry!(self, peer_state, chan.update_fulfill_htlc(&msg), chan_phase_entry);
8801						if let HTLCSource::PreviousHopData(prev_hop) = &res.0 {
8802							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
8803							log_trace!(logger,
8804								"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
8805								msg.channel_id);
8806							peer_state.actions_blocking_raa_monitor_updates.entry(msg.channel_id)
8807								.or_insert_with(Vec::new)
8808								.push(RAAMonitorUpdateBlockingAction::from_prev_hop_data(&prev_hop));
8809						}
8810						// Note that we do not need to push an `actions_blocking_raa_monitor_updates`
8811						// entry here, even though we *do* need to block the next RAA monitor update.
8812						// We do this instead in the `claim_funds_internal` by attaching a
8813						// `ReleaseRAAChannelMonitorUpdate` action to the event generated when the
8814						// outbound HTLC is claimed. This is guaranteed to all complete before we
8815						// process the RAA as messages are processed from single peers serially.
8816						funding_txo = chan.context.get_funding_txo().expect("We won't accept a fulfill until funded");
8817						next_user_channel_id = chan.context.get_user_id();
8818						res
8819					} else {
8820						return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
8821							"Got an update_fulfill_htlc message for an unfunded channel!".into())), chan_phase_entry);
8822					}
8823				},
8824				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8825			}
8826		};
8827		self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(),
8828			Some(forwarded_htlc_value), skimmed_fee_msat, false, false, Some(*counterparty_node_id),
8829			funding_txo, msg.channel_id, Some(next_user_channel_id),
8830		);
8831
8832		Ok(())
8833	}
8834
8835	fn internal_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
8836		// Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
8837		// closing a channel), so any changes are likely to be lost on restart!
8838		let per_peer_state = self.per_peer_state.read().unwrap();
8839		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8840			.ok_or_else(|| {
8841				debug_assert!(false);
8842				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
8843			})?;
8844		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8845		let peer_state = &mut *peer_state_lock;
8846		match peer_state.channel_by_id.entry(msg.channel_id) {
8847			hash_map::Entry::Occupied(mut chan_phase_entry) => {
8848				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
8849					try_chan_phase_entry!(self, peer_state, chan.update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan_phase_entry);
8850				} else {
8851					return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
8852						"Got an update_fail_htlc message for an unfunded channel!".into())), chan_phase_entry);
8853				}
8854			},
8855			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8856		}
8857		Ok(())
8858	}
8859
8860	fn internal_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
8861		// Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
8862		// closing a channel), so any changes are likely to be lost on restart!
8863		let per_peer_state = self.per_peer_state.read().unwrap();
8864		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8865			.ok_or_else(|| {
8866				debug_assert!(false);
8867				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
8868			})?;
8869		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8870		let peer_state = &mut *peer_state_lock;
8871		match peer_state.channel_by_id.entry(msg.channel_id) {
8872			hash_map::Entry::Occupied(mut chan_phase_entry) => {
8873				if (msg.failure_code & 0x8000) == 0 {
8874					let chan_err = ChannelError::close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
8875					try_chan_phase_entry!(self, peer_state, Err(chan_err), chan_phase_entry);
8876				}
8877				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
8878					try_chan_phase_entry!(self, peer_state, chan.update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan_phase_entry);
8879				} else {
8880					return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
8881						"Got an update_fail_malformed_htlc message for an unfunded channel!".into())), chan_phase_entry);
8882				}
8883				Ok(())
8884			},
8885			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8886		}
8887	}
8888
8889	fn internal_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> {
8890		let best_block = *self.best_block.read().unwrap();
8891		let per_peer_state = self.per_peer_state.read().unwrap();
8892		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8893			.ok_or_else(|| {
8894				debug_assert!(false);
8895				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
8896			})?;
8897		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8898		let peer_state = &mut *peer_state_lock;
8899		match peer_state.channel_by_id.entry(msg.channel_id) {
8900			hash_map::Entry::Occupied(mut chan_phase_entry) => {
8901				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
8902					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
8903					let funding_txo = chan.context.get_funding_txo();
8904
8905					if chan.interactive_tx_signing_session.is_some() {
8906						let monitor = try_chan_phase_entry!(
8907							self, peer_state, chan.commitment_signed_initial_v2(msg, best_block, &self.signer_provider, &&logger),
8908							chan_phase_entry);
8909						let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
8910						if let Ok(persist_state) = monitor_res {
8911							handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
8912								per_peer_state, chan, INITIAL_MONITOR);
8913						} else {
8914							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
8915							log_error!(logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
8916							try_chan_phase_entry!(self, peer_state, Err(ChannelError::Close(
8917								(
8918									"Channel funding outpoint was a duplicate".to_owned(),
8919									ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) },
8920								)
8921							)), chan_phase_entry)
8922						}
8923					} else {
8924						let monitor_update_opt = try_chan_phase_entry!(
8925							self, peer_state, chan.commitment_signed(msg, &&logger), chan_phase_entry);
8926						if let Some(monitor_update) = monitor_update_opt {
8927							handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
8928								peer_state, per_peer_state, chan);
8929						}
8930					}
8931					Ok(())
8932				} else {
8933					return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
8934						"Got a commitment_signed message for an unfunded channel!".into())), chan_phase_entry);
8935				}
8936			},
8937			hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8938		}
8939	}
8940
8941	fn push_decode_update_add_htlcs(&self, mut update_add_htlcs: (u64, Vec<msgs::UpdateAddHTLC>)) {
8942		let mut push_forward_event = self.forward_htlcs.lock().unwrap().is_empty();
8943		let mut decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
8944		push_forward_event &= decode_update_add_htlcs.is_empty();
8945		let scid = update_add_htlcs.0;
8946		match decode_update_add_htlcs.entry(scid) {
8947			hash_map::Entry::Occupied(mut e) => { e.get_mut().append(&mut update_add_htlcs.1); },
8948			hash_map::Entry::Vacant(e) => { e.insert(update_add_htlcs.1); },
8949		}
8950		if push_forward_event { self.push_pending_forwards_ev(); }
8951	}
8952
8953	#[inline]
8954	fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, Option<PublicKey>, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) {
8955		let push_forward_event = self.forward_htlcs_without_forward_event(per_source_pending_forwards);
8956		if push_forward_event { self.push_pending_forwards_ev() }
8957	}
8958
8959	#[inline]
8960	fn forward_htlcs_without_forward_event(&self, per_source_pending_forwards: &mut [(u64, Option<PublicKey>, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) -> bool {
8961		let mut push_forward_event = false;
8962		for &mut (prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
8963			let mut new_intercept_events = VecDeque::new();
8964			let mut failed_intercept_forwards = Vec::new();
8965			if !pending_forwards.is_empty() {
8966				for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
8967					let scid = match forward_info.routing {
8968						PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
8969						PendingHTLCRouting::Receive { .. } => 0,
8970						PendingHTLCRouting::ReceiveKeysend { .. } => 0,
8971					};
8972					// Pull this now to avoid introducing a lock order with `forward_htlcs`.
8973					let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid);
8974
8975					let decode_update_add_htlcs_empty = self.decode_update_add_htlcs.lock().unwrap().is_empty();
8976					let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
8977					let forward_htlcs_empty = forward_htlcs.is_empty();
8978					match forward_htlcs.entry(scid) {
8979						hash_map::Entry::Occupied(mut entry) => {
8980							entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
8981								prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint,
8982								prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info
8983							}));
8984						},
8985						hash_map::Entry::Vacant(entry) => {
8986							if !is_our_scid && forward_info.incoming_amt_msat.is_some() &&
8987							   fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, scid, &self.chain_hash)
8988							{
8989								let intercept_id = InterceptId(Sha256::hash(&forward_info.incoming_shared_secret).to_byte_array());
8990								let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
8991								match pending_intercepts.entry(intercept_id) {
8992									hash_map::Entry::Vacant(entry) => {
8993										new_intercept_events.push_back((events::Event::HTLCIntercepted {
8994											requested_next_hop_scid: scid,
8995											payment_hash: forward_info.payment_hash,
8996											inbound_amount_msat: forward_info.incoming_amt_msat.unwrap(),
8997											expected_outbound_amount_msat: forward_info.outgoing_amt_msat,
8998											intercept_id
8999										}, None));
9000										entry.insert(PendingAddHTLCInfo {
9001											prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint,
9002											prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info
9003										});
9004									},
9005									hash_map::Entry::Occupied(_) => {
9006										let logger = WithContext::from(&self.logger, None, Some(prev_channel_id), Some(forward_info.payment_hash));
9007										log_info!(logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
9008										let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
9009											short_channel_id: prev_short_channel_id,
9010											user_channel_id: Some(prev_user_channel_id),
9011											counterparty_node_id: prev_counterparty_node_id,
9012											outpoint: prev_funding_outpoint,
9013											channel_id: prev_channel_id,
9014											htlc_id: prev_htlc_id,
9015											incoming_packet_shared_secret: forward_info.incoming_shared_secret,
9016											phantom_shared_secret: None,
9017											blinded_failure: forward_info.routing.blinded_failure(),
9018											cltv_expiry: forward_info.routing.incoming_cltv_expiry(),
9019										});
9020
9021										failed_intercept_forwards.push((htlc_source, forward_info.payment_hash,
9022												HTLCFailReason::from_failure_code(0x4000 | 10),
9023												HTLCDestination::InvalidForward { requested_forward_scid: scid },
9024										));
9025									}
9026								}
9027							} else {
9028								// We don't want to generate a PendingHTLCsForwardable event if only intercepted
9029								// payments are being processed.
9030								push_forward_event |= forward_htlcs_empty && decode_update_add_htlcs_empty;
9031								entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
9032									prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint,
9033									prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info
9034								})));
9035							}
9036						}
9037					}
9038				}
9039			}
9040
9041			for (htlc_source, payment_hash, failure_reason, destination) in failed_intercept_forwards.drain(..) {
9042				push_forward_event |= self.fail_htlc_backwards_internal_without_forward_event(&htlc_source, &payment_hash, &failure_reason, destination);
9043			}
9044
9045			if !new_intercept_events.is_empty() {
9046				let mut events = self.pending_events.lock().unwrap();
9047				events.append(&mut new_intercept_events);
9048			}
9049		}
9050		push_forward_event
9051	}
9052
9053	fn push_pending_forwards_ev(&self) {
9054		let mut pending_events = self.pending_events.lock().unwrap();
9055		let is_processing_events = self.pending_events_processor.load(Ordering::Acquire);
9056		let num_forward_events = pending_events.iter().filter(|(ev, _)|
9057			if let events::Event::PendingHTLCsForwardable { .. } = ev { true } else { false }
9058		).count();
9059		// We only want to push a PendingHTLCsForwardable event if no others are queued. Processing
9060		// events is done in batches and they are not removed until we're done processing each
9061		// batch. Since handling a `PendingHTLCsForwardable` event will call back into the
9062		// `ChannelManager`, we'll still see the original forwarding event not removed. Phantom
9063		// payments will need an additional forwarding event before being claimed to make them look
9064		// real by taking more time.
9065		if (is_processing_events && num_forward_events <= 1) || num_forward_events < 1 {
9066			pending_events.push_back((Event::PendingHTLCsForwardable {
9067				time_forwardable: Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS),
9068			}, None));
9069		}
9070	}
9071
9072	/// Checks whether [`ChannelMonitorUpdate`]s generated by the receipt of a remote
9073	/// [`msgs::RevokeAndACK`] should be held for the given channel until some other action
9074	/// completes. Note that this needs to happen in the same [`PeerState`] mutex as any release of
9075	/// the [`ChannelMonitorUpdate`] in question.
9076	fn raa_monitor_updates_held(&self,
9077		actions_blocking_raa_monitor_updates: &BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
9078		channel_funding_outpoint: OutPoint, channel_id: ChannelId, counterparty_node_id: PublicKey
9079	) -> bool {
9080		actions_blocking_raa_monitor_updates
9081			.get(&channel_id).map(|v| !v.is_empty()).unwrap_or(false)
9082		|| self.pending_events.lock().unwrap().iter().any(|(_, action)| {
9083			action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
9084				channel_funding_outpoint,
9085				channel_id,
9086				counterparty_node_id,
9087			})
9088		})
9089	}
9090
9091	#[cfg(any(test, feature = "_test_utils"))]
9092	pub(crate) fn test_raa_monitor_updates_held(&self,
9093		counterparty_node_id: PublicKey, channel_id: ChannelId
9094	) -> bool {
9095		let per_peer_state = self.per_peer_state.read().unwrap();
9096		if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
9097			let mut peer_state_lck = peer_state_mtx.lock().unwrap();
9098			let peer_state = &mut *peer_state_lck;
9099
9100			if let Some(chan) = peer_state.channel_by_id.get(&channel_id) {
9101				return self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
9102					chan.context().get_funding_txo().unwrap(), channel_id, counterparty_node_id);
9103			}
9104		}
9105		false
9106	}
9107
9108	fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
9109		let htlcs_to_fail = {
9110			let per_peer_state = self.per_peer_state.read().unwrap();
9111			let mut peer_state_lock = per_peer_state.get(counterparty_node_id)
9112				.ok_or_else(|| {
9113					debug_assert!(false);
9114					MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
9115				}).map(|mtx| mtx.lock().unwrap())?;
9116			let peer_state = &mut *peer_state_lock;
9117			match peer_state.channel_by_id.entry(msg.channel_id) {
9118				hash_map::Entry::Occupied(mut chan_phase_entry) => {
9119					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
9120						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
9121						let funding_txo_opt = chan.context.get_funding_txo();
9122						let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
9123							self.raa_monitor_updates_held(
9124								&peer_state.actions_blocking_raa_monitor_updates, funding_txo, msg.channel_id,
9125								*counterparty_node_id)
9126						} else { false };
9127						let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self, peer_state,
9128							chan.revoke_and_ack(&msg, &self.fee_estimator, &&logger, mon_update_blocked), chan_phase_entry);
9129						if let Some(monitor_update) = monitor_update_opt {
9130							let funding_txo = funding_txo_opt
9131								.expect("Funding outpoint must have been set for RAA handling to succeed");
9132							handle_new_monitor_update!(self, funding_txo, monitor_update,
9133								peer_state_lock, peer_state, per_peer_state, chan);
9134						}
9135						htlcs_to_fail
9136					} else {
9137						return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
9138							"Got a revoke_and_ack message for an unfunded channel!".into())), chan_phase_entry);
9139					}
9140				},
9141				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
9142			}
9143		};
9144		self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id);
9145		Ok(())
9146	}
9147
9148	fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
9149		let per_peer_state = self.per_peer_state.read().unwrap();
9150		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
9151			.ok_or_else(|| {
9152				debug_assert!(false);
9153				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
9154			})?;
9155		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
9156		let peer_state = &mut *peer_state_lock;
9157		match peer_state.channel_by_id.entry(msg.channel_id) {
9158			hash_map::Entry::Occupied(mut chan_phase_entry) => {
9159				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
9160					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
9161					try_chan_phase_entry!(self, peer_state, chan.update_fee(&self.fee_estimator, &msg, &&logger), chan_phase_entry);
9162				} else {
9163					return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
9164						"Got an update_fee message for an unfunded channel!".into())), chan_phase_entry);
9165				}
9166			},
9167			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
9168		}
9169		Ok(())
9170	}
9171
9172	fn internal_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> {
9173		let per_peer_state = self.per_peer_state.read().unwrap();
9174		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
9175			.ok_or_else(|| {
9176				debug_assert!(false);
9177				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
9178			})?;
9179		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
9180		let peer_state = &mut *peer_state_lock;
9181		match peer_state.channel_by_id.entry(msg.channel_id) {
9182			hash_map::Entry::Occupied(mut chan_phase_entry) => {
9183				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
9184					if !chan.context.is_usable() {
9185						return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
9186					}
9187
9188					peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
9189						msg: try_chan_phase_entry!(self, peer_state, chan.announcement_signatures(
9190							&self.node_signer, self.chain_hash, self.best_block.read().unwrap().height,
9191							msg, &self.default_configuration
9192						), chan_phase_entry),
9193						// Note that announcement_signatures fails if the channel cannot be announced,
9194						// so get_channel_update_for_broadcast will never fail by the time we get here.
9195						update_msg: Some(self.get_channel_update_for_broadcast(chan).unwrap()),
9196					});
9197				} else {
9198					return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
9199						"Got an announcement_signatures message for an unfunded channel!".into())), chan_phase_entry);
9200				}
9201			},
9202			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
9203		}
9204		Ok(())
9205	}
9206
9207	/// Returns DoPersist if anything changed, otherwise either SkipPersistNoEvents or an Err.
9208	fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
9209		let (chan_counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&msg.contents.short_channel_id) {
9210			Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
9211			None => {
9212				// It's not a local channel
9213				return Ok(NotifyOption::SkipPersistNoEvents)
9214			}
9215		};
9216		let per_peer_state = self.per_peer_state.read().unwrap();
9217		let peer_state_mutex_opt = per_peer_state.get(&chan_counterparty_node_id);
9218		if peer_state_mutex_opt.is_none() {
9219			return Ok(NotifyOption::SkipPersistNoEvents)
9220		}
9221		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
9222		let peer_state = &mut *peer_state_lock;
9223		match peer_state.channel_by_id.entry(chan_id) {
9224			hash_map::Entry::Occupied(mut chan_phase_entry) => {
9225				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
9226					if chan.context.get_counterparty_node_id() != *counterparty_node_id {
9227						if chan.context.should_announce() {
9228							// If the announcement is about a channel of ours which is public, some
9229							// other peer may simply be forwarding all its gossip to us. Don't provide
9230							// a scary-looking error message and return Ok instead.
9231							return Ok(NotifyOption::SkipPersistNoEvents);
9232						}
9233						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
9234					}
9235					let were_node_one = self.get_our_node_id().serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
9236					let msg_from_node_one = msg.contents.channel_flags & 1 == 0;
9237					if were_node_one == msg_from_node_one {
9238						return Ok(NotifyOption::SkipPersistNoEvents);
9239					} else {
9240						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
9241						log_debug!(logger, "Received channel_update {:?} for channel {}.", msg, chan_id);
9242						let did_change = try_chan_phase_entry!(self, peer_state, chan.channel_update(&msg), chan_phase_entry);
9243						// If nothing changed after applying their update, we don't need to bother
9244						// persisting.
9245						if !did_change {
9246							return Ok(NotifyOption::SkipPersistNoEvents);
9247						}
9248					}
9249				} else {
9250					return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
9251						"Got a channel_update for an unfunded channel!".into())), chan_phase_entry);
9252				}
9253			},
9254			hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersistNoEvents)
9255		}
9256		Ok(NotifyOption::DoPersist)
9257	}
9258
9259	fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<NotifyOption, MsgHandleErrInternal> {
9260		let need_lnd_workaround = {
9261			let per_peer_state = self.per_peer_state.read().unwrap();
9262
9263			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
9264				.ok_or_else(|| {
9265					debug_assert!(false);
9266					MsgHandleErrInternal::send_err_msg_no_close(
9267						format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
9268						msg.channel_id
9269					)
9270				})?;
9271			let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), None);
9272			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
9273			let peer_state = &mut *peer_state_lock;
9274			match peer_state.channel_by_id.entry(msg.channel_id) {
9275				hash_map::Entry::Occupied(mut chan_phase_entry) => {
9276					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
9277						// Currently, we expect all holding cell update_adds to be dropped on peer
9278						// disconnect, so Channel's reestablish will never hand us any holding cell
9279						// freed HTLCs to fail backwards. If in the future we no longer drop pending
9280						// add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
9281						let responses = try_chan_phase_entry!(self, peer_state, chan.channel_reestablish(
9282							msg, &&logger, &self.node_signer, self.chain_hash,
9283							&self.default_configuration, &*self.best_block.read().unwrap()), chan_phase_entry);
9284						let mut channel_update = None;
9285						if let Some(msg) = responses.shutdown_msg {
9286							peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
9287								node_id: counterparty_node_id.clone(),
9288								msg,
9289							});
9290						} else if chan.context.is_usable() {
9291							// If the channel is in a usable state (ie the channel is not being shut
9292							// down), send a unicast channel_update to our counterparty to make sure
9293							// they have the latest channel parameters.
9294							if let Ok(msg) = self.get_channel_update_for_unicast(chan) {
9295								channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
9296									node_id: chan.context.get_counterparty_node_id(),
9297									msg,
9298								});
9299							}
9300						}
9301						let need_lnd_workaround = chan.context.workaround_lnd_bug_4006.take();
9302						let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption(
9303							&mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.order,
9304							Vec::new(), Vec::new(), None, responses.channel_ready, responses.announcement_sigs, None);
9305						debug_assert!(htlc_forwards.is_none());
9306						debug_assert!(decode_update_add_htlcs.is_none());
9307						if let Some(upd) = channel_update {
9308							peer_state.pending_msg_events.push(upd);
9309						}
9310						need_lnd_workaround
9311					} else {
9312						return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
9313							"Got a channel_reestablish message for an unfunded channel!".into())), chan_phase_entry);
9314					}
9315				},
9316				hash_map::Entry::Vacant(_) => {
9317					log_debug!(logger, "Sending bogus ChannelReestablish for unknown channel {} to force channel closure",
9318						msg.channel_id);
9319					// Unfortunately, lnd doesn't force close on errors
9320					// (https://github.com/lightningnetwork/lnd/blob/abb1e3463f3a83bbb843d5c399869dbe930ad94f/htlcswitch/link.go#L2119).
9321					// One of the few ways to get an lnd counterparty to force close is by
9322					// replicating what they do when restoring static channel backups (SCBs). They
9323					// send an invalid `ChannelReestablish` with `0` commitment numbers and an
9324					// invalid `your_last_per_commitment_secret`.
9325					//
9326					// Since we received a `ChannelReestablish` for a channel that doesn't exist, we
9327					// can assume it's likely the channel closed from our point of view, but it
9328					// remains open on the counterparty's side. By sending this bogus
9329					// `ChannelReestablish` message now as a response to theirs, we trigger them to
9330					// force close broadcasting their latest state. If the closing transaction from
9331					// our point of view remains unconfirmed, it'll enter a race with the
9332					// counterparty's to-be-broadcast latest commitment transaction.
9333					peer_state.pending_msg_events.push(MessageSendEvent::SendChannelReestablish {
9334						node_id: *counterparty_node_id,
9335						msg: msgs::ChannelReestablish {
9336							channel_id: msg.channel_id,
9337							next_local_commitment_number: 0,
9338							next_remote_commitment_number: 0,
9339							your_last_per_commitment_secret: [1u8; 32],
9340							my_current_per_commitment_point: PublicKey::from_slice(&[2u8; 33]).unwrap(),
9341							next_funding_txid: None,
9342						},
9343					});
9344					return Err(MsgHandleErrInternal::send_err_msg_no_close(
9345						format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
9346							counterparty_node_id), msg.channel_id)
9347					)
9348				}
9349			}
9350		};
9351
9352		if let Some(channel_ready_msg) = need_lnd_workaround {
9353			self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
9354		}
9355		Ok(NotifyOption::SkipPersistHandleEvents)
9356	}
9357
9358	/// Process pending events from the [`chain::Watch`], returning whether any events were processed.
9359	fn process_pending_monitor_events(&self) -> bool {
9360		debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
9361
9362		let mut failed_channels = Vec::new();
9363		let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
9364		let has_pending_monitor_events = !pending_monitor_events.is_empty();
9365		for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
9366			for monitor_event in monitor_events.drain(..) {
9367				match monitor_event {
9368					MonitorEvent::HTLCEvent(htlc_update) => {
9369						let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id), Some(htlc_update.payment_hash));
9370						if let Some(preimage) = htlc_update.payment_preimage {
9371							log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
9372							self.claim_funds_internal(htlc_update.source, preimage,
9373								htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
9374								false, counterparty_node_id, funding_outpoint, channel_id, None);
9375						} else {
9376							log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
9377							let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
9378							let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
9379							self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
9380						}
9381					},
9382					MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => {
9383						let counterparty_node_id_opt = match counterparty_node_id {
9384							Some(cp_id) => Some(cp_id),
9385							None => {
9386								// TODO: Once we can rely on the counterparty_node_id from the
9387								// monitor event, this and the outpoint_to_peer map should be removed.
9388								let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
9389								outpoint_to_peer.get(&funding_outpoint).cloned()
9390							}
9391						};
9392						if let Some(counterparty_node_id) = counterparty_node_id_opt {
9393							let per_peer_state = self.per_peer_state.read().unwrap();
9394							if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
9395								let mut peer_state_lock = peer_state_mutex.lock().unwrap();
9396								let peer_state = &mut *peer_state_lock;
9397								let pending_msg_events = &mut peer_state.pending_msg_events;
9398								if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_id) {
9399									let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
9400										reason
9401									} else {
9402										ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
9403									};
9404									let mut shutdown_res = chan_phase_entry.get_mut().context_mut().force_shutdown(false, reason.clone());
9405									let chan_phase = remove_channel_phase!(self, peer_state, chan_phase_entry, shutdown_res);
9406									failed_channels.push(shutdown_res);
9407									if let ChannelPhase::Funded(chan) = chan_phase {
9408										if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
9409											let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
9410											pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
9411												msg: update
9412											});
9413										}
9414										pending_msg_events.push(events::MessageSendEvent::HandleError {
9415											node_id: chan.context.get_counterparty_node_id(),
9416											action: msgs::ErrorAction::DisconnectPeer {
9417												msg: Some(msgs::ErrorMessage {
9418													channel_id: chan.context.channel_id(),
9419													data: reason.to_string()
9420												})
9421											},
9422										});
9423									}
9424								}
9425							}
9426						}
9427					},
9428					MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id } => {
9429						self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, counterparty_node_id.as_ref());
9430					},
9431				}
9432			}
9433		}
9434
9435		for failure in failed_channels.drain(..) {
9436			self.finish_close_channel(failure);
9437		}
9438
9439		has_pending_monitor_events
9440	}
9441
9442	/// In chanmon_consistency_target, we'd like to be able to restore monitor updating without
9443	/// handling all pending events (i.e. not PendingHTLCsForwardable). Thus, we expose monitor
9444	/// update events as a separate process method here.
9445	#[cfg(fuzzing)]
9446	pub fn process_monitor_events(&self) {
9447		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
9448		self.process_pending_monitor_events();
9449	}
9450
9451	/// Check the holding cell in each channel and free any pending HTLCs in them if possible.
9452	/// Returns whether there were any updates such as if pending HTLCs were freed or a monitor
9453	/// update was applied.
9454	fn check_free_holding_cells(&self) -> bool {
9455		let mut has_monitor_update = false;
9456		let mut failed_htlcs = Vec::new();
9457
9458		// Walk our list of channels and find any that need to update. Note that when we do find an
9459		// update, if it includes actions that must be taken afterwards, we have to drop the
9460		// per-peer state lock as well as the top level per_peer_state lock. Thus, we loop until we
9461		// manage to go through all our peers without finding a single channel to update.
9462		'peer_loop: loop {
9463			let per_peer_state = self.per_peer_state.read().unwrap();
9464			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
9465				'chan_loop: loop {
9466					let mut peer_state_lock = peer_state_mutex.lock().unwrap();
9467					let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
9468					for (channel_id, chan) in peer_state.channel_by_id.iter_mut().filter_map(
9469						|(chan_id, phase)| if let ChannelPhase::Funded(chan) = phase { Some((chan_id, chan)) } else { None }
9470					) {
9471						let counterparty_node_id = chan.context.get_counterparty_node_id();
9472						let funding_txo = chan.context.get_funding_txo();
9473						let (monitor_opt, holding_cell_failed_htlcs) =
9474							chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &&WithChannelContext::from(&self.logger, &chan.context, None));
9475						if !holding_cell_failed_htlcs.is_empty() {
9476							failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
9477						}
9478						if let Some(monitor_update) = monitor_opt {
9479							has_monitor_update = true;
9480
9481							handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
9482								peer_state_lock, peer_state, per_peer_state, chan);
9483							continue 'peer_loop;
9484						}
9485					}
9486					break 'chan_loop;
9487				}
9488			}
9489			break 'peer_loop;
9490		}
9491
9492		let has_update = has_monitor_update || !failed_htlcs.is_empty();
9493		for (failures, channel_id, counterparty_node_id) in failed_htlcs.drain(..) {
9494			self.fail_holding_cell_htlcs(failures, channel_id, &counterparty_node_id);
9495		}
9496
9497		has_update
9498	}
9499
9500	/// When a call to a [`ChannelSigner`] method returns an error, this indicates that the signer
9501	/// is (temporarily) unavailable, and the operation should be retried later.
9502	///
9503	/// This method allows for that retry - either checking for any signer-pending messages to be
9504	/// attempted in every channel, or in the specifically provided channel.
9505	///
9506	/// [`ChannelSigner`]: crate::sign::ChannelSigner
9507	pub fn signer_unblocked(&self, channel_opt: Option<(PublicKey, ChannelId)>) {
9508		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
9509
9510		// Returns whether we should remove this channel as it's just been closed.
9511		let unblock_chan = |phase: &mut ChannelPhase<SP>, pending_msg_events: &mut Vec<MessageSendEvent>| -> Option<ShutdownResult> {
9512			let node_id = phase.context().get_counterparty_node_id();
9513			match phase {
9514				ChannelPhase::Funded(chan) => {
9515					let msgs = chan.signer_maybe_unblocked(&self.logger);
9516					let cu_msg = msgs.commitment_update.map(|updates| events::MessageSendEvent::UpdateHTLCs {
9517						node_id,
9518						updates,
9519					});
9520					let raa_msg = msgs.revoke_and_ack.map(|msg| events::MessageSendEvent::SendRevokeAndACK {
9521						node_id,
9522						msg,
9523					});
9524					match (cu_msg, raa_msg) {
9525						(Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::CommitmentFirst => {
9526							pending_msg_events.push(cu);
9527							pending_msg_events.push(raa);
9528						},
9529						(Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::RevokeAndACKFirst => {
9530							pending_msg_events.push(raa);
9531							pending_msg_events.push(cu);
9532						},
9533						(Some(cu), _) => pending_msg_events.push(cu),
9534						(_, Some(raa)) => pending_msg_events.push(raa),
9535						(_, _) => {},
9536					}
9537					if let Some(msg) = msgs.funding_signed {
9538						pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
9539							node_id,
9540							msg,
9541						});
9542					}
9543					if let Some(msg) = msgs.channel_ready {
9544						send_channel_ready!(self, pending_msg_events, chan, msg);
9545					}
9546					if let Some(msg) = msgs.closing_signed {
9547						pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
9548							node_id,
9549							msg,
9550						});
9551					}
9552					if let Some(broadcast_tx) = msgs.signed_closing_tx {
9553						let channel_id = chan.context.channel_id();
9554						let counterparty_node_id = chan.context.get_counterparty_node_id();
9555						let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None);
9556						log_info!(logger, "Broadcasting closing tx {}", log_tx!(broadcast_tx));
9557						self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
9558
9559						if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
9560							pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
9561								msg: update
9562							});
9563						}
9564					}
9565					msgs.shutdown_result
9566				}
9567				ChannelPhase::UnfundedOutboundV1(chan) => {
9568					let (open_channel, funding_created) = chan.signer_maybe_unblocked(self.chain_hash.clone(), &self.logger);
9569					if let Some(msg) = open_channel {
9570						pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
9571							node_id,
9572							msg,
9573						});
9574					}
9575					if let Some(msg) = funding_created {
9576						pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
9577							node_id,
9578							msg,
9579						});
9580					}
9581					None
9582				}
9583				ChannelPhase::UnfundedInboundV1(chan) => {
9584					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
9585					if let Some(msg) = chan.signer_maybe_unblocked(&&logger) {
9586						pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
9587							node_id,
9588							msg,
9589						});
9590					}
9591					None
9592				},
9593				ChannelPhase::UnfundedInboundV2(_) | ChannelPhase::UnfundedOutboundV2(_) => None,
9594			}
9595		};
9596
9597		let mut shutdown_results = Vec::new();
9598		let per_peer_state = self.per_peer_state.read().unwrap();
9599		let per_peer_state_iter = per_peer_state.iter().filter(|(cp_id, _)| {
9600			if let Some((counterparty_node_id, _)) = channel_opt {
9601				**cp_id == counterparty_node_id
9602			} else { true }
9603		});
9604		for (_cp_id, peer_state_mutex) in per_peer_state_iter {
9605			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
9606			let peer_state = &mut *peer_state_lock;
9607			peer_state.channel_by_id.retain(|_, chan| {
9608				let shutdown_result = match channel_opt {
9609					Some((_, channel_id)) if chan.context().channel_id() != channel_id => None,
9610					_ => unblock_chan(chan, &mut peer_state.pending_msg_events),
9611				};
9612				if let Some(mut shutdown_result) = shutdown_result {
9613					let context = &chan.context();
9614					let logger = WithChannelContext::from(&self.logger, context, None);
9615					log_trace!(logger, "Removing channel {} now that the signer is unblocked", context.channel_id());
9616					locked_close_channel!(self, peer_state, context, shutdown_result);
9617					shutdown_results.push(shutdown_result);
9618					false
9619				} else {
9620					true
9621				}
9622			});
9623		}
9624		drop(per_peer_state);
9625		for shutdown_result in shutdown_results.drain(..) {
9626			self.finish_close_channel(shutdown_result);
9627		}
9628	}
9629
9630	/// Check whether any channels have finished removing all pending updates after a shutdown
9631	/// exchange and can now send a closing_signed.
9632	/// Returns whether any closing_signed messages were generated.
9633	fn maybe_generate_initial_closing_signed(&self) -> bool {
9634		let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new();
9635		let mut has_update = false;
9636		let mut shutdown_results = Vec::new();
9637		{
9638			let per_peer_state = self.per_peer_state.read().unwrap();
9639
9640			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
9641				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
9642				let peer_state = &mut *peer_state_lock;
9643				let pending_msg_events = &mut peer_state.pending_msg_events;
9644				peer_state.channel_by_id.retain(|channel_id, phase| {
9645					match phase {
9646						ChannelPhase::Funded(chan) => {
9647							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
9648							match chan.maybe_propose_closing_signed(&self.fee_estimator, &&logger) {
9649								Ok((msg_opt, tx_opt, shutdown_result_opt)) => {
9650									if let Some(msg) = msg_opt {
9651										has_update = true;
9652										pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
9653											node_id: chan.context.get_counterparty_node_id(), msg,
9654										});
9655									}
9656									debug_assert_eq!(shutdown_result_opt.is_some(), chan.is_shutdown());
9657									if let Some(mut shutdown_result) = shutdown_result_opt {
9658										locked_close_channel!(self, peer_state, &chan.context, shutdown_result);
9659										shutdown_results.push(shutdown_result);
9660									}
9661									if let Some(tx) = tx_opt {
9662										// We're done with this channel. We got a closing_signed and sent back
9663										// a closing_signed with a closing transaction to broadcast.
9664										if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
9665											let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
9666											pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
9667												msg: update
9668											});
9669										}
9670
9671										log_info!(logger, "Broadcasting {}", log_tx!(tx));
9672										self.tx_broadcaster.broadcast_transactions(&[&tx]);
9673										false
9674									} else { true }
9675								},
9676								Err(e) => {
9677									has_update = true;
9678									let (close_channel, res) = convert_chan_phase_err!(self, peer_state, e, chan, channel_id, FUNDED_CHANNEL);
9679									handle_errors.push((chan.context.get_counterparty_node_id(), Err(res)));
9680									!close_channel
9681								}
9682							}
9683						},
9684						_ => true, // Retain unfunded channels if present.
9685					}
9686				});
9687			}
9688		}
9689
9690		for (counterparty_node_id, err) in handle_errors.drain(..) {
9691			let _ = handle_error!(self, err, counterparty_node_id);
9692		}
9693
9694		for shutdown_result in shutdown_results.drain(..) {
9695			self.finish_close_channel(shutdown_result);
9696		}
9697
9698		has_update
9699	}
9700
9701	/// Utility for creating a BOLT11 invoice that can be verified by [`ChannelManager`] without
9702	/// storing any additional state. It achieves this by including a [`PaymentSecret`] in the
9703	/// invoice which it uses to verify that the invoice has not expired and the payment amount is
9704	/// sufficient, reproducing the [`PaymentPreimage`] if applicable.
9705	pub fn create_bolt11_invoice(
9706		&self, params: Bolt11InvoiceParameters,
9707	) -> Result<Bolt11Invoice, SignOrCreationError<()>> {
9708		let Bolt11InvoiceParameters {
9709			amount_msats, description, invoice_expiry_delta_secs, min_final_cltv_expiry_delta,
9710			payment_hash,
9711		} = params;
9712
9713		let currency =
9714			Network::from_chain_hash(self.chain_hash).map(Into::into).unwrap_or(Currency::Bitcoin);
9715
9716		#[cfg(feature = "std")]
9717		let duration_since_epoch = {
9718			use std::time::SystemTime;
9719			SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)
9720				.expect("SystemTime::now() should be after SystemTime::UNIX_EPOCH")
9721		};
9722
9723		// This may be up to 2 hours in the future because of bitcoin's block time rule or about
9724		// 10-30 minutes in the past if a block hasn't been found recently. This should be fine as
9725		// the default invoice expiration is 2 hours, though shorter expirations may be problematic.
9726		#[cfg(not(feature = "std"))]
9727		let duration_since_epoch =
9728			Duration::from_secs(self.highest_seen_timestamp.load(Ordering::Acquire) as u64);
9729
9730		if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta {
9731			if min_final_cltv_expiry_delta.saturating_add(3) < MIN_FINAL_CLTV_EXPIRY_DELTA {
9732				return Err(SignOrCreationError::CreationError(CreationError::MinFinalCltvExpiryDeltaTooShort));
9733			}
9734		}
9735
9736		let (payment_hash, payment_secret) = match payment_hash {
9737			Some(payment_hash) => {
9738				let payment_secret = self
9739					.create_inbound_payment_for_hash(
9740						payment_hash, amount_msats,
9741						invoice_expiry_delta_secs.unwrap_or(DEFAULT_EXPIRY_TIME as u32),
9742						min_final_cltv_expiry_delta,
9743					)
9744					.map_err(|()| SignOrCreationError::CreationError(CreationError::InvalidAmount))?;
9745				(payment_hash, payment_secret)
9746			},
9747			None => {
9748				self
9749					.create_inbound_payment(
9750						amount_msats, invoice_expiry_delta_secs.unwrap_or(DEFAULT_EXPIRY_TIME as u32),
9751						min_final_cltv_expiry_delta,
9752					)
9753					.map_err(|()| SignOrCreationError::CreationError(CreationError::InvalidAmount))?
9754			},
9755		};
9756
9757		log_trace!(self.logger, "Creating invoice with payment hash {}", &payment_hash);
9758
9759		let invoice = Bolt11InvoiceBuilder::new(currency);
9760		let invoice = match description {
9761			Bolt11InvoiceDescription::Direct(description) => invoice.description(description.into_inner().0),
9762			Bolt11InvoiceDescription::Hash(hash) => invoice.description_hash(hash.0),
9763		};
9764
9765		let mut invoice = invoice
9766			.duration_since_epoch(duration_since_epoch)
9767			.payee_pub_key(self.get_our_node_id())
9768			.payment_hash(Hash::from_slice(&payment_hash.0).unwrap())
9769			.payment_secret(payment_secret)
9770			.basic_mpp()
9771			.min_final_cltv_expiry_delta(
9772				// Add a buffer of 3 to the delta if present, otherwise use LDK's minimum.
9773				min_final_cltv_expiry_delta.map(|x| x.saturating_add(3)).unwrap_or(MIN_FINAL_CLTV_EXPIRY_DELTA).into()
9774			);
9775
9776		if let Some(invoice_expiry_delta_secs) = invoice_expiry_delta_secs{
9777			invoice = invoice.expiry_time(Duration::from_secs(invoice_expiry_delta_secs.into()));
9778		}
9779
9780		if let Some(amount_msats) = amount_msats {
9781			invoice = invoice.amount_milli_satoshis(amount_msats);
9782		}
9783
9784		let channels = self.list_channels();
9785		let route_hints = super::invoice_utils::sort_and_filter_channels(channels, amount_msats, &self.logger);
9786		for hint in route_hints {
9787			invoice = invoice.private_route(hint);
9788		}
9789
9790		let raw_invoice = invoice.build_raw().map_err(|e| SignOrCreationError::CreationError(e))?;
9791		let signature = self.node_signer.sign_invoice(&raw_invoice, Recipient::Node);
9792
9793		raw_invoice
9794			.sign(|_| signature)
9795			.map(|invoice| Bolt11Invoice::from_signed(invoice).unwrap())
9796			.map_err(|e| SignOrCreationError::SignError(e))
9797	}
9798}
9799
9800/// Parameters used with [`create_bolt11_invoice`].
9801///
9802/// [`create_bolt11_invoice`]: ChannelManager::create_bolt11_invoice
9803pub struct Bolt11InvoiceParameters {
9804	/// The amount for the invoice, if any.
9805	pub amount_msats: Option<u64>,
9806
9807	/// The description for what the invoice is for, or hash of such description.
9808	pub description: Bolt11InvoiceDescription,
9809
9810	/// The invoice expiration relative to its creation time. If not set, the invoice will expire in
9811	/// [`DEFAULT_EXPIRY_TIME`] by default.
9812	///
9813	/// The creation time used is the duration since the Unix epoch for `std` builds. For non-`std`
9814	/// builds, the highest block timestamp seen is used instead. In the latter case, use a long
9815	/// enough expiry to account for the average block time.
9816	pub invoice_expiry_delta_secs: Option<u32>,
9817
9818	/// The minimum `cltv_expiry` for the last HTLC in the route. If not set, will use
9819	/// [`MIN_FINAL_CLTV_EXPIRY_DELTA`].
9820	///
9821	/// If set, must be at least [`MIN_FINAL_CLTV_EXPIRY_DELTA`], and a three-block buffer will be
9822	/// added as well to allow for up to a few new block confirmations during routing.
9823	pub min_final_cltv_expiry_delta: Option<u16>,
9824
9825	/// The payment hash used in the invoice. If not set, a payment hash will be generated using a
9826	/// preimage that can be reproduced by [`ChannelManager`] without storing any state.
9827	///
9828	/// Uses the payment hash if set. This may be useful if you're building an on-chain swap or
9829	/// involving another protocol where the payment hash is also involved outside the scope of
9830	/// lightning.
9831	pub payment_hash: Option<PaymentHash>,
9832}
9833
9834impl Default for Bolt11InvoiceParameters {
9835	fn default() -> Self {
9836		Self {
9837			amount_msats: None,
9838			description: Bolt11InvoiceDescription::Direct(Description::empty()),
9839			invoice_expiry_delta_secs: None,
9840			min_final_cltv_expiry_delta: None,
9841			payment_hash: None,
9842		}
9843	}
9844}
9845
9846macro_rules! create_offer_builder { ($self: ident, $builder: ty) => {
9847	/// Creates an [`OfferBuilder`] such that the [`Offer`] it builds is recognized by the
9848	/// [`ChannelManager`] when handling [`InvoiceRequest`] messages for the offer. The offer's
9849	/// expiration will be `absolute_expiry` if `Some`, otherwise it will not expire.
9850	///
9851	/// # Privacy
9852	///
9853	/// Uses [`MessageRouter`] to construct a [`BlindedMessagePath`] for the offer based on the given
9854	/// `absolute_expiry` according to [`MAX_SHORT_LIVED_RELATIVE_EXPIRY`]. See those docs for
9855	/// privacy implications as well as those of the parameterized [`Router`], which implements
9856	/// [`MessageRouter`].
9857	///
9858	/// Also, uses a derived signing pubkey in the offer for recipient privacy.
9859	///
9860	/// # Limitations
9861	///
9862	/// Requires a direct connection to the introduction node in the responding [`InvoiceRequest`]'s
9863	/// reply path.
9864	///
9865	/// # Errors
9866	///
9867	/// Errors if the parameterized [`Router`] is unable to create a blinded path for the offer.
9868	///
9869	/// [`Offer`]: crate::offers::offer::Offer
9870	/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
9871	pub fn create_offer_builder(
9872		&$self, absolute_expiry: Option<Duration>
9873	) -> Result<$builder, Bolt12SemanticError> {
9874		let node_id = $self.get_our_node_id();
9875		let expanded_key = &$self.inbound_payment_key;
9876		let entropy = &*$self.entropy_source;
9877		let secp_ctx = &$self.secp_ctx;
9878
9879		let nonce = Nonce::from_entropy_source(entropy);
9880		let context = OffersContext::InvoiceRequest { nonce };
9881		let path = $self.create_blinded_paths_using_absolute_expiry(context, absolute_expiry)
9882			.and_then(|paths| paths.into_iter().next().ok_or(()))
9883			.map_err(|_| Bolt12SemanticError::MissingPaths)?;
9884		let builder = OfferBuilder::deriving_signing_pubkey(node_id, expanded_key, nonce, secp_ctx)
9885			.chain_hash($self.chain_hash)
9886			.path(path);
9887
9888		let builder = match absolute_expiry {
9889			None => builder,
9890			Some(absolute_expiry) => builder.absolute_expiry(absolute_expiry),
9891		};
9892
9893		Ok(builder.into())
9894	}
9895} }
9896
9897macro_rules! create_refund_builder { ($self: ident, $builder: ty) => {
9898	/// Creates a [`RefundBuilder`] such that the [`Refund`] it builds is recognized by the
9899	/// [`ChannelManager`] when handling [`Bolt12Invoice`] messages for the refund.
9900	///
9901	/// # Payment
9902	///
9903	/// The provided `payment_id` is used to ensure that only one invoice is paid for the refund.
9904	/// See [Avoiding Duplicate Payments] for other requirements once the payment has been sent.
9905	///
9906	/// The builder will have the provided expiration set. Any changes to the expiration on the
9907	/// returned builder will not be honored by [`ChannelManager`]. For non-`std`, the highest seen
9908	/// block time minus two hours is used for the current time when determining if the refund has
9909	/// expired.
9910	///
9911	/// To revoke the refund, use [`ChannelManager::abandon_payment`] prior to receiving the
9912	/// invoice. If abandoned, or an invoice isn't received before expiration, the payment will fail
9913	/// with an [`Event::PaymentFailed`].
9914	///
9915	/// If `max_total_routing_fee_msat` is not specified, The default from
9916	/// [`RouteParameters::from_payment_params_and_value`] is applied.
9917	///
9918	/// # Privacy
9919	///
9920	/// Uses [`MessageRouter`] to construct a [`BlindedMessagePath`] for the refund based on the given
9921	/// `absolute_expiry` according to [`MAX_SHORT_LIVED_RELATIVE_EXPIRY`]. See those docs for
9922	/// privacy implications as well as those of the parameterized [`Router`], which implements
9923	/// [`MessageRouter`].
9924	///
9925	/// Also, uses a derived payer id in the refund for payer privacy.
9926	///
9927	/// # Limitations
9928	///
9929	/// Requires a direct connection to an introduction node in the responding
9930	/// [`Bolt12Invoice::payment_paths`].
9931	///
9932	/// # Errors
9933	///
9934	/// Errors if:
9935	/// - a duplicate `payment_id` is provided given the caveats in the aforementioned link,
9936	/// - `amount_msats` is invalid, or
9937	/// - the parameterized [`Router`] is unable to create a blinded path for the refund.
9938	///
9939	/// [`Refund`]: crate::offers::refund::Refund
9940	/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
9941	/// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths
9942	/// [Avoiding Duplicate Payments]: #avoiding-duplicate-payments
9943	pub fn create_refund_builder(
9944		&$self, amount_msats: u64, absolute_expiry: Duration, payment_id: PaymentId,
9945		retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>
9946	) -> Result<$builder, Bolt12SemanticError> {
9947		let node_id = $self.get_our_node_id();
9948		let expanded_key = &$self.inbound_payment_key;
9949		let entropy = &*$self.entropy_source;
9950		let secp_ctx = &$self.secp_ctx;
9951
9952		let nonce = Nonce::from_entropy_source(entropy);
9953		let context = OffersContext::OutboundPayment { payment_id, nonce, hmac: None };
9954		let path = $self.create_blinded_paths_using_absolute_expiry(context, Some(absolute_expiry))
9955			.and_then(|paths| paths.into_iter().next().ok_or(()))
9956			.map_err(|_| Bolt12SemanticError::MissingPaths)?;
9957
9958		let builder = RefundBuilder::deriving_signing_pubkey(
9959			node_id, expanded_key, nonce, secp_ctx, amount_msats, payment_id
9960		)?
9961			.chain_hash($self.chain_hash)
9962			.absolute_expiry(absolute_expiry)
9963			.path(path);
9964
9965		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop($self);
9966
9967		let expiration = StaleExpiration::AbsoluteTimeout(absolute_expiry);
9968		$self.pending_outbound_payments
9969			.add_new_awaiting_invoice(
9970				payment_id, expiration, retry_strategy, max_total_routing_fee_msat, None,
9971			)
9972			.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?;
9973
9974		Ok(builder.into())
9975	}
9976} }
9977
9978/// Defines the maximum number of [`OffersMessage`] including different reply paths to be sent
9979/// along different paths.
9980/// Sending multiple requests increases the chances of successful delivery in case some
9981/// paths are unavailable. However, only one invoice for a given [`PaymentId`] will be paid,
9982/// even if multiple invoices are received.
9983const OFFERS_MESSAGE_REQUEST_LIMIT: usize = 10;
9984
9985impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
9986where
9987	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
9988	T::Target: BroadcasterInterface,
9989	ES::Target: EntropySource,
9990	NS::Target: NodeSigner,
9991	SP::Target: SignerProvider,
9992	F::Target: FeeEstimator,
9993	R::Target: Router,
9994	MR::Target: MessageRouter,
9995	L::Target: Logger,
9996{
9997	#[cfg(not(c_bindings))]
9998	create_offer_builder!(self, OfferBuilder<DerivedMetadata, secp256k1::All>);
9999	#[cfg(not(c_bindings))]
10000	create_refund_builder!(self, RefundBuilder<secp256k1::All>);
10001
10002	#[cfg(c_bindings)]
10003	create_offer_builder!(self, OfferWithDerivedMetadataBuilder);
10004	#[cfg(c_bindings)]
10005	create_refund_builder!(self, RefundMaybeWithDerivedMetadataBuilder);
10006
10007	/// Pays for an [`Offer`] using the given parameters by creating an [`InvoiceRequest`] and
10008	/// enqueuing it to be sent via an onion message. [`ChannelManager`] will pay the actual
10009	/// [`Bolt12Invoice`] once it is received.
10010	///
10011	/// Uses [`InvoiceRequestBuilder`] such that the [`InvoiceRequest`] it builds is recognized by
10012	/// the [`ChannelManager`] when handling a [`Bolt12Invoice`] message in response to the request.
10013	/// The optional parameters are used in the builder, if `Some`:
10014	/// - `quantity` for [`InvoiceRequest::quantity`] which must be set if
10015	///   [`Offer::expects_quantity`] is `true`.
10016	/// - `amount_msats` if overpaying what is required for the given `quantity` is desired, and
10017	/// - `payer_note` for [`InvoiceRequest::payer_note`].
10018	///
10019	/// If `max_total_routing_fee_msat` is not specified, The default from
10020	/// [`RouteParameters::from_payment_params_and_value`] is applied.
10021	///
10022	/// # Payment
10023	///
10024	/// The provided `payment_id` is used to ensure that only one invoice is paid for the request
10025	/// when received. See [Avoiding Duplicate Payments] for other requirements once the payment has
10026	/// been sent.
10027	///
10028	/// To revoke the request, use [`ChannelManager::abandon_payment`] prior to receiving the
10029	/// invoice. If abandoned, or an invoice isn't received in a reasonable amount of time, the
10030	/// payment will fail with an [`Event::PaymentFailed`].
10031	///
10032	/// # Privacy
10033	///
10034	/// For payer privacy, uses a derived payer id and uses [`MessageRouter::create_blinded_paths`]
10035	/// to construct a [`BlindedMessagePath`] for the reply path. For further privacy implications, see the
10036	/// docs of the parameterized [`Router`], which implements [`MessageRouter`].
10037	///
10038	/// # Limitations
10039	///
10040	/// Requires a direct connection to an introduction node in [`Offer::paths`] or to
10041	/// [`Offer::issuer_signing_pubkey`], if empty. A similar restriction applies to the responding
10042	/// [`Bolt12Invoice::payment_paths`].
10043	///
10044	/// # Errors
10045	///
10046	/// Errors if:
10047	/// - a duplicate `payment_id` is provided given the caveats in the aforementioned link,
10048	/// - the provided parameters are invalid for the offer,
10049	/// - the offer is for an unsupported chain, or
10050	/// - the parameterized [`Router`] is unable to create a blinded reply path for the invoice
10051	///   request.
10052	///
10053	/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
10054	/// [`InvoiceRequest::quantity`]: crate::offers::invoice_request::InvoiceRequest::quantity
10055	/// [`InvoiceRequest::payer_note`]: crate::offers::invoice_request::InvoiceRequest::payer_note
10056	/// [`InvoiceRequestBuilder`]: crate::offers::invoice_request::InvoiceRequestBuilder
10057	/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
10058	/// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths
10059	/// [Avoiding Duplicate Payments]: #avoiding-duplicate-payments
10060	pub fn pay_for_offer(
10061		&self, offer: &Offer, quantity: Option<u64>, amount_msats: Option<u64>,
10062		payer_note: Option<String>, payment_id: PaymentId, retry_strategy: Retry,
10063		max_total_routing_fee_msat: Option<u64>
10064	) -> Result<(), Bolt12SemanticError> {
10065		self.pay_for_offer_intern(offer, quantity, amount_msats, payer_note, payment_id, None, |invoice_request, nonce| {
10066			let expiration = StaleExpiration::TimerTicks(1);
10067			let retryable_invoice_request = RetryableInvoiceRequest {
10068				invoice_request: invoice_request.clone(),
10069				nonce,
10070			};
10071			self.pending_outbound_payments
10072				.add_new_awaiting_invoice(
10073					payment_id, expiration, retry_strategy, max_total_routing_fee_msat,
10074					Some(retryable_invoice_request)
10075				)
10076				.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
10077		})
10078	}
10079
10080	fn pay_for_offer_intern<CPP: FnOnce(&InvoiceRequest, Nonce) -> Result<(), Bolt12SemanticError>>(
10081		&self, offer: &Offer, quantity: Option<u64>, amount_msats: Option<u64>,
10082		payer_note: Option<String>, payment_id: PaymentId,
10083		human_readable_name: Option<HumanReadableName>, create_pending_payment: CPP,
10084	) -> Result<(), Bolt12SemanticError> {
10085		let expanded_key = &self.inbound_payment_key;
10086		let entropy = &*self.entropy_source;
10087		let secp_ctx = &self.secp_ctx;
10088
10089		let nonce = Nonce::from_entropy_source(entropy);
10090		let builder: InvoiceRequestBuilder<secp256k1::All> = offer
10091			.request_invoice(expanded_key, nonce, secp_ctx, payment_id)?
10092			.into();
10093		let builder = builder.chain_hash(self.chain_hash)?;
10094
10095		let builder = match quantity {
10096			None => builder,
10097			Some(quantity) => builder.quantity(quantity)?,
10098		};
10099		let builder = match amount_msats {
10100			None => builder,
10101			Some(amount_msats) => builder.amount_msats(amount_msats)?,
10102		};
10103		let builder = match payer_note {
10104			None => builder,
10105			Some(payer_note) => builder.payer_note(payer_note),
10106		};
10107		let builder = match human_readable_name {
10108			None => builder,
10109			Some(hrn) => builder.sourced_from_human_readable_name(hrn),
10110		};
10111		let invoice_request = builder.build_and_sign()?;
10112
10113		let hmac = payment_id.hmac_for_offer_payment(nonce, expanded_key);
10114		let context = MessageContext::Offers(
10115			OffersContext::OutboundPayment { payment_id, nonce, hmac: Some(hmac) }
10116		);
10117		let reply_paths = self.create_blinded_paths(context)
10118			.map_err(|_| Bolt12SemanticError::MissingPaths)?;
10119
10120		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
10121
10122		create_pending_payment(&invoice_request, nonce)?;
10123
10124		self.enqueue_invoice_request(invoice_request, reply_paths)
10125	}
10126
10127	fn enqueue_invoice_request(
10128		&self,
10129		invoice_request: InvoiceRequest,
10130		reply_paths: Vec<BlindedMessagePath>,
10131	) -> Result<(), Bolt12SemanticError> {
10132		let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
10133		if !invoice_request.paths().is_empty() {
10134			reply_paths
10135				.iter()
10136				.flat_map(|reply_path| invoice_request.paths().iter().map(move |path| (path, reply_path)))
10137				.take(OFFERS_MESSAGE_REQUEST_LIMIT)
10138				.for_each(|(path, reply_path)| {
10139					let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
10140						destination: Destination::BlindedPath(path.clone()),
10141						reply_path: reply_path.clone(),
10142					};
10143					let message = OffersMessage::InvoiceRequest(invoice_request.clone());
10144					pending_offers_messages.push((message, instructions));
10145				});
10146		} else if let Some(node_id) = invoice_request.issuer_signing_pubkey() {
10147			for reply_path in reply_paths {
10148				let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
10149					destination: Destination::Node(node_id),
10150					reply_path,
10151				};
10152				let message = OffersMessage::InvoiceRequest(invoice_request.clone());
10153				pending_offers_messages.push((message, instructions));
10154			}
10155		} else {
10156			debug_assert!(false);
10157			return Err(Bolt12SemanticError::MissingIssuerSigningPubkey);
10158		}
10159
10160		Ok(())
10161	}
10162
10163	/// Creates a [`Bolt12Invoice`] for a [`Refund`] and enqueues it to be sent via an onion
10164	/// message.
10165	///
10166	/// The resulting invoice uses a [`PaymentHash`] recognized by the [`ChannelManager`] and a
10167	/// [`BlindedPaymentPath`] containing the [`PaymentSecret`] needed to reconstruct the
10168	/// corresponding [`PaymentPreimage`]. It is returned purely for informational purposes.
10169	///
10170	/// # Limitations
10171	///
10172	/// Requires a direct connection to an introduction node in [`Refund::paths`] or to
10173	/// [`Refund::payer_signing_pubkey`], if empty. This request is best effort; an invoice will be
10174	/// sent to each node meeting the aforementioned criteria, but there's no guarantee that they
10175	/// will be received and no retries will be made.
10176	///
10177	/// # Errors
10178	///
10179	/// Errors if:
10180	/// - the refund is for an unsupported chain, or
10181	/// - the parameterized [`Router`] is unable to create a blinded payment path or reply path for
10182	///   the invoice.
10183	///
10184	/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
10185	pub fn request_refund_payment(
10186		&self, refund: &Refund
10187	) -> Result<Bolt12Invoice, Bolt12SemanticError> {
10188		let expanded_key = &self.inbound_payment_key;
10189		let entropy = &*self.entropy_source;
10190		let secp_ctx = &self.secp_ctx;
10191
10192		let amount_msats = refund.amount_msats();
10193		let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
10194
10195		if refund.chain() != self.chain_hash {
10196			return Err(Bolt12SemanticError::UnsupportedChain);
10197		}
10198
10199		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
10200
10201		match self.create_inbound_payment(Some(amount_msats), relative_expiry, None) {
10202			Ok((payment_hash, payment_secret)) => {
10203				let payment_context = PaymentContext::Bolt12Refund(Bolt12RefundContext {});
10204				let payment_paths = self.create_blinded_payment_paths(
10205					amount_msats, payment_secret, payment_context
10206				)
10207					.map_err(|_| Bolt12SemanticError::MissingPaths)?;
10208
10209				#[cfg(feature = "std")]
10210				let builder = refund.respond_using_derived_keys(
10211					payment_paths, payment_hash, expanded_key, entropy
10212				)?;
10213				#[cfg(not(feature = "std"))]
10214				let created_at = Duration::from_secs(
10215					self.highest_seen_timestamp.load(Ordering::Acquire) as u64
10216				);
10217				#[cfg(not(feature = "std"))]
10218				let builder = refund.respond_using_derived_keys_no_std(
10219					payment_paths, payment_hash, created_at, expanded_key, entropy
10220				)?;
10221				let builder: InvoiceBuilder<DerivedSigningPubkey> = builder.into();
10222				let invoice = builder.allow_mpp().build_and_sign(secp_ctx)?;
10223
10224				let nonce = Nonce::from_entropy_source(entropy);
10225				let hmac = payment_hash.hmac_for_offer_payment(nonce, expanded_key);
10226				let context = MessageContext::Offers(OffersContext::InboundPayment {
10227					payment_hash: invoice.payment_hash(), nonce, hmac
10228				});
10229				let reply_paths = self.create_blinded_paths(context)
10230					.map_err(|_| Bolt12SemanticError::MissingPaths)?;
10231
10232				let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
10233				if refund.paths().is_empty() {
10234					for reply_path in reply_paths {
10235						let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
10236							destination: Destination::Node(refund.payer_signing_pubkey()),
10237							reply_path,
10238						};
10239						let message = OffersMessage::Invoice(invoice.clone());
10240						pending_offers_messages.push((message, instructions));
10241					}
10242				} else {
10243					reply_paths
10244						.iter()
10245						.flat_map(|reply_path| refund.paths().iter().map(move |path| (path, reply_path)))
10246						.take(OFFERS_MESSAGE_REQUEST_LIMIT)
10247						.for_each(|(path, reply_path)| {
10248							let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
10249								destination: Destination::BlindedPath(path.clone()),
10250								reply_path: reply_path.clone(),
10251							};
10252							let message = OffersMessage::Invoice(invoice.clone());
10253							pending_offers_messages.push((message, instructions));
10254						});
10255				}
10256
10257				Ok(invoice)
10258			},
10259			Err(()) => Err(Bolt12SemanticError::InvalidAmount),
10260		}
10261	}
10262
10263	/// Pays for an [`Offer`] looked up using [BIP 353] Human Readable Names resolved by the DNS
10264	/// resolver(s) at `dns_resolvers` which resolve names according to bLIP 32.
10265	///
10266	/// If the wallet supports paying on-chain schemes, you should instead use
10267	/// [`OMNameResolver::resolve_name`] and [`OMNameResolver::handle_dnssec_proof_for_uri`] (by
10268	/// implementing [`DNSResolverMessageHandler`]) directly to look up a URI and then delegate to
10269	/// your normal URI handling.
10270	///
10271	/// If `max_total_routing_fee_msat` is not specified, the default from
10272	/// [`RouteParameters::from_payment_params_and_value`] is applied.
10273	///
10274	/// # Payment
10275	///
10276	/// The provided `payment_id` is used to ensure that only one invoice is paid for the request
10277	/// when received. See [Avoiding Duplicate Payments] for other requirements once the payment has
10278	/// been sent.
10279	///
10280	/// To revoke the request, use [`ChannelManager::abandon_payment`] prior to receiving the
10281	/// invoice. If abandoned, or an invoice isn't received in a reasonable amount of time, the
10282	/// payment will fail with an [`Event::InvoiceRequestFailed`].
10283	///
10284	/// # Privacy
10285	///
10286	/// For payer privacy, uses a derived payer id and uses [`MessageRouter::create_blinded_paths`]
10287	/// to construct a [`BlindedPath`] for the reply path. For further privacy implications, see the
10288	/// docs of the parameterized [`Router`], which implements [`MessageRouter`].
10289	///
10290	/// # Limitations
10291	///
10292	/// Requires a direct connection to the given [`Destination`] as well as an introduction node in
10293	/// [`Offer::paths`] or to [`Offer::signing_pubkey`], if empty. A similar restriction applies to
10294	/// the responding [`Bolt12Invoice::payment_paths`].
10295	///
10296	/// # Errors
10297	///
10298	/// Errors if:
10299	/// - a duplicate `payment_id` is provided given the caveats in the aforementioned link,
10300	///
10301	/// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths
10302	/// [Avoiding Duplicate Payments]: #avoiding-duplicate-payments
10303	#[cfg(feature = "dnssec")]
10304	pub fn pay_for_offer_from_human_readable_name(
10305		&self, name: HumanReadableName, amount_msats: u64, payment_id: PaymentId,
10306		retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>,
10307		dns_resolvers: Vec<Destination>,
10308	) -> Result<(), ()> {
10309		let (onion_message, context) =
10310			self.hrn_resolver.resolve_name(payment_id, name, &*self.entropy_source)?;
10311		let reply_paths = self.create_blinded_paths(MessageContext::DNSResolver(context))?;
10312		let expiration = StaleExpiration::TimerTicks(1);
10313		self.pending_outbound_payments.add_new_awaiting_offer(payment_id, expiration, retry_strategy, max_total_routing_fee_msat, amount_msats)?;
10314		let message_params = dns_resolvers
10315			.iter()
10316			.flat_map(|destination| reply_paths.iter().map(move |path| (path, destination)))
10317			.take(OFFERS_MESSAGE_REQUEST_LIMIT);
10318		for (reply_path, destination) in message_params {
10319			self.pending_dns_onion_messages.lock().unwrap().push((
10320				DNSResolverMessage::DNSSECQuery(onion_message.clone()),
10321				MessageSendInstructions::WithSpecifiedReplyPath {
10322					destination: destination.clone(),
10323					reply_path: reply_path.clone(),
10324				},
10325			));
10326		}
10327		Ok(())
10328	}
10329
10330	/// Gets a payment secret and payment hash for use in an invoice given to a third party wishing
10331	/// to pay us.
10332	///
10333	/// This differs from [`create_inbound_payment_for_hash`] only in that it generates the
10334	/// [`PaymentHash`] and [`PaymentPreimage`] for you.
10335	///
10336	/// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentClaimable`] event, which
10337	/// will have the [`PaymentClaimable::purpose`] return `Some` for [`PaymentPurpose::preimage`]. That
10338	/// should then be passed directly to [`claim_funds`].
10339	///
10340	/// See [`create_inbound_payment_for_hash`] for detailed documentation on behavior and requirements.
10341	///
10342	/// Note that a malicious eavesdropper can intuit whether an inbound payment was created by
10343	/// `create_inbound_payment` or `create_inbound_payment_for_hash` based on runtime.
10344	///
10345	/// # Note
10346	///
10347	/// If you register an inbound payment with this method, then serialize the `ChannelManager`, then
10348	/// deserialize it with a node running 0.0.103 and earlier, the payment will fail to be received.
10349	///
10350	/// Errors if `min_value_msat` is greater than total bitcoin supply.
10351	///
10352	/// If `min_final_cltv_expiry_delta` is set to some value, then the payment will not be receivable
10353	/// on versions of LDK prior to 0.0.114.
10354	///
10355	/// [`claim_funds`]: Self::claim_funds
10356	/// [`PaymentClaimable`]: events::Event::PaymentClaimable
10357	/// [`PaymentClaimable::purpose`]: events::Event::PaymentClaimable::purpose
10358	/// [`PaymentPurpose::preimage`]: events::PaymentPurpose::preimage
10359	/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
10360	pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32,
10361		min_final_cltv_expiry_delta: Option<u16>) -> Result<(PaymentHash, PaymentSecret), ()> {
10362		inbound_payment::create(&self.inbound_payment_key, min_value_msat, invoice_expiry_delta_secs,
10363			&self.entropy_source, self.highest_seen_timestamp.load(Ordering::Acquire) as u64,
10364			min_final_cltv_expiry_delta)
10365	}
10366
10367	/// Gets a [`PaymentSecret`] for a given [`PaymentHash`], for which the payment preimage is
10368	/// stored external to LDK.
10369	///
10370	/// A [`PaymentClaimable`] event will only be generated if the [`PaymentSecret`] matches a
10371	/// payment secret fetched via this method or [`create_inbound_payment`], and which is at least
10372	/// the `min_value_msat` provided here, if one is provided.
10373	///
10374	/// The [`PaymentHash`] (and corresponding [`PaymentPreimage`]) should be globally unique, though
10375	/// note that LDK will not stop you from registering duplicate payment hashes for inbound
10376	/// payments.
10377	///
10378	/// `min_value_msat` should be set if the invoice being generated contains a value. Any payment
10379	/// received for the returned [`PaymentHash`] will be required to be at least `min_value_msat`
10380	/// before a [`PaymentClaimable`] event will be generated, ensuring that we do not provide the
10381	/// sender "proof-of-payment" unless they have paid the required amount.
10382	///
10383	/// `invoice_expiry_delta_secs` describes the number of seconds that the invoice is valid for
10384	/// in excess of the current time. This should roughly match the expiry time set in the invoice.
10385	/// After this many seconds, we will remove the inbound payment, resulting in any attempts to
10386	/// pay the invoice failing. The BOLT spec suggests 3,600 secs as a default validity time for
10387	/// invoices when no timeout is set.
10388	///
10389	/// Note that we use block header time to time-out pending inbound payments (with some margin
10390	/// to compensate for the inaccuracy of block header timestamps). Thus, in practice we will
10391	/// accept a payment and generate a [`PaymentClaimable`] event for some time after the expiry.
10392	/// If you need exact expiry semantics, you should enforce them upon receipt of
10393	/// [`PaymentClaimable`].
10394	///
10395	/// Note that invoices generated for inbound payments should have their `min_final_cltv_expiry_delta`
10396	/// set to at least [`MIN_FINAL_CLTV_EXPIRY_DELTA`].
10397	///
10398	/// Note that a malicious eavesdropper can intuit whether an inbound payment was created by
10399	/// `create_inbound_payment` or `create_inbound_payment_for_hash` based on runtime.
10400	///
10401	/// # Note
10402	///
10403	/// If you register an inbound payment with this method, then serialize the `ChannelManager`, then
10404	/// deserialize it with a node running 0.0.103 and earlier, the payment will fail to be received.
10405	///
10406	/// Errors if `min_value_msat` is greater than total bitcoin supply.
10407	///
10408	/// If `min_final_cltv_expiry_delta` is set to some value, then the payment will not be receivable
10409	/// on versions of LDK prior to 0.0.114.
10410	///
10411	/// [`create_inbound_payment`]: Self::create_inbound_payment
10412	/// [`PaymentClaimable`]: events::Event::PaymentClaimable
10413	pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>,
10414		invoice_expiry_delta_secs: u32, min_final_cltv_expiry: Option<u16>) -> Result<PaymentSecret, ()> {
10415		inbound_payment::create_from_hash(&self.inbound_payment_key, min_value_msat, payment_hash,
10416			invoice_expiry_delta_secs, self.highest_seen_timestamp.load(Ordering::Acquire) as u64,
10417			min_final_cltv_expiry)
10418	}
10419
10420	/// Gets an LDK-generated payment preimage from a payment hash and payment secret that were
10421	/// previously returned from [`create_inbound_payment`].
10422	///
10423	/// [`create_inbound_payment`]: Self::create_inbound_payment
10424	pub fn get_payment_preimage(&self, payment_hash: PaymentHash, payment_secret: PaymentSecret) -> Result<PaymentPreimage, APIError> {
10425		inbound_payment::get_payment_preimage(payment_hash, payment_secret, &self.inbound_payment_key)
10426	}
10427
10428	/// Creates a collection of blinded paths by delegating to [`MessageRouter`] based on
10429	/// the path's intended lifetime.
10430	///
10431	/// Whether or not the path is compact depends on whether the path is short-lived or long-lived,
10432	/// respectively, based on the given `absolute_expiry` as seconds since the Unix epoch. See
10433	/// [`MAX_SHORT_LIVED_RELATIVE_EXPIRY`].
10434	fn create_blinded_paths_using_absolute_expiry(
10435		&self, context: OffersContext, absolute_expiry: Option<Duration>,
10436	) -> Result<Vec<BlindedMessagePath>, ()> {
10437		let now = self.duration_since_epoch();
10438		let max_short_lived_absolute_expiry = now.saturating_add(MAX_SHORT_LIVED_RELATIVE_EXPIRY);
10439
10440		if absolute_expiry.unwrap_or(Duration::MAX) <= max_short_lived_absolute_expiry {
10441			self.create_compact_blinded_paths(context)
10442		} else {
10443			self.create_blinded_paths(MessageContext::Offers(context))
10444		}
10445	}
10446
10447	pub(super) fn duration_since_epoch(&self) -> Duration {
10448		#[cfg(not(feature = "std"))]
10449		let now = Duration::from_secs(
10450			self.highest_seen_timestamp.load(Ordering::Acquire) as u64
10451		);
10452		#[cfg(feature = "std")]
10453		let now = std::time::SystemTime::now()
10454			.duration_since(std::time::SystemTime::UNIX_EPOCH)
10455			.expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
10456
10457		now
10458	}
10459
10460	/// Creates a collection of blinded paths by delegating to
10461	/// [`MessageRouter::create_blinded_paths`].
10462	///
10463	/// Errors if the `MessageRouter` errors.
10464	fn create_blinded_paths(&self, context: MessageContext) -> Result<Vec<BlindedMessagePath>, ()> {
10465		let recipient = self.get_our_node_id();
10466		let secp_ctx = &self.secp_ctx;
10467
10468		let peers = self.per_peer_state.read().unwrap()
10469			.iter()
10470			.map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
10471			.filter(|(_, peer)| peer.is_connected)
10472			.filter(|(_, peer)| peer.latest_features.supports_onion_messages())
10473			.map(|(node_id, _)| *node_id)
10474			.collect::<Vec<_>>();
10475
10476		self.message_router
10477			.create_blinded_paths(recipient, context, peers, secp_ctx)
10478			.and_then(|paths| (!paths.is_empty()).then(|| paths).ok_or(()))
10479	}
10480
10481	/// Creates a collection of blinded paths by delegating to
10482	/// [`MessageRouter::create_compact_blinded_paths`].
10483	///
10484	/// Errors if the `MessageRouter` errors.
10485	fn create_compact_blinded_paths(&self, context: OffersContext) -> Result<Vec<BlindedMessagePath>, ()> {
10486		let recipient = self.get_our_node_id();
10487		let secp_ctx = &self.secp_ctx;
10488
10489		let peers = self.per_peer_state.read().unwrap()
10490			.iter()
10491			.map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
10492			.filter(|(_, peer)| peer.is_connected)
10493			.filter(|(_, peer)| peer.latest_features.supports_onion_messages())
10494			.map(|(node_id, peer)| MessageForwardNode {
10495				node_id: *node_id,
10496				short_channel_id: peer.channel_by_id
10497					.iter()
10498					.filter(|(_, channel)| channel.context().is_usable())
10499					.min_by_key(|(_, channel)| channel.context().channel_creation_height)
10500					.and_then(|(_, channel)| channel.context().get_short_channel_id()),
10501			})
10502			.collect::<Vec<_>>();
10503
10504		self.message_router
10505			.create_compact_blinded_paths(recipient, MessageContext::Offers(context), peers, secp_ctx)
10506			.and_then(|paths| (!paths.is_empty()).then(|| paths).ok_or(()))
10507	}
10508
10509	/// Creates multi-hop blinded payment paths for the given `amount_msats` by delegating to
10510	/// [`Router::create_blinded_payment_paths`].
10511	fn create_blinded_payment_paths(
10512		&self, amount_msats: u64, payment_secret: PaymentSecret, payment_context: PaymentContext
10513	) -> Result<Vec<BlindedPaymentPath>, ()> {
10514		let expanded_key = &self.inbound_payment_key;
10515		let entropy = &*self.entropy_source;
10516		let secp_ctx = &self.secp_ctx;
10517
10518		let first_hops = self.list_usable_channels();
10519		let payee_node_id = self.get_our_node_id();
10520		let max_cltv_expiry = self.best_block.read().unwrap().height + CLTV_FAR_FAR_AWAY
10521			+ LATENCY_GRACE_PERIOD_BLOCKS;
10522
10523		let payee_tlvs = UnauthenticatedReceiveTlvs {
10524			payment_secret,
10525			payment_constraints: PaymentConstraints {
10526				max_cltv_expiry,
10527				htlc_minimum_msat: 1,
10528			},
10529			payment_context,
10530		};
10531		let nonce = Nonce::from_entropy_source(entropy);
10532		let payee_tlvs = payee_tlvs.authenticate(nonce, expanded_key);
10533
10534		self.router.create_blinded_payment_paths(
10535			payee_node_id, first_hops, payee_tlvs, amount_msats, secp_ctx
10536		)
10537	}
10538
10539	/// Gets a fake short channel id for use in receiving [phantom node payments]. These fake scids
10540	/// are used when constructing the phantom invoice's route hints.
10541	///
10542	/// [phantom node payments]: crate::sign::PhantomKeysManager
10543	pub fn get_phantom_scid(&self) -> u64 {
10544		let best_block_height = self.best_block.read().unwrap().height;
10545		let short_to_chan_info = self.short_to_chan_info.read().unwrap();
10546		loop {
10547			let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
10548			// Ensure the generated scid doesn't conflict with a real channel.
10549			match short_to_chan_info.get(&scid_candidate) {
10550				Some(_) => continue,
10551				None => return scid_candidate
10552			}
10553		}
10554	}
10555
10556	/// Gets route hints for use in receiving [phantom node payments].
10557	///
10558	/// [phantom node payments]: crate::sign::PhantomKeysManager
10559	pub fn get_phantom_route_hints(&self) -> PhantomRouteHints {
10560		PhantomRouteHints {
10561			channels: self.list_usable_channels(),
10562			phantom_scid: self.get_phantom_scid(),
10563			real_node_pubkey: self.get_our_node_id(),
10564		}
10565	}
10566
10567	/// Gets a fake short channel id for use in receiving intercepted payments. These fake scids are
10568	/// used when constructing the route hints for HTLCs intended to be intercepted. See
10569	/// [`ChannelManager::forward_intercepted_htlc`].
10570	///
10571	/// Note that this method is not guaranteed to return unique values, you may need to call it a few
10572	/// times to get a unique scid.
10573	pub fn get_intercept_scid(&self) -> u64 {
10574		let best_block_height = self.best_block.read().unwrap().height;
10575		let short_to_chan_info = self.short_to_chan_info.read().unwrap();
10576		loop {
10577			let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
10578			// Ensure the generated scid doesn't conflict with a real channel.
10579			if short_to_chan_info.contains_key(&scid_candidate) { continue }
10580			return scid_candidate
10581		}
10582	}
10583
10584	/// Gets inflight HTLC information by processing pending outbound payments that are in
10585	/// our channels. May be used during pathfinding to account for in-use channel liquidity.
10586	pub fn compute_inflight_htlcs(&self) -> InFlightHtlcs {
10587		let mut inflight_htlcs = InFlightHtlcs::new();
10588
10589		let per_peer_state = self.per_peer_state.read().unwrap();
10590		for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
10591			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
10592			let peer_state = &mut *peer_state_lock;
10593			for chan in peer_state.channel_by_id.values().filter_map(
10594				|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
10595			) {
10596				for (htlc_source, _) in chan.inflight_htlc_sources() {
10597					if let HTLCSource::OutboundRoute { path, .. } = htlc_source {
10598						inflight_htlcs.process_path(path, self.get_our_node_id());
10599					}
10600				}
10601			}
10602		}
10603
10604		inflight_htlcs
10605	}
10606
10607	#[cfg(any(test, feature = "_test_utils"))]
10608	pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
10609		let events = core::cell::RefCell::new(Vec::new());
10610		let event_handler = |event: events::Event| Ok(events.borrow_mut().push(event));
10611		self.process_pending_events(&event_handler);
10612		events.into_inner()
10613	}
10614
10615	#[cfg(feature = "_test_utils")]
10616	pub fn push_pending_event(&self, event: events::Event) {
10617		let mut events = self.pending_events.lock().unwrap();
10618		events.push_back((event, None));
10619	}
10620
10621	#[cfg(test)]
10622	pub fn pop_pending_event(&self) -> Option<events::Event> {
10623		let mut events = self.pending_events.lock().unwrap();
10624		events.pop_front().map(|(e, _)| e)
10625	}
10626
10627	#[cfg(test)]
10628	pub fn has_pending_payments(&self) -> bool {
10629		self.pending_outbound_payments.has_pending_payments()
10630	}
10631
10632	#[cfg(test)]
10633	pub fn clear_pending_payments(&self) {
10634		self.pending_outbound_payments.clear_pending_payments()
10635	}
10636
10637	/// When something which was blocking a channel from updating its [`ChannelMonitor`] (e.g. an
10638	/// [`Event`] being handled) completes, this should be called to restore the channel to normal
10639	/// operation. It will double-check that nothing *else* is also blocking the same channel from
10640	/// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
10641	fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey,
10642		channel_funding_outpoint: OutPoint, channel_id: ChannelId,
10643		mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
10644
10645		let logger = WithContext::from(
10646			&self.logger, Some(counterparty_node_id), Some(channel_id), None
10647		);
10648		loop {
10649			let per_peer_state = self.per_peer_state.read().unwrap();
10650			if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
10651				let mut peer_state_lck = peer_state_mtx.lock().unwrap();
10652				let peer_state = &mut *peer_state_lck;
10653				if let Some(blocker) = completed_blocker.take() {
10654					// Only do this on the first iteration of the loop.
10655					if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
10656						.get_mut(&channel_id)
10657					{
10658						blockers.retain(|iter| iter != &blocker);
10659					}
10660				}
10661
10662				if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
10663					channel_funding_outpoint, channel_id, counterparty_node_id) {
10664					// Check that, while holding the peer lock, we don't have anything else
10665					// blocking monitor updates for this channel. If we do, release the monitor
10666					// update(s) when those blockers complete.
10667					log_trace!(logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
10668						&channel_id);
10669					break;
10670				}
10671
10672				if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(
10673					channel_id) {
10674					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
10675						debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint);
10676						if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
10677							log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
10678								channel_id);
10679							handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
10680								peer_state_lck, peer_state, per_peer_state, chan);
10681							if further_update_exists {
10682								// If there are more `ChannelMonitorUpdate`s to process, restart at the
10683								// top of the loop.
10684								continue;
10685							}
10686						} else {
10687							log_trace!(logger, "Unlocked monitor updating for channel {} without monitors to update",
10688								channel_id);
10689						}
10690					}
10691				}
10692			} else {
10693				log_debug!(logger,
10694					"Got a release post-RAA monitor update for peer {} but the channel is gone",
10695					log_pubkey!(counterparty_node_id));
10696			}
10697			break;
10698		}
10699	}
10700
10701	fn handle_post_event_actions(&self, actions: Vec<EventCompletionAction>) {
10702		for action in actions {
10703			match action {
10704				EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
10705					channel_funding_outpoint, channel_id, counterparty_node_id
10706				} => {
10707					self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, channel_id, None);
10708				}
10709			}
10710		}
10711	}
10712
10713	/// Processes any events asynchronously in the order they were generated since the last call
10714	/// using the given event handler.
10715	///
10716	/// See the trait-level documentation of [`EventsProvider`] for requirements.
10717	pub async fn process_pending_events_async<Future: core::future::Future<Output = Result<(), ReplayEvent>>, H: Fn(Event) -> Future>(
10718		&self, handler: H
10719	) {
10720		let mut ev;
10721		process_events_body!(self, ev, { handler(ev).await });
10722	}
10723}
10724
10725impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
10726where
10727	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
10728	T::Target: BroadcasterInterface,
10729	ES::Target: EntropySource,
10730	NS::Target: NodeSigner,
10731	SP::Target: SignerProvider,
10732	F::Target: FeeEstimator,
10733	R::Target: Router,
10734	MR::Target: MessageRouter,
10735	L::Target: Logger,
10736{
10737	/// Returns `MessageSendEvent`s strictly ordered per-peer, in the order they were generated.
10738	/// The returned array will contain `MessageSendEvent`s for different peers if
10739	/// `MessageSendEvent`s to more than one peer exists, but `MessageSendEvent`s to the same peer
10740	/// is always placed next to each other.
10741	///
10742	/// Note that that while `MessageSendEvent`s are strictly ordered per-peer, the peer order for
10743	/// the chunks of `MessageSendEvent`s for different peers is random. I.e. if the array contains
10744	/// `MessageSendEvent`s  for both `node_a` and `node_b`, the `MessageSendEvent`s for `node_a`
10745	/// will randomly be placed first or last in the returned array.
10746	///
10747	/// Note that even though `BroadcastChannelAnnouncement` and `BroadcastChannelUpdate`
10748	/// `MessageSendEvent`s are intended to be broadcasted to all peers, they will be placed among
10749	/// the `MessageSendEvent`s to the specific peer they were generated under.
10750	fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
10751		let events = RefCell::new(Vec::new());
10752		PersistenceNotifierGuard::optionally_notify(self, || {
10753			let mut result = NotifyOption::SkipPersistNoEvents;
10754
10755			// TODO: This behavior should be documented. It's unintuitive that we query
10756			// ChannelMonitors when clearing other events.
10757			if self.process_pending_monitor_events() {
10758				result = NotifyOption::DoPersist;
10759			}
10760
10761			if self.check_free_holding_cells() {
10762				result = NotifyOption::DoPersist;
10763			}
10764			if self.maybe_generate_initial_closing_signed() {
10765				result = NotifyOption::DoPersist;
10766			}
10767
10768			let mut is_any_peer_connected = false;
10769			let mut pending_events = Vec::new();
10770			let per_peer_state = self.per_peer_state.read().unwrap();
10771			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
10772				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
10773				let peer_state = &mut *peer_state_lock;
10774				if peer_state.pending_msg_events.len() > 0 {
10775					pending_events.append(&mut peer_state.pending_msg_events);
10776				}
10777				if peer_state.is_connected {
10778					is_any_peer_connected = true
10779				}
10780			}
10781
10782			// Ensure that we are connected to some peers before getting broadcast messages.
10783			if is_any_peer_connected {
10784				let mut broadcast_msgs = self.pending_broadcast_messages.lock().unwrap();
10785				pending_events.append(&mut broadcast_msgs);
10786			}
10787
10788			if !pending_events.is_empty() {
10789				events.replace(pending_events);
10790			}
10791
10792			result
10793		});
10794		events.into_inner()
10795	}
10796}
10797
10798impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> EventsProvider for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
10799where
10800	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
10801	T::Target: BroadcasterInterface,
10802	ES::Target: EntropySource,
10803	NS::Target: NodeSigner,
10804	SP::Target: SignerProvider,
10805	F::Target: FeeEstimator,
10806	R::Target: Router,
10807	MR::Target: MessageRouter,
10808	L::Target: Logger,
10809{
10810	/// Processes events that must be periodically handled.
10811	///
10812	/// An [`EventHandler`] may safely call back to the provider in order to handle an event.
10813	/// However, it must not call [`Writeable::write`] as doing so would result in a deadlock.
10814	fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
10815		let mut ev;
10816		process_events_body!(self, ev, handler.handle_event(ev));
10817	}
10818}
10819
10820impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> chain::Listen for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
10821where
10822	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
10823	T::Target: BroadcasterInterface,
10824	ES::Target: EntropySource,
10825	NS::Target: NodeSigner,
10826	SP::Target: SignerProvider,
10827	F::Target: FeeEstimator,
10828	R::Target: Router,
10829	MR::Target: MessageRouter,
10830	L::Target: Logger,
10831{
10832	fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
10833		{
10834			let best_block = self.best_block.read().unwrap();
10835			assert_eq!(best_block.block_hash, header.prev_blockhash,
10836				"Blocks must be connected in chain-order - the connected header must build on the last connected header");
10837			assert_eq!(best_block.height, height - 1,
10838				"Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
10839		}
10840
10841		self.transactions_confirmed(header, txdata, height);
10842		self.best_block_updated(header, height);
10843	}
10844
10845	fn block_disconnected(&self, header: &Header, height: u32) {
10846		let _persistence_guard =
10847			PersistenceNotifierGuard::optionally_notify_skipping_background_events(
10848				self, || -> NotifyOption { NotifyOption::DoPersist });
10849		let new_height = height - 1;
10850		{
10851			let mut best_block = self.best_block.write().unwrap();
10852			assert_eq!(best_block.block_hash, header.block_hash(),
10853				"Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
10854			assert_eq!(best_block.height, height,
10855				"Blocks must be disconnected in chain-order - the disconnected block must have the correct height");
10856			*best_block = BestBlock::new(header.prev_blockhash, new_height)
10857		}
10858
10859		self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None)));
10860	}
10861}
10862
10863impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> chain::Confirm for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
10864where
10865	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
10866	T::Target: BroadcasterInterface,
10867	ES::Target: EntropySource,
10868	NS::Target: NodeSigner,
10869	SP::Target: SignerProvider,
10870	F::Target: FeeEstimator,
10871	R::Target: Router,
10872	MR::Target: MessageRouter,
10873	L::Target: Logger,
10874{
10875	fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) {
10876		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
10877		// during initialization prior to the chain_monitor being fully configured in some cases.
10878		// See the docs for `ChannelManagerReadArgs` for more.
10879
10880		let block_hash = header.block_hash();
10881		log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);
10882
10883		let _persistence_guard =
10884			PersistenceNotifierGuard::optionally_notify_skipping_background_events(
10885				self, || -> NotifyOption { NotifyOption::DoPersist });
10886		self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None))
10887			.map(|(a, b)| (a, Vec::new(), b)));
10888
10889		let last_best_block_height = self.best_block.read().unwrap().height;
10890		if height < last_best_block_height {
10891			let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
10892			self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None)));
10893		}
10894	}
10895
10896	fn best_block_updated(&self, header: &Header, height: u32) {
10897		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
10898		// during initialization prior to the chain_monitor being fully configured in some cases.
10899		// See the docs for `ChannelManagerReadArgs` for more.
10900
10901		let block_hash = header.block_hash();
10902		log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);
10903
10904		let _persistence_guard =
10905			PersistenceNotifierGuard::optionally_notify_skipping_background_events(
10906				self, || -> NotifyOption { NotifyOption::DoPersist });
10907		*self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
10908
10909		let mut min_anchor_feerate = None;
10910		let mut min_non_anchor_feerate = None;
10911		if self.background_events_processed_since_startup.load(Ordering::Relaxed) {
10912			// If we're past the startup phase, update our feerate cache
10913			let mut last_days_feerates = self.last_days_feerates.lock().unwrap();
10914			if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS {
10915				last_days_feerates.pop_front();
10916			}
10917			let anchor_feerate = self.fee_estimator
10918				.bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedAnchorChannelRemoteFee);
10919			let non_anchor_feerate = self.fee_estimator
10920				.bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee);
10921			last_days_feerates.push_back((anchor_feerate, non_anchor_feerate));
10922			if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS {
10923				min_anchor_feerate = last_days_feerates.iter().map(|(f, _)| f).min().copied();
10924				min_non_anchor_feerate = last_days_feerates.iter().map(|(_, f)| f).min().copied();
10925			}
10926		}
10927
10928		self.do_chain_event(Some(height), |channel| {
10929			let logger = WithChannelContext::from(&self.logger, &channel.context, None);
10930			if channel.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
10931				if let Some(feerate) = min_anchor_feerate {
10932					channel.check_for_stale_feerate(&logger, feerate)?;
10933				}
10934			} else {
10935				if let Some(feerate) = min_non_anchor_feerate {
10936					channel.check_for_stale_feerate(&logger, feerate)?;
10937				}
10938			}
10939			channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None))
10940		});
10941
10942		macro_rules! max_time {
10943			($timestamp: expr) => {
10944				loop {
10945					// Update $timestamp to be the max of its current value and the block
10946					// timestamp. This should keep us close to the current time without relying on
10947					// having an explicit local time source.
10948					// Just in case we end up in a race, we loop until we either successfully
10949					// update $timestamp or decide we don't need to.
10950					let old_serial = $timestamp.load(Ordering::Acquire);
10951					if old_serial >= header.time as usize { break; }
10952					if $timestamp.compare_exchange(old_serial, header.time as usize, Ordering::AcqRel, Ordering::Relaxed).is_ok() {
10953						break;
10954					}
10955				}
10956			}
10957		}
10958		max_time!(self.highest_seen_timestamp);
10959		#[cfg(feature = "dnssec")] {
10960			let timestamp = self.highest_seen_timestamp.load(Ordering::Relaxed) as u32;
10961			self.hrn_resolver.new_best_block(height, timestamp);
10962		}
10963	}
10964
10965	fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option<BlockHash>)> {
10966		let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
10967		for (_cp_id, peer_state_mutex) in self.per_peer_state.read().unwrap().iter() {
10968			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
10969			let peer_state = &mut *peer_state_lock;
10970			for chan in peer_state.channel_by_id.values().filter_map(|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }) {
10971				let txid_opt = chan.context.get_funding_txo();
10972				let height_opt = chan.context.get_funding_tx_confirmation_height();
10973				let hash_opt = chan.context.get_funding_tx_confirmed_in();
10974				if let (Some(funding_txo), Some(conf_height), Some(block_hash)) = (txid_opt, height_opt, hash_opt) {
10975					res.push((funding_txo.txid, conf_height, Some(block_hash)));
10976				}
10977			}
10978		}
10979		res
10980	}
10981
10982	fn transaction_unconfirmed(&self, txid: &Txid) {
10983		let _persistence_guard =
10984			PersistenceNotifierGuard::optionally_notify_skipping_background_events(
10985				self, || -> NotifyOption { NotifyOption::DoPersist });
10986		self.do_chain_event(None, |channel| {
10987			if let Some(funding_txo) = channel.context.get_funding_txo() {
10988				if funding_txo.txid == *txid {
10989					channel.funding_transaction_unconfirmed(&&WithChannelContext::from(&self.logger, &channel.context, None)).map(|()| (None, Vec::new(), None))
10990				} else { Ok((None, Vec::new(), None)) }
10991			} else { Ok((None, Vec::new(), None)) }
10992		});
10993	}
10994}
10995
10996impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
10997where
10998	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
10999	T::Target: BroadcasterInterface,
11000	ES::Target: EntropySource,
11001	NS::Target: NodeSigner,
11002	SP::Target: SignerProvider,
11003	F::Target: FeeEstimator,
11004	R::Target: Router,
11005	MR::Target: MessageRouter,
11006	L::Target: Logger,
11007{
11008	/// Calls a function which handles an on-chain event (blocks dis/connected, transactions
11009	/// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by
11010	/// the function.
11011	fn do_chain_event<FN: Fn(&mut Channel<SP>) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
11012			(&self, height_opt: Option<u32>, f: FN) {
11013		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
11014		// during initialization prior to the chain_monitor being fully configured in some cases.
11015		// See the docs for `ChannelManagerReadArgs` for more.
11016
11017		let mut failed_channels = Vec::new();
11018		let mut timed_out_htlcs = Vec::new();
11019		{
11020			let per_peer_state = self.per_peer_state.read().unwrap();
11021			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
11022				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
11023				let peer_state = &mut *peer_state_lock;
11024				let pending_msg_events = &mut peer_state.pending_msg_events;
11025
11026				peer_state.channel_by_id.retain(|_, phase| {
11027					match phase {
11028						// Retain unfunded channels.
11029						ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) |
11030						ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => true,
11031						ChannelPhase::Funded(channel) => {
11032							let res = f(channel);
11033							if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
11034								for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
11035									let failure_code = 0x1000|14; /* expiry_too_soon */
11036									let data = self.get_htlc_inbound_temp_fail_data(failure_code);
11037									timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
11038										HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() }));
11039								}
11040								let logger = WithChannelContext::from(&self.logger, &channel.context, None);
11041								if let Some(channel_ready) = channel_ready_opt {
11042									send_channel_ready!(self, pending_msg_events, channel, channel_ready);
11043									if channel.context.is_usable() {
11044										log_trace!(logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", channel.context.channel_id());
11045										if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
11046											pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
11047												node_id: channel.context.get_counterparty_node_id(),
11048												msg,
11049											});
11050										}
11051									} else {
11052										log_trace!(logger, "Sending channel_ready WITHOUT channel_update for {}", channel.context.channel_id());
11053									}
11054								}
11055
11056								{
11057									let mut pending_events = self.pending_events.lock().unwrap();
11058									emit_channel_ready_event!(pending_events, channel);
11059								}
11060
11061								if let Some(height) = height_opt {
11062									// (re-)broadcast signed `channel_announcement`s and
11063									// `channel_update`s for any channels less than a week old.
11064									let funding_conf_height =
11065										channel.context.get_funding_tx_confirmation_height().unwrap_or(height);
11066									// To avoid broadcast storms after each block, only
11067									// re-broadcast every hour (6 blocks) after the initial
11068									// broadcast, or if this is the first time we're ready to
11069									// broadcast this channel.
11070									let rebroadcast_announcement = funding_conf_height < height + 1008
11071										&& funding_conf_height % 6 == height % 6;
11072									#[allow(unused_mut, unused_assignments)]
11073									let mut should_announce = announcement_sigs.is_some() || rebroadcast_announcement;
11074									// Most of our tests were written when we only broadcasted
11075									// `channel_announcement`s once and then never re-broadcasted
11076									// them again, so disable the re-broadcasting entirely in tests
11077									#[cfg(test)]
11078									{
11079										should_announce = announcement_sigs.is_some();
11080									}
11081									if should_announce {
11082										if let Some(announcement) = channel.get_signed_channel_announcement(
11083											&self.node_signer, self.chain_hash, height, &self.default_configuration,
11084										) {
11085											pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
11086												msg: announcement,
11087												// Note that get_signed_channel_announcement fails
11088												// if the channel cannot be announced, so
11089												// get_channel_update_for_broadcast will never fail
11090												// by the time we get here.
11091												update_msg: Some(self.get_channel_update_for_broadcast(channel).unwrap()),
11092											});
11093										}
11094									}
11095								}
11096								if let Some(announcement_sigs) = announcement_sigs {
11097									log_trace!(logger, "Sending announcement_signatures for channel {}", channel.context.channel_id());
11098									pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
11099										node_id: channel.context.get_counterparty_node_id(),
11100										msg: announcement_sigs,
11101									});
11102								}
11103								if channel.is_our_channel_ready() {
11104									if let Some(real_scid) = channel.context.get_short_channel_id() {
11105										// If we sent a 0conf channel_ready, and now have an SCID, we add it
11106										// to the short_to_chan_info map here. Note that we check whether we
11107										// can relay using the real SCID at relay-time (i.e.
11108										// enforce option_scid_alias then), and if the funding tx is ever
11109										// un-confirmed we force-close the channel, ensuring short_to_chan_info
11110										// is always consistent.
11111										let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
11112										let scid_insert = short_to_chan_info.insert(real_scid, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
11113										assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.context.get_counterparty_node_id(), channel.context.channel_id()),
11114											"SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
11115											fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
11116									}
11117								}
11118							} else if let Err(reason) = res {
11119								// It looks like our counterparty went on-chain or funding transaction was
11120								// reorged out of the main chain. Close the channel.
11121								let reason_message = format!("{}", reason);
11122								let mut close_res = channel.context.force_shutdown(true, reason);
11123								locked_close_channel!(self, peer_state, &channel.context, close_res);
11124								failed_channels.push(close_res);
11125								if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
11126									let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
11127									pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
11128										msg: update
11129									});
11130								}
11131								pending_msg_events.push(events::MessageSendEvent::HandleError {
11132									node_id: channel.context.get_counterparty_node_id(),
11133									action: msgs::ErrorAction::DisconnectPeer {
11134										msg: Some(msgs::ErrorMessage {
11135											channel_id: channel.context.channel_id(),
11136											data: reason_message,
11137										})
11138									},
11139								});
11140								return false;
11141							}
11142							true
11143						}
11144					}
11145				});
11146			}
11147		}
11148
11149		if let Some(height) = height_opt {
11150			self.claimable_payments.lock().unwrap().claimable_payments.retain(|payment_hash, payment| {
11151				payment.htlcs.retain(|htlc| {
11152					// If height is approaching the number of blocks we think it takes us to get
11153					// our commitment transaction confirmed before the HTLC expires, plus the
11154					// number of blocks we generally consider it to take to do a commitment update,
11155					// just give up on it and fail the HTLC.
11156					if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
11157						let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
11158						htlc_msat_height_data.extend_from_slice(&height.to_be_bytes());
11159
11160						timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(),
11161							HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
11162							HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
11163						false
11164					} else { true }
11165				});
11166				!payment.htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
11167			});
11168
11169			let mut intercepted_htlcs = self.pending_intercepted_htlcs.lock().unwrap();
11170			intercepted_htlcs.retain(|_, htlc| {
11171				if height >= htlc.forward_info.outgoing_cltv_value - HTLC_FAIL_BACK_BUFFER {
11172					let prev_hop_data = HTLCSource::PreviousHopData(HTLCPreviousHopData {
11173						short_channel_id: htlc.prev_short_channel_id,
11174						user_channel_id: Some(htlc.prev_user_channel_id),
11175						htlc_id: htlc.prev_htlc_id,
11176						incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret,
11177						phantom_shared_secret: None,
11178						counterparty_node_id: htlc.prev_counterparty_node_id,
11179						outpoint: htlc.prev_funding_outpoint,
11180						channel_id: htlc.prev_channel_id,
11181						blinded_failure: htlc.forward_info.routing.blinded_failure(),
11182						cltv_expiry: htlc.forward_info.routing.incoming_cltv_expiry(),
11183					});
11184
11185					let requested_forward_scid /* intercept scid */ = match htlc.forward_info.routing {
11186						PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
11187						_ => unreachable!(),
11188					};
11189					timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash,
11190							HTLCFailReason::from_failure_code(0x2000 | 2),
11191							HTLCDestination::InvalidForward { requested_forward_scid }));
11192					let logger = WithContext::from(
11193						&self.logger, None, Some(htlc.prev_channel_id), Some(htlc.forward_info.payment_hash)
11194					);
11195					log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
11196					false
11197				} else { true }
11198			});
11199		}
11200
11201		for failure in failed_channels {
11202			self.finish_close_channel(failure);
11203		}
11204
11205		for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) {
11206			self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, destination);
11207		}
11208	}
11209
11210	/// Gets a [`Future`] that completes when this [`ChannelManager`] may need to be persisted or
11211	/// may have events that need processing.
11212	///
11213	/// In order to check if this [`ChannelManager`] needs persisting, call
11214	/// [`Self::get_and_clear_needs_persistence`].
11215	///
11216	/// Note that callbacks registered on the [`Future`] MUST NOT call back into this
11217	/// [`ChannelManager`] and should instead register actions to be taken later.
11218	pub fn get_event_or_persistence_needed_future(&self) -> Future {
11219		self.event_persist_notifier.get_future()
11220	}
11221
11222	/// Returns true if this [`ChannelManager`] needs to be persisted.
11223	///
11224	/// See [`Self::get_event_or_persistence_needed_future`] for retrieving a [`Future`] that
11225	/// indicates this should be checked.
11226	pub fn get_and_clear_needs_persistence(&self) -> bool {
11227		self.needs_persist_flag.swap(false, Ordering::AcqRel)
11228	}
11229
11230	#[cfg(any(test, feature = "_test_utils"))]
11231	pub fn get_event_or_persist_condvar_value(&self) -> bool {
11232		self.event_persist_notifier.notify_pending()
11233	}
11234
11235	/// Gets the latest best block which was connected either via the [`chain::Listen`] or
11236	/// [`chain::Confirm`] interfaces.
11237	pub fn current_best_block(&self) -> BestBlock {
11238		self.best_block.read().unwrap().clone()
11239	}
11240
11241	/// Fetches the set of [`NodeFeatures`] flags that are provided by or required by
11242	/// [`ChannelManager`].
11243	pub fn node_features(&self) -> NodeFeatures {
11244		provided_node_features(&self.default_configuration)
11245	}
11246
11247	/// Fetches the set of [`Bolt11InvoiceFeatures`] flags that are provided by or required by
11248	/// [`ChannelManager`].
11249	///
11250	/// Note that the invoice feature flags can vary depending on if the invoice is a "phantom invoice"
11251	/// or not. Thus, this method is not public.
11252	#[cfg(any(feature = "_test_utils", test))]
11253	pub fn bolt11_invoice_features(&self) -> Bolt11InvoiceFeatures {
11254		provided_bolt11_invoice_features(&self.default_configuration)
11255	}
11256
11257	/// Fetches the set of [`Bolt12InvoiceFeatures`] flags that are provided by or required by
11258	/// [`ChannelManager`].
11259	fn bolt12_invoice_features(&self) -> Bolt12InvoiceFeatures {
11260		provided_bolt12_invoice_features(&self.default_configuration)
11261	}
11262
11263	/// Fetches the set of [`ChannelFeatures`] flags that are provided by or required by
11264	/// [`ChannelManager`].
11265	pub fn channel_features(&self) -> ChannelFeatures {
11266		provided_channel_features(&self.default_configuration)
11267	}
11268
11269	/// Fetches the set of [`ChannelTypeFeatures`] flags that are provided by or required by
11270	/// [`ChannelManager`].
11271	pub fn channel_type_features(&self) -> ChannelTypeFeatures {
11272		provided_channel_type_features(&self.default_configuration)
11273	}
11274
11275	/// Fetches the set of [`InitFeatures`] flags that are provided by or required by
11276	/// [`ChannelManager`].
11277	pub fn init_features(&self) -> InitFeatures {
11278		provided_init_features(&self.default_configuration)
11279	}
11280}
11281
11282impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
11283	ChannelMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
11284where
11285	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
11286	T::Target: BroadcasterInterface,
11287	ES::Target: EntropySource,
11288	NS::Target: NodeSigner,
11289	SP::Target: SignerProvider,
11290	F::Target: FeeEstimator,
11291	R::Target: Router,
11292	MR::Target: MessageRouter,
11293	L::Target: Logger,
11294{
11295	fn handle_open_channel(&self, counterparty_node_id: PublicKey, msg: &msgs::OpenChannel) {
11296		// Note that we never need to persist the updated ChannelManager for an inbound
11297		// open_channel message - pre-funded channels are never written so there should be no
11298		// change to the contents.
11299		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11300			let res = self.internal_open_channel(&counterparty_node_id, OpenChannelMessageRef::V1(msg));
11301			let persist = match &res {
11302				Err(e) if e.closes_channel() => {
11303					debug_assert!(false, "We shouldn't close a new channel");
11304					NotifyOption::DoPersist
11305				},
11306				_ => NotifyOption::SkipPersistHandleEvents,
11307			};
11308			let _ = handle_error!(self, res, counterparty_node_id);
11309			persist
11310		});
11311	}
11312
11313	fn handle_open_channel_v2(&self, counterparty_node_id: PublicKey, msg: &msgs::OpenChannelV2) {
11314		// Note that we never need to persist the updated ChannelManager for an inbound
11315		// open_channel message - pre-funded channels are never written so there should be no
11316		// change to the contents.
11317		#[cfg(dual_funding)]
11318		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11319			let res = self.internal_open_channel(&counterparty_node_id, OpenChannelMessageRef::V2(msg));
11320			let persist = match &res {
11321				Err(e) if e.closes_channel() => {
11322					debug_assert!(false, "We shouldn't close a new channel");
11323					NotifyOption::DoPersist
11324				},
11325				_ => NotifyOption::SkipPersistHandleEvents,
11326			};
11327			let _ = handle_error!(self, res, counterparty_node_id);
11328			persist
11329		});
11330		#[cfg(not(dual_funding))]
11331		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
11332			"Dual-funded channels not supported".to_owned(),
11333			msg.common_fields.temporary_channel_id.clone())), counterparty_node_id);
11334	}
11335
11336	fn handle_accept_channel(&self, counterparty_node_id: PublicKey, msg: &msgs::AcceptChannel) {
11337		// Note that we never need to persist the updated ChannelManager for an inbound
11338		// accept_channel message - pre-funded channels are never written so there should be no
11339		// change to the contents.
11340		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11341			let _ = handle_error!(self, self.internal_accept_channel(&counterparty_node_id, msg), counterparty_node_id);
11342			NotifyOption::SkipPersistHandleEvents
11343		});
11344	}
11345
11346	fn handle_accept_channel_v2(&self, counterparty_node_id: PublicKey, msg: &msgs::AcceptChannelV2) {
11347		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
11348			"Dual-funded channels not supported".to_owned(),
11349			msg.common_fields.temporary_channel_id.clone())), counterparty_node_id);
11350	}
11351
11352	fn handle_funding_created(&self, counterparty_node_id: PublicKey, msg: &msgs::FundingCreated) {
11353		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11354		let _ = handle_error!(self, self.internal_funding_created(&counterparty_node_id, msg), counterparty_node_id);
11355	}
11356
11357	fn handle_funding_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::FundingSigned) {
11358		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11359		let _ = handle_error!(self, self.internal_funding_signed(&counterparty_node_id, msg), counterparty_node_id);
11360	}
11361
11362	fn handle_channel_ready(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelReady) {
11363		// Note that we never need to persist the updated ChannelManager for an inbound
11364		// channel_ready message - while the channel's state will change, any channel_ready message
11365		// will ultimately be re-sent on startup and the `ChannelMonitor` won't be updated so we
11366		// will not force-close the channel on startup.
11367		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11368			let res = self.internal_channel_ready(&counterparty_node_id, msg);
11369			let persist = match &res {
11370				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
11371				_ => NotifyOption::SkipPersistHandleEvents,
11372			};
11373			let _ = handle_error!(self, res, counterparty_node_id);
11374			persist
11375		});
11376	}
11377
11378	fn handle_stfu(&self, counterparty_node_id: PublicKey, msg: &msgs::Stfu) {
11379		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
11380			"Quiescence not supported".to_owned(),
11381			msg.channel_id.clone())), counterparty_node_id);
11382	}
11383
11384	#[cfg(splicing)]
11385	fn handle_splice_init(&self, counterparty_node_id: PublicKey, msg: &msgs::SpliceInit) {
11386		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
11387			"Splicing not supported".to_owned(),
11388			msg.channel_id.clone())), counterparty_node_id);
11389	}
11390
11391	#[cfg(splicing)]
11392	fn handle_splice_ack(&self, counterparty_node_id: PublicKey, msg: &msgs::SpliceAck) {
11393		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
11394			"Splicing not supported (splice_ack)".to_owned(),
11395			msg.channel_id.clone())), counterparty_node_id);
11396	}
11397
11398	#[cfg(splicing)]
11399	fn handle_splice_locked(&self, counterparty_node_id: PublicKey, msg: &msgs::SpliceLocked) {
11400		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
11401			"Splicing not supported (splice_locked)".to_owned(),
11402			msg.channel_id.clone())), counterparty_node_id);
11403	}
11404
11405	fn handle_shutdown(&self, counterparty_node_id: PublicKey, msg: &msgs::Shutdown) {
11406		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11407		let _ = handle_error!(self, self.internal_shutdown(&counterparty_node_id, msg), counterparty_node_id);
11408	}
11409
11410	fn handle_closing_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::ClosingSigned) {
11411		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11412		let _ = handle_error!(self, self.internal_closing_signed(&counterparty_node_id, msg), counterparty_node_id);
11413	}
11414
11415	fn handle_update_add_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateAddHTLC) {
11416		// Note that we never need to persist the updated ChannelManager for an inbound
11417		// update_add_htlc message - the message itself doesn't change our channel state only the
11418		// `commitment_signed` message afterwards will.
11419		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11420			let res = self.internal_update_add_htlc(&counterparty_node_id, msg);
11421			let persist = match &res {
11422				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
11423				Err(_) => NotifyOption::SkipPersistHandleEvents,
11424				Ok(()) => NotifyOption::SkipPersistNoEvents,
11425			};
11426			let _ = handle_error!(self, res, counterparty_node_id);
11427			persist
11428		});
11429	}
11430
11431	fn handle_update_fulfill_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFulfillHTLC) {
11432		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11433		let _ = handle_error!(self, self.internal_update_fulfill_htlc(&counterparty_node_id, msg), counterparty_node_id);
11434	}
11435
11436	fn handle_update_fail_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFailHTLC) {
11437		// Note that we never need to persist the updated ChannelManager for an inbound
11438		// update_fail_htlc message - the message itself doesn't change our channel state only the
11439		// `commitment_signed` message afterwards will.
11440		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11441			let res = self.internal_update_fail_htlc(&counterparty_node_id, msg);
11442			let persist = match &res {
11443				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
11444				Err(_) => NotifyOption::SkipPersistHandleEvents,
11445				Ok(()) => NotifyOption::SkipPersistNoEvents,
11446			};
11447			let _ = handle_error!(self, res, counterparty_node_id);
11448			persist
11449		});
11450	}
11451
11452	fn handle_update_fail_malformed_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFailMalformedHTLC) {
11453		// Note that we never need to persist the updated ChannelManager for an inbound
11454		// update_fail_malformed_htlc message - the message itself doesn't change our channel state
11455		// only the `commitment_signed` message afterwards will.
11456		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11457			let res = self.internal_update_fail_malformed_htlc(&counterparty_node_id, msg);
11458			let persist = match &res {
11459				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
11460				Err(_) => NotifyOption::SkipPersistHandleEvents,
11461				Ok(()) => NotifyOption::SkipPersistNoEvents,
11462			};
11463			let _ = handle_error!(self, res, counterparty_node_id);
11464			persist
11465		});
11466	}
11467
11468	fn handle_commitment_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::CommitmentSigned) {
11469		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11470		let _ = handle_error!(self, self.internal_commitment_signed(&counterparty_node_id, msg), counterparty_node_id);
11471	}
11472
11473	fn handle_revoke_and_ack(&self, counterparty_node_id: PublicKey, msg: &msgs::RevokeAndACK) {
11474		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11475		let _ = handle_error!(self, self.internal_revoke_and_ack(&counterparty_node_id, msg), counterparty_node_id);
11476	}
11477
11478	fn handle_update_fee(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFee) {
11479		// Note that we never need to persist the updated ChannelManager for an inbound
11480		// update_fee message - the message itself doesn't change our channel state only the
11481		// `commitment_signed` message afterwards will.
11482		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11483			let res = self.internal_update_fee(&counterparty_node_id, msg);
11484			let persist = match &res {
11485				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
11486				Err(_) => NotifyOption::SkipPersistHandleEvents,
11487				Ok(()) => NotifyOption::SkipPersistNoEvents,
11488			};
11489			let _ = handle_error!(self, res, counterparty_node_id);
11490			persist
11491		});
11492	}
11493
11494	fn handle_announcement_signatures(&self, counterparty_node_id: PublicKey, msg: &msgs::AnnouncementSignatures) {
11495		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11496		let _ = handle_error!(self, self.internal_announcement_signatures(&counterparty_node_id, msg), counterparty_node_id);
11497	}
11498
11499	fn handle_channel_update(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelUpdate) {
11500		PersistenceNotifierGuard::optionally_notify(self, || {
11501			if let Ok(persist) = handle_error!(self, self.internal_channel_update(&counterparty_node_id, msg), counterparty_node_id) {
11502				persist
11503			} else {
11504				NotifyOption::DoPersist
11505			}
11506		});
11507	}
11508
11509	fn handle_channel_reestablish(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelReestablish) {
11510		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11511			let res = self.internal_channel_reestablish(&counterparty_node_id, msg);
11512			let persist = match &res {
11513				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
11514				Err(_) => NotifyOption::SkipPersistHandleEvents,
11515				Ok(persist) => *persist,
11516			};
11517			let _ = handle_error!(self, res, counterparty_node_id);
11518			persist
11519		});
11520	}
11521
11522	fn peer_disconnected(&self, counterparty_node_id: PublicKey) {
11523		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(
11524			self, || NotifyOption::SkipPersistHandleEvents);
11525		let mut failed_channels = Vec::new();
11526		let mut per_peer_state = self.per_peer_state.write().unwrap();
11527		let remove_peer = {
11528			log_debug!(
11529				WithContext::from(&self.logger, Some(counterparty_node_id), None, None),
11530				"Marking channels with {} disconnected and generating channel_updates.",
11531				log_pubkey!(counterparty_node_id)
11532			);
11533			if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
11534				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
11535				let peer_state = &mut *peer_state_lock;
11536				let pending_msg_events = &mut peer_state.pending_msg_events;
11537				peer_state.channel_by_id.retain(|_, phase| {
11538					let context = match phase {
11539						ChannelPhase::Funded(chan) => {
11540							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
11541							if chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok() {
11542								// We only retain funded channels that are not shutdown.
11543								return true;
11544							}
11545							&mut chan.context
11546						},
11547						// If we get disconnected and haven't yet committed to a funding
11548						// transaction, we can replay the `open_channel` on reconnection, so don't
11549						// bother dropping the channel here. However, if we already committed to
11550						// the funding transaction we don't yet support replaying the funding
11551						// handshake (and bailing if the peer rejects it), so we force-close in
11552						// that case.
11553						ChannelPhase::UnfundedOutboundV1(chan) if chan.is_resumable() => return true,
11554						ChannelPhase::UnfundedOutboundV1(chan) => &mut chan.context,
11555						// Unfunded inbound channels will always be removed.
11556						ChannelPhase::UnfundedInboundV1(chan) => {
11557							&mut chan.context
11558						},
11559						ChannelPhase::UnfundedOutboundV2(chan) => {
11560							&mut chan.context
11561						},
11562						ChannelPhase::UnfundedInboundV2(chan) => {
11563							&mut chan.context
11564						},
11565					};
11566					// Clean up for removal.
11567					let mut close_res = context.force_shutdown(false, ClosureReason::DisconnectedPeer);
11568					locked_close_channel!(self, peer_state, &context, close_res);
11569					failed_channels.push(close_res);
11570					false
11571				});
11572				// Note that we don't bother generating any events for pre-accept channels -
11573				// they're not considered "channels" yet from the PoV of our events interface.
11574				peer_state.inbound_channel_request_by_id.clear();
11575				pending_msg_events.retain(|msg| {
11576					match msg {
11577						// V1 Channel Establishment
11578						&events::MessageSendEvent::SendAcceptChannel { .. } => false,
11579						&events::MessageSendEvent::SendOpenChannel { .. } => false,
11580						&events::MessageSendEvent::SendFundingCreated { .. } => false,
11581						&events::MessageSendEvent::SendFundingSigned { .. } => false,
11582						// V2 Channel Establishment
11583						&events::MessageSendEvent::SendAcceptChannelV2 { .. } => false,
11584						&events::MessageSendEvent::SendOpenChannelV2 { .. } => false,
11585						// Common Channel Establishment
11586						&events::MessageSendEvent::SendChannelReady { .. } => false,
11587						&events::MessageSendEvent::SendAnnouncementSignatures { .. } => false,
11588						// Quiescence
11589						&events::MessageSendEvent::SendStfu { .. } => false,
11590						// Splicing
11591						&events::MessageSendEvent::SendSpliceInit { .. } => false,
11592						&events::MessageSendEvent::SendSpliceAck { .. } => false,
11593						&events::MessageSendEvent::SendSpliceLocked { .. } => false,
11594						// Interactive Transaction Construction
11595						&events::MessageSendEvent::SendTxAddInput { .. } => false,
11596						&events::MessageSendEvent::SendTxAddOutput { .. } => false,
11597						&events::MessageSendEvent::SendTxRemoveInput { .. } => false,
11598						&events::MessageSendEvent::SendTxRemoveOutput { .. } => false,
11599						&events::MessageSendEvent::SendTxComplete { .. } => false,
11600						&events::MessageSendEvent::SendTxSignatures { .. } => false,
11601						&events::MessageSendEvent::SendTxInitRbf { .. } => false,
11602						&events::MessageSendEvent::SendTxAckRbf { .. } => false,
11603						&events::MessageSendEvent::SendTxAbort { .. } => false,
11604						// Channel Operations
11605						&events::MessageSendEvent::UpdateHTLCs { .. } => false,
11606						&events::MessageSendEvent::SendRevokeAndACK { .. } => false,
11607						&events::MessageSendEvent::SendClosingSigned { .. } => false,
11608						&events::MessageSendEvent::SendShutdown { .. } => false,
11609						&events::MessageSendEvent::SendChannelReestablish { .. } => false,
11610						&events::MessageSendEvent::HandleError { .. } => false,
11611						// Gossip
11612						&events::MessageSendEvent::SendChannelAnnouncement { .. } => false,
11613						&events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
11614						// [`ChannelManager::pending_broadcast_events`] holds the [`BroadcastChannelUpdate`]
11615						// This check here is to ensure exhaustivity.
11616						&events::MessageSendEvent::BroadcastChannelUpdate { .. } => {
11617							debug_assert!(false, "This event shouldn't have been here");
11618							false
11619						},
11620						&events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
11621						&events::MessageSendEvent::SendChannelUpdate { .. } => false,
11622						&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
11623						&events::MessageSendEvent::SendShortIdsQuery { .. } => false,
11624						&events::MessageSendEvent::SendReplyChannelRange { .. } => false,
11625						&events::MessageSendEvent::SendGossipTimestampFilter { .. } => false,
11626					}
11627				});
11628				debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect");
11629				peer_state.is_connected = false;
11630				peer_state.ok_to_remove(true)
11631			} else { debug_assert!(false, "Unconnected peer disconnected"); true }
11632		};
11633		if remove_peer {
11634			per_peer_state.remove(&counterparty_node_id);
11635		}
11636		mem::drop(per_peer_state);
11637
11638		for failure in failed_channels.drain(..) {
11639			self.finish_close_channel(failure);
11640		}
11641	}
11642
11643	fn peer_connected(&self, counterparty_node_id: PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> {
11644		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
11645		if !init_msg.features.supports_static_remote_key() {
11646			log_debug!(logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
11647			return Err(());
11648		}
11649
11650		let mut res = Ok(());
11651
11652		PersistenceNotifierGuard::optionally_notify(self, || {
11653			// If we have too many peers connected which don't have funded channels, disconnect the
11654			// peer immediately (as long as it doesn't have funded channels). If we have a bunch of
11655			// unfunded channels taking up space in memory for disconnected peers, we still let new
11656			// peers connect, but we'll reject new channels from them.
11657			let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected);
11658			let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
11659
11660			{
11661				let mut peer_state_lock = self.per_peer_state.write().unwrap();
11662				match peer_state_lock.entry(counterparty_node_id.clone()) {
11663					hash_map::Entry::Vacant(e) => {
11664						if inbound_peer_limited {
11665							res = Err(());
11666							return NotifyOption::SkipPersistNoEvents;
11667						}
11668						e.insert(Mutex::new(PeerState {
11669							channel_by_id: new_hash_map(),
11670							inbound_channel_request_by_id: new_hash_map(),
11671							latest_features: init_msg.features.clone(),
11672							pending_msg_events: Vec::new(),
11673							in_flight_monitor_updates: BTreeMap::new(),
11674							monitor_update_blocked_actions: BTreeMap::new(),
11675							actions_blocking_raa_monitor_updates: BTreeMap::new(),
11676							closed_channel_monitor_update_ids: BTreeMap::new(),
11677							is_connected: true,
11678						}));
11679					},
11680					hash_map::Entry::Occupied(e) => {
11681						let mut peer_state = e.get().lock().unwrap();
11682						peer_state.latest_features = init_msg.features.clone();
11683
11684						let best_block_height = self.best_block.read().unwrap().height;
11685						if inbound_peer_limited &&
11686							Self::unfunded_channel_count(&*peer_state, best_block_height) ==
11687							peer_state.channel_by_id.len()
11688						{
11689							res = Err(());
11690							return NotifyOption::SkipPersistNoEvents;
11691						}
11692
11693						debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
11694						peer_state.is_connected = true;
11695					},
11696				}
11697			}
11698
11699			log_debug!(logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
11700
11701			let per_peer_state = self.per_peer_state.read().unwrap();
11702			if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
11703				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
11704				let peer_state = &mut *peer_state_lock;
11705				let pending_msg_events = &mut peer_state.pending_msg_events;
11706
11707				for (_, phase) in peer_state.channel_by_id.iter_mut() {
11708					match phase {
11709						ChannelPhase::Funded(chan) => {
11710							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
11711							pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
11712								node_id: chan.context.get_counterparty_node_id(),
11713								msg: chan.get_channel_reestablish(&&logger),
11714							});
11715						}
11716
11717						ChannelPhase::UnfundedOutboundV1(chan) => {
11718							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
11719							if let Some(msg) = chan.get_open_channel(self.chain_hash, &&logger) {
11720								pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
11721									node_id: chan.context.get_counterparty_node_id(),
11722									msg,
11723								});
11724							}
11725						}
11726
11727						ChannelPhase::UnfundedOutboundV2(chan) => {
11728							pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 {
11729								node_id: chan.context.get_counterparty_node_id(),
11730								msg: chan.get_open_channel_v2(self.chain_hash),
11731							});
11732						},
11733
11734						ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedInboundV2(_) => {
11735							// Since unfunded inbound channel maps are cleared upon disconnecting a peer,
11736							// they are not persisted and won't be recovered after a crash.
11737							// Therefore, they shouldn't exist at this point.
11738							debug_assert!(false);
11739						}
11740					}
11741				}
11742			}
11743
11744			return NotifyOption::SkipPersistHandleEvents;
11745			//TODO: Also re-broadcast announcement_signatures
11746		});
11747		res
11748	}
11749
11750	fn handle_error(&self, counterparty_node_id: PublicKey, msg: &msgs::ErrorMessage) {
11751		match &msg.data as &str {
11752			"cannot co-op close channel w/ active htlcs"|
11753			"link failed to shutdown" =>
11754			{
11755				// LND hasn't properly handled shutdown messages ever, and force-closes any time we
11756				// send one while HTLCs are still present. The issue is tracked at
11757				// https://github.com/lightningnetwork/lnd/issues/6039 and has had multiple patches
11758				// to fix it but none so far have managed to land upstream. The issue appears to be
11759				// very low priority for the LND team despite being marked "P1".
11760				// We're not going to bother handling this in a sensible way, instead simply
11761				// repeating the Shutdown message on repeat until morale improves.
11762				if !msg.channel_id.is_zero() {
11763					PersistenceNotifierGuard::optionally_notify(
11764						self,
11765						|| -> NotifyOption {
11766							let per_peer_state = self.per_peer_state.read().unwrap();
11767							let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
11768							if peer_state_mutex_opt.is_none() { return NotifyOption::SkipPersistNoEvents; }
11769							let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
11770							if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
11771								if let Some(msg) = chan.get_outbound_shutdown() {
11772									peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
11773										node_id: counterparty_node_id,
11774										msg,
11775									});
11776								}
11777								peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
11778									node_id: counterparty_node_id,
11779									action: msgs::ErrorAction::SendWarningMessage {
11780										msg: msgs::WarningMessage {
11781											channel_id: msg.channel_id,
11782											data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
11783										},
11784										log_level: Level::Trace,
11785									}
11786								});
11787								// This can happen in a fairly tight loop, so we absolutely cannot trigger
11788								// a `ChannelManager` write here.
11789								return NotifyOption::SkipPersistHandleEvents;
11790							}
11791							NotifyOption::SkipPersistNoEvents
11792						}
11793					);
11794				}
11795				return;
11796			}
11797			_ => {}
11798		}
11799
11800		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11801
11802		if msg.channel_id.is_zero() {
11803			let channel_ids: Vec<ChannelId> = {
11804				let per_peer_state = self.per_peer_state.read().unwrap();
11805				let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
11806				if peer_state_mutex_opt.is_none() { return; }
11807				let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
11808				let peer_state = &mut *peer_state_lock;
11809				// Note that we don't bother generating any events for pre-accept channels -
11810				// they're not considered "channels" yet from the PoV of our events interface.
11811				peer_state.inbound_channel_request_by_id.clear();
11812				peer_state.channel_by_id.keys().cloned().collect()
11813			};
11814			for channel_id in channel_ids {
11815				// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
11816				let _ = self.force_close_channel_with_peer(&channel_id, &counterparty_node_id, Some(&msg.data), true);
11817			}
11818		} else {
11819			{
11820				// First check if we can advance the channel type and try again.
11821				let per_peer_state = self.per_peer_state.read().unwrap();
11822				let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
11823				if peer_state_mutex_opt.is_none() { return; }
11824				let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
11825				let peer_state = &mut *peer_state_lock;
11826				match peer_state.channel_by_id.get_mut(&msg.channel_id) {
11827					Some(ChannelPhase::UnfundedOutboundV1(ref mut chan)) => {
11828						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
11829						if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator, &&logger) {
11830							peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
11831								node_id: counterparty_node_id,
11832								msg,
11833							});
11834							return;
11835						}
11836					},
11837					Some(ChannelPhase::UnfundedOutboundV2(ref mut chan)) => {
11838						if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
11839							peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 {
11840								node_id: counterparty_node_id,
11841								msg,
11842							});
11843							return;
11844						}
11845					},
11846					None | Some(ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedInboundV2(_) | ChannelPhase::Funded(_)) => (),
11847				}
11848			}
11849
11850			// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
11851			let _ = self.force_close_channel_with_peer(&msg.channel_id, &counterparty_node_id, Some(&msg.data), true);
11852		}
11853	}
11854
11855	fn provided_node_features(&self) -> NodeFeatures {
11856		provided_node_features(&self.default_configuration)
11857	}
11858
11859	fn provided_init_features(&self, _their_init_features: PublicKey) -> InitFeatures {
11860		provided_init_features(&self.default_configuration)
11861	}
11862
11863	fn get_chain_hashes(&self) -> Option<Vec<ChainHash>> {
11864		Some(vec![self.chain_hash])
11865	}
11866
11867	fn handle_tx_add_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddInput) {
11868		// Note that we never need to persist the updated ChannelManager for an inbound
11869		// tx_add_input message - interactive transaction construction does not need to
11870		// be persisted before any signatures are exchanged.
11871		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11872			let _ = handle_error!(self, self.internal_tx_add_input(counterparty_node_id, msg), counterparty_node_id);
11873			NotifyOption::SkipPersistHandleEvents
11874		});
11875	}
11876
11877	fn handle_tx_add_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddOutput) {
11878		// Note that we never need to persist the updated ChannelManager for an inbound
11879		// tx_add_output message - interactive transaction construction does not need to
11880		// be persisted before any signatures are exchanged.
11881		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11882			let _ = handle_error!(self, self.internal_tx_add_output(counterparty_node_id, msg), counterparty_node_id);
11883			NotifyOption::SkipPersistHandleEvents
11884		});
11885	}
11886
11887	fn handle_tx_remove_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveInput) {
11888		// Note that we never need to persist the updated ChannelManager for an inbound
11889		// tx_remove_input message - interactive transaction construction does not need to
11890		// be persisted before any signatures are exchanged.
11891		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11892			let _ = handle_error!(self, self.internal_tx_remove_input(counterparty_node_id, msg), counterparty_node_id);
11893			NotifyOption::SkipPersistHandleEvents
11894		});
11895	}
11896
11897	fn handle_tx_remove_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveOutput) {
11898		// Note that we never need to persist the updated ChannelManager for an inbound
11899		// tx_remove_output message - interactive transaction construction does not need to
11900		// be persisted before any signatures are exchanged.
11901		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11902			let _ = handle_error!(self, self.internal_tx_remove_output(counterparty_node_id, msg), counterparty_node_id);
11903			NotifyOption::SkipPersistHandleEvents
11904		});
11905	}
11906
11907	fn handle_tx_complete(&self, counterparty_node_id: PublicKey, msg: &msgs::TxComplete) {
11908		// Note that we never need to persist the updated ChannelManager for an inbound
11909		// tx_complete message - interactive transaction construction does not need to
11910		// be persisted before any signatures are exchanged.
11911		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11912			let _ = handle_error!(self, self.internal_tx_complete(counterparty_node_id, msg), counterparty_node_id);
11913			NotifyOption::SkipPersistHandleEvents
11914		});
11915	}
11916
11917	fn handle_tx_signatures(&self, counterparty_node_id: PublicKey, msg: &msgs::TxSignatures) {
11918		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11919		let _ = handle_error!(self, self.internal_tx_signatures(&counterparty_node_id, msg), counterparty_node_id);
11920	}
11921
11922	fn handle_tx_init_rbf(&self, counterparty_node_id: PublicKey, msg: &msgs::TxInitRbf) {
11923		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
11924			"Dual-funded channels not supported".to_owned(),
11925			msg.channel_id.clone())), counterparty_node_id);
11926	}
11927
11928	fn handle_tx_ack_rbf(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAckRbf) {
11929		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
11930			"Dual-funded channels not supported".to_owned(),
11931			msg.channel_id.clone())), counterparty_node_id);
11932	}
11933
11934	fn handle_tx_abort(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAbort) {
11935		// Note that we never need to persist the updated ChannelManager for an inbound
11936		// tx_abort message - interactive transaction construction does not need to
11937		// be persisted before any signatures are exchanged.
11938		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11939			let _ = handle_error!(self, self.internal_tx_abort(&counterparty_node_id, msg), counterparty_node_id);
11940			NotifyOption::SkipPersistHandleEvents
11941		});
11942	}
11943
11944	fn message_received(&self) {
11945		for (payment_id, retryable_invoice_request) in self
11946			.pending_outbound_payments
11947			.release_invoice_requests_awaiting_invoice()
11948		{
11949			let RetryableInvoiceRequest { invoice_request, nonce } = retryable_invoice_request;
11950			let hmac = payment_id.hmac_for_offer_payment(nonce, &self.inbound_payment_key);
11951			let context = MessageContext::Offers(OffersContext::OutboundPayment {
11952				payment_id,
11953				nonce,
11954				hmac: Some(hmac)
11955			});
11956			match self.create_blinded_paths(context) {
11957				Ok(reply_paths) => match self.enqueue_invoice_request(invoice_request, reply_paths) {
11958					Ok(_) => {}
11959					Err(_) => {
11960						log_warn!(self.logger,
11961							"Retry failed for an invoice request with payment_id: {}",
11962							payment_id
11963						);
11964					}
11965				},
11966				Err(_) => {
11967					log_warn!(self.logger,
11968						"Retry failed for an invoice request with payment_id: {}. \
11969							Reason: router could not find a blinded path to include as the reply path",
11970						payment_id
11971					);
11972				}
11973			}
11974		}
11975	}
11976}
11977
11978impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
11979OffersMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
11980where
11981	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
11982	T::Target: BroadcasterInterface,
11983	ES::Target: EntropySource,
11984	NS::Target: NodeSigner,
11985	SP::Target: SignerProvider,
11986	F::Target: FeeEstimator,
11987	R::Target: Router,
11988	MR::Target: MessageRouter,
11989	L::Target: Logger,
11990{
11991	fn handle_message(
11992		&self, message: OffersMessage, context: Option<OffersContext>, responder: Option<Responder>,
11993	) -> Option<(OffersMessage, ResponseInstruction)> {
11994		let secp_ctx = &self.secp_ctx;
11995		let expanded_key = &self.inbound_payment_key;
11996
11997		macro_rules! handle_pay_invoice_res {
11998			($res: expr, $invoice: expr, $logger: expr) => {{
11999				let error = match $res {
12000					Err(Bolt12PaymentError::UnknownRequiredFeatures) => {
12001						log_trace!(
12002							$logger, "Invoice requires unknown features: {:?}",
12003							$invoice.invoice_features()
12004						);
12005						InvoiceError::from(Bolt12SemanticError::UnknownRequiredFeatures)
12006					},
12007					Err(Bolt12PaymentError::SendingFailed(e)) => {
12008						log_trace!($logger, "Failed paying invoice: {:?}", e);
12009						InvoiceError::from_string(format!("{:?}", e))
12010					},
12011					#[cfg(async_payments)]
12012					Err(Bolt12PaymentError::BlindedPathCreationFailed) => {
12013						let err_msg = "Failed to create a blinded path back to ourselves";
12014						log_trace!($logger, "{}", err_msg);
12015						InvoiceError::from_string(err_msg.to_string())
12016					},
12017					Err(Bolt12PaymentError::UnexpectedInvoice)
12018						| Err(Bolt12PaymentError::DuplicateInvoice)
12019						| Ok(()) => return None,
12020				};
12021
12022				match responder {
12023					Some(responder) => return Some((OffersMessage::InvoiceError(error), responder.respond())),
12024					None => {
12025						log_trace!($logger, "No reply path to send error: {:?}", error);
12026						return None
12027					},
12028				}
12029			}}
12030		}
12031
12032		match message {
12033			OffersMessage::InvoiceRequest(invoice_request) => {
12034				let responder = match responder {
12035					Some(responder) => responder,
12036					None => return None,
12037				};
12038
12039				let nonce = match context {
12040					None if invoice_request.metadata().is_some() => None,
12041					Some(OffersContext::InvoiceRequest { nonce }) => Some(nonce),
12042					_ => return None,
12043				};
12044
12045				let invoice_request = match nonce {
12046					Some(nonce) => match invoice_request.verify_using_recipient_data(
12047						nonce, expanded_key, secp_ctx,
12048					) {
12049						Ok(invoice_request) => invoice_request,
12050						Err(()) => return None,
12051					},
12052					None => match invoice_request.verify_using_metadata(expanded_key, secp_ctx) {
12053						Ok(invoice_request) => invoice_request,
12054						Err(()) => return None,
12055					},
12056				};
12057
12058				let amount_msats = match InvoiceBuilder::<DerivedSigningPubkey>::amount_msats(
12059					&invoice_request.inner
12060				) {
12061					Ok(amount_msats) => amount_msats,
12062					Err(error) => return Some((OffersMessage::InvoiceError(error.into()), responder.respond())),
12063				};
12064
12065				let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
12066				let (payment_hash, payment_secret) = match self.create_inbound_payment(
12067					Some(amount_msats), relative_expiry, None
12068				) {
12069					Ok((payment_hash, payment_secret)) => (payment_hash, payment_secret),
12070					Err(()) => {
12071						let error = Bolt12SemanticError::InvalidAmount;
12072						return Some((OffersMessage::InvoiceError(error.into()), responder.respond()));
12073					},
12074				};
12075
12076				let payment_context = PaymentContext::Bolt12Offer(Bolt12OfferContext {
12077					offer_id: invoice_request.offer_id,
12078					invoice_request: invoice_request.fields(),
12079				});
12080				let payment_paths = match self.create_blinded_payment_paths(
12081					amount_msats, payment_secret, payment_context
12082				) {
12083					Ok(payment_paths) => payment_paths,
12084					Err(()) => {
12085						let error = Bolt12SemanticError::MissingPaths;
12086						return Some((OffersMessage::InvoiceError(error.into()), responder.respond()));
12087					},
12088				};
12089
12090				#[cfg(not(feature = "std"))]
12091				let created_at = Duration::from_secs(
12092					self.highest_seen_timestamp.load(Ordering::Acquire) as u64
12093				);
12094
12095				let response = if invoice_request.keys.is_some() {
12096					#[cfg(feature = "std")]
12097					let builder = invoice_request.respond_using_derived_keys(
12098						payment_paths, payment_hash
12099					);
12100					#[cfg(not(feature = "std"))]
12101					let builder = invoice_request.respond_using_derived_keys_no_std(
12102						payment_paths, payment_hash, created_at
12103					);
12104					builder
12105						.map(InvoiceBuilder::<DerivedSigningPubkey>::from)
12106						.and_then(|builder| builder.allow_mpp().build_and_sign(secp_ctx))
12107						.map_err(InvoiceError::from)
12108				} else {
12109					#[cfg(feature = "std")]
12110					let builder = invoice_request.respond_with(payment_paths, payment_hash);
12111					#[cfg(not(feature = "std"))]
12112					let builder = invoice_request.respond_with_no_std(
12113						payment_paths, payment_hash, created_at
12114					);
12115					builder
12116						.map(InvoiceBuilder::<ExplicitSigningPubkey>::from)
12117						.and_then(|builder| builder.allow_mpp().build())
12118						.map_err(InvoiceError::from)
12119						.and_then(|invoice| {
12120							#[cfg(c_bindings)]
12121							let mut invoice = invoice;
12122							invoice
12123								.sign(|invoice: &UnsignedBolt12Invoice|
12124									self.node_signer.sign_bolt12_invoice(invoice)
12125								)
12126								.map_err(InvoiceError::from)
12127						})
12128				};
12129
12130				match response {
12131					Ok(invoice) => {
12132						let nonce = Nonce::from_entropy_source(&*self.entropy_source);
12133						let hmac = payment_hash.hmac_for_offer_payment(nonce, expanded_key);
12134						let context = MessageContext::Offers(OffersContext::InboundPayment { payment_hash, nonce, hmac });
12135						Some((OffersMessage::Invoice(invoice), responder.respond_with_reply_path(context)))
12136					},
12137					Err(error) => Some((OffersMessage::InvoiceError(error.into()), responder.respond())),
12138				}
12139			},
12140			OffersMessage::Invoice(invoice) => {
12141				let payment_id = match self.verify_bolt12_invoice(&invoice, context.as_ref()) {
12142					Ok(payment_id) => payment_id,
12143					Err(()) => return None,
12144				};
12145
12146				let logger = WithContext::from(
12147					&self.logger, None, None, Some(invoice.payment_hash()),
12148				);
12149
12150				if self.default_configuration.manually_handle_bolt12_invoices {
12151					let event = Event::InvoiceReceived {
12152						payment_id, invoice, context, responder,
12153					};
12154					self.pending_events.lock().unwrap().push_back((event, None));
12155					return None;
12156				}
12157
12158				let res = self.send_payment_for_verified_bolt12_invoice(&invoice, payment_id);
12159				handle_pay_invoice_res!(res, invoice, logger);
12160			},
12161			#[cfg(async_payments)]
12162			OffersMessage::StaticInvoice(invoice) => {
12163				let payment_id = match context {
12164					Some(OffersContext::OutboundPayment { payment_id, nonce, hmac: Some(hmac) }) => {
12165						if payment_id.verify_for_offer_payment(hmac, nonce, expanded_key).is_err() {
12166							return None
12167						}
12168						payment_id
12169					},
12170					_ => return None
12171				};
12172				let res = self.initiate_async_payment(&invoice, payment_id);
12173				handle_pay_invoice_res!(res, invoice, self.logger);
12174			},
12175			OffersMessage::InvoiceError(invoice_error) => {
12176				let payment_hash = match context {
12177					Some(OffersContext::InboundPayment { payment_hash, nonce, hmac }) => {
12178						match payment_hash.verify_for_offer_payment(hmac, nonce, expanded_key) {
12179							Ok(_) => Some(payment_hash),
12180							Err(_) => None,
12181						}
12182					},
12183					_ => None,
12184				};
12185
12186				let logger = WithContext::from(&self.logger, None, None, payment_hash);
12187				log_trace!(logger, "Received invoice_error: {}", invoice_error);
12188
12189				match context {
12190					Some(OffersContext::OutboundPayment { payment_id, nonce, hmac: Some(hmac) }) => {
12191						if let Ok(()) = payment_id.verify_for_offer_payment(hmac, nonce, expanded_key) {
12192							self.abandon_payment_with_reason(
12193								payment_id, PaymentFailureReason::InvoiceRequestRejected,
12194							);
12195						}
12196					},
12197					_ => {},
12198				}
12199
12200				None
12201			},
12202		}
12203	}
12204
12205	fn release_pending_messages(&self) -> Vec<(OffersMessage, MessageSendInstructions)> {
12206		core::mem::take(&mut self.pending_offers_messages.lock().unwrap())
12207	}
12208}
12209
12210impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
12211AsyncPaymentsMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
12212where
12213	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
12214	T::Target: BroadcasterInterface,
12215	ES::Target: EntropySource,
12216	NS::Target: NodeSigner,
12217	SP::Target: SignerProvider,
12218	F::Target: FeeEstimator,
12219	R::Target: Router,
12220	MR::Target: MessageRouter,
12221	L::Target: Logger,
12222{
12223	fn handle_held_htlc_available(
12224		&self, _message: HeldHtlcAvailable, _responder: Option<Responder>
12225	) -> Option<(ReleaseHeldHtlc, ResponseInstruction)> {
12226		None
12227	}
12228
12229	fn handle_release_held_htlc(&self, _message: ReleaseHeldHtlc, _context: AsyncPaymentsContext) {
12230		#[cfg(async_payments)] {
12231			let AsyncPaymentsContext::OutboundPayment { payment_id, hmac, nonce } = _context;
12232			if payment_id.verify_for_async_payment(hmac, nonce, &self.inbound_payment_key).is_err() { return }
12233			if let Err(e) = self.send_payment_for_static_invoice(payment_id) {
12234				log_trace!(
12235					self.logger, "Failed to release held HTLC with payment id {}: {:?}", payment_id, e
12236				);
12237			}
12238		}
12239	}
12240
12241	fn release_pending_messages(&self) -> Vec<(AsyncPaymentsMessage, MessageSendInstructions)> {
12242		core::mem::take(&mut self.pending_async_payments_messages.lock().unwrap())
12243	}
12244}
12245
12246#[cfg(feature = "dnssec")]
12247impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
12248DNSResolverMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
12249where
12250	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
12251	T::Target: BroadcasterInterface,
12252	ES::Target: EntropySource,
12253	NS::Target: NodeSigner,
12254	SP::Target: SignerProvider,
12255	F::Target: FeeEstimator,
12256	R::Target: Router,
12257	MR::Target: MessageRouter,
12258	L::Target: Logger,
12259{
12260	fn handle_dnssec_query(
12261		&self, _message: DNSSECQuery, _responder: Option<Responder>,
12262	) -> Option<(DNSResolverMessage, ResponseInstruction)> {
12263		None
12264	}
12265
12266	fn handle_dnssec_proof(&self, message: DNSSECProof, context: DNSResolverContext) {
12267		let offer_opt = self.hrn_resolver.handle_dnssec_proof_for_offer(message, context);
12268		#[cfg_attr(not(feature = "_test_utils"), allow(unused_mut))]
12269		if let Some((completed_requests, mut offer)) = offer_opt {
12270			for (name, payment_id) in completed_requests {
12271				#[cfg(feature = "_test_utils")]
12272				if let Some(replacement_offer) = self.testing_dnssec_proof_offer_resolution_override.lock().unwrap().remove(&name) {
12273					// If we have multiple pending requests we may end up over-using the override
12274					// offer, but tests can deal with that.
12275					offer = replacement_offer;
12276				}
12277				if let Ok(amt_msats) = self.pending_outbound_payments.amt_msats_for_payment_awaiting_offer(payment_id) {
12278					let offer_pay_res =
12279						self.pay_for_offer_intern(&offer, None, Some(amt_msats), None, payment_id, Some(name),
12280							|invoice_request, nonce| {
12281								let retryable_invoice_request = RetryableInvoiceRequest {
12282									invoice_request: invoice_request.clone(),
12283									nonce,
12284								};
12285								self.pending_outbound_payments
12286									.received_offer(payment_id, Some(retryable_invoice_request))
12287									.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
12288						});
12289					if offer_pay_res.is_err() {
12290						// The offer we tried to pay is the canonical current offer for the name we
12291						// wanted to pay. If we can't pay it, there's no way to recover so fail the
12292						// payment.
12293						// Note that the PaymentFailureReason should be ignored for an
12294						// AwaitingInvoice payment.
12295						self.pending_outbound_payments.abandon_payment(
12296							payment_id, PaymentFailureReason::RouteNotFound, &self.pending_events,
12297						);
12298					}
12299				}
12300			}
12301		}
12302	}
12303
12304	fn release_pending_messages(&self) -> Vec<(DNSResolverMessage, MessageSendInstructions)> {
12305		core::mem::take(&mut self.pending_dns_onion_messages.lock().unwrap())
12306	}
12307}
12308
12309impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
12310NodeIdLookUp for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
12311where
12312	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
12313	T::Target: BroadcasterInterface,
12314	ES::Target: EntropySource,
12315	NS::Target: NodeSigner,
12316	SP::Target: SignerProvider,
12317	F::Target: FeeEstimator,
12318	R::Target: Router,
12319	MR::Target: MessageRouter,
12320	L::Target: Logger,
12321{
12322	fn next_node_id(&self, short_channel_id: u64) -> Option<PublicKey> {
12323		self.short_to_chan_info.read().unwrap().get(&short_channel_id).map(|(pubkey, _)| *pubkey)
12324	}
12325}
12326
12327/// Fetches the set of [`NodeFeatures`] flags that are provided by or required by
12328/// [`ChannelManager`].
12329pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures {
12330	let mut node_features = provided_init_features(config).to_context();
12331	node_features.set_keysend_optional();
12332	node_features
12333}
12334
12335/// Fetches the set of [`Bolt11InvoiceFeatures`] flags that are provided by or required by
12336/// [`ChannelManager`].
12337///
12338/// Note that the invoice feature flags can vary depending on if the invoice is a "phantom invoice"
12339/// or not. Thus, this method is not public.
12340#[cfg(any(feature = "_test_utils", test))]
12341pub(crate) fn provided_bolt11_invoice_features(config: &UserConfig) -> Bolt11InvoiceFeatures {
12342	provided_init_features(config).to_context()
12343}
12344
12345/// Fetches the set of [`Bolt12InvoiceFeatures`] flags that are provided by or required by
12346/// [`ChannelManager`].
12347pub(crate) fn provided_bolt12_invoice_features(config: &UserConfig) -> Bolt12InvoiceFeatures {
12348	provided_init_features(config).to_context()
12349}
12350
12351/// Fetches the set of [`ChannelFeatures`] flags that are provided by or required by
12352/// [`ChannelManager`].
12353pub(crate) fn provided_channel_features(config: &UserConfig) -> ChannelFeatures {
12354	provided_init_features(config).to_context()
12355}
12356
12357/// Fetches the set of [`ChannelTypeFeatures`] flags that are provided by or required by
12358/// [`ChannelManager`].
12359pub(crate) fn provided_channel_type_features(config: &UserConfig) -> ChannelTypeFeatures {
12360	ChannelTypeFeatures::from_init(&provided_init_features(config))
12361}
12362
12363/// Fetches the set of [`InitFeatures`] flags that are provided by or required by
12364/// [`ChannelManager`].
12365pub fn provided_init_features(config: &UserConfig) -> InitFeatures {
12366	// Note that if new features are added here which other peers may (eventually) require, we
12367	// should also add the corresponding (optional) bit to the [`ChannelMessageHandler`] impl for
12368	// [`ErroringMessageHandler`].
12369	let mut features = InitFeatures::empty();
12370	features.set_data_loss_protect_required();
12371	features.set_upfront_shutdown_script_optional();
12372	features.set_variable_length_onion_required();
12373	features.set_static_remote_key_required();
12374	features.set_payment_secret_required();
12375	features.set_basic_mpp_optional();
12376	features.set_wumbo_optional();
12377	features.set_shutdown_any_segwit_optional();
12378	features.set_channel_type_optional();
12379	features.set_scid_privacy_optional();
12380	features.set_zero_conf_optional();
12381	features.set_route_blinding_optional();
12382	if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx {
12383		features.set_anchors_zero_fee_htlc_tx_optional();
12384	}
12385	#[cfg(dual_funding)]
12386	features.set_dual_fund_optional();
12387	features
12388}
12389
12390const SERIALIZATION_VERSION: u8 = 1;
12391const MIN_SERIALIZATION_VERSION: u8 = 1;
12392
12393impl_writeable_tlv_based!(PhantomRouteHints, {
12394	(2, channels, required_vec),
12395	(4, phantom_scid, required),
12396	(6, real_node_pubkey, required),
12397});
12398
12399impl_writeable_tlv_based!(BlindedForward, {
12400	(0, inbound_blinding_point, required),
12401	(1, failure, (default_value, BlindedFailure::FromIntroductionNode)),
12402	(3, next_blinding_override, option),
12403});
12404
12405impl_writeable_tlv_based_enum!(PendingHTLCRouting,
12406	(0, Forward) => {
12407		(0, onion_packet, required),
12408		(1, blinded, option),
12409		(2, short_channel_id, required),
12410		(3, incoming_cltv_expiry, option),
12411	},
12412	(1, Receive) => {
12413		(0, payment_data, required),
12414		(1, phantom_shared_secret, option),
12415		(2, incoming_cltv_expiry, required),
12416		(3, payment_metadata, option),
12417		(5, custom_tlvs, optional_vec),
12418		(7, requires_blinded_error, (default_value, false)),
12419		(9, payment_context, option),
12420	},
12421	(2, ReceiveKeysend) => {
12422		(0, payment_preimage, required),
12423		(1, requires_blinded_error, (default_value, false)),
12424		(2, incoming_cltv_expiry, required),
12425		(3, payment_metadata, option),
12426		(4, payment_data, option), // Added in 0.0.116
12427		(5, custom_tlvs, optional_vec),
12428		(7, has_recipient_created_payment_secret, (default_value, false)),
12429	},
12430);
12431
12432impl_writeable_tlv_based!(PendingHTLCInfo, {
12433	(0, routing, required),
12434	(2, incoming_shared_secret, required),
12435	(4, payment_hash, required),
12436	(6, outgoing_amt_msat, required),
12437	(8, outgoing_cltv_value, required),
12438	(9, incoming_amt_msat, option),
12439	(10, skimmed_fee_msat, option),
12440});
12441
12442
12443impl Writeable for HTLCFailureMsg {
12444	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
12445		match self {
12446			HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id, htlc_id, reason }) => {
12447				0u8.write(writer)?;
12448				channel_id.write(writer)?;
12449				htlc_id.write(writer)?;
12450				reason.write(writer)?;
12451			},
12452			HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
12453				channel_id, htlc_id, sha256_of_onion, failure_code
12454			}) => {
12455				1u8.write(writer)?;
12456				channel_id.write(writer)?;
12457				htlc_id.write(writer)?;
12458				sha256_of_onion.write(writer)?;
12459				failure_code.write(writer)?;
12460			},
12461		}
12462		Ok(())
12463	}
12464}
12465
12466impl Readable for HTLCFailureMsg {
12467	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
12468		let id: u8 = Readable::read(reader)?;
12469		match id {
12470			0 => {
12471				Ok(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
12472					channel_id: Readable::read(reader)?,
12473					htlc_id: Readable::read(reader)?,
12474					reason: Readable::read(reader)?,
12475				}))
12476			},
12477			1 => {
12478				Ok(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
12479					channel_id: Readable::read(reader)?,
12480					htlc_id: Readable::read(reader)?,
12481					sha256_of_onion: Readable::read(reader)?,
12482					failure_code: Readable::read(reader)?,
12483				}))
12484			},
12485			// In versions prior to 0.0.101, HTLCFailureMsg objects were written with type 0 or 1 but
12486			// weren't length-prefixed and thus didn't support reading the TLV stream suffix of the network
12487			// messages contained in the variants.
12488			// In version 0.0.101, support for reading the variants with these types was added, and
12489			// we should migrate to writing these variants when UpdateFailHTLC or
12490			// UpdateFailMalformedHTLC get TLV fields.
12491			2 => {
12492				let length: BigSize = Readable::read(reader)?;
12493				let mut s = FixedLengthReader::new(reader, length.0);
12494				let res = Readable::read(&mut s)?;
12495				s.eat_remaining()?; // Return ShortRead if there's actually not enough bytes
12496				Ok(HTLCFailureMsg::Relay(res))
12497			},
12498			3 => {
12499				let length: BigSize = Readable::read(reader)?;
12500				let mut s = FixedLengthReader::new(reader, length.0);
12501				let res = Readable::read(&mut s)?;
12502				s.eat_remaining()?; // Return ShortRead if there's actually not enough bytes
12503				Ok(HTLCFailureMsg::Malformed(res))
12504			},
12505			_ => Err(DecodeError::UnknownRequiredFeature),
12506		}
12507	}
12508}
12509
12510impl_writeable_tlv_based_enum_legacy!(PendingHTLCStatus, ;
12511	(0, Forward),
12512	(1, Fail),
12513);
12514
12515impl_writeable_tlv_based_enum!(BlindedFailure,
12516	(0, FromIntroductionNode) => {},
12517	(2, FromBlindedNode) => {},
12518);
12519
12520impl_writeable_tlv_based!(HTLCPreviousHopData, {
12521	(0, short_channel_id, required),
12522	(1, phantom_shared_secret, option),
12523	(2, outpoint, required),
12524	(3, blinded_failure, option),
12525	(4, htlc_id, required),
12526	(5, cltv_expiry, option),
12527	(6, incoming_packet_shared_secret, required),
12528	(7, user_channel_id, option),
12529	// Note that by the time we get past the required read for type 2 above, outpoint will be
12530	// filled in, so we can safely unwrap it here.
12531	(9, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(outpoint.0.unwrap()))),
12532	(11, counterparty_node_id, option),
12533});
12534
12535impl Writeable for ClaimableHTLC {
12536	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
12537		let (payment_data, keysend_preimage) = match &self.onion_payload {
12538			OnionPayload::Invoice { _legacy_hop_data } => {
12539				(_legacy_hop_data.as_ref(), None)
12540			},
12541			OnionPayload::Spontaneous(preimage) => (None, Some(preimage)),
12542		};
12543		write_tlv_fields!(writer, {
12544			(0, self.prev_hop, required),
12545			(1, self.total_msat, required),
12546			(2, self.value, required),
12547			(3, self.sender_intended_value, required),
12548			(4, payment_data, option),
12549			(5, self.total_value_received, option),
12550			(6, self.cltv_expiry, required),
12551			(8, keysend_preimage, option),
12552			(10, self.counterparty_skimmed_fee_msat, option),
12553		});
12554		Ok(())
12555	}
12556}
12557
12558impl Readable for ClaimableHTLC {
12559	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
12560		_init_and_read_len_prefixed_tlv_fields!(reader, {
12561			(0, prev_hop, required),
12562			(1, total_msat, option),
12563			(2, value_ser, required),
12564			(3, sender_intended_value, option),
12565			(4, payment_data_opt, option),
12566			(5, total_value_received, option),
12567			(6, cltv_expiry, required),
12568			(8, keysend_preimage, option),
12569			(10, counterparty_skimmed_fee_msat, option),
12570		});
12571		let payment_data: Option<msgs::FinalOnionHopData> = payment_data_opt;
12572		let value = value_ser.0.unwrap();
12573		let onion_payload = match keysend_preimage {
12574			Some(p) => {
12575				if payment_data.is_some() {
12576					return Err(DecodeError::InvalidValue)
12577				}
12578				if total_msat.is_none() {
12579					total_msat = Some(value);
12580				}
12581				OnionPayload::Spontaneous(p)
12582			},
12583			None => {
12584				if total_msat.is_none() {
12585					if payment_data.is_none() {
12586						return Err(DecodeError::InvalidValue)
12587					}
12588					total_msat = Some(payment_data.as_ref().unwrap().total_msat);
12589				}
12590				OnionPayload::Invoice { _legacy_hop_data: payment_data }
12591			},
12592		};
12593		Ok(Self {
12594			prev_hop: prev_hop.0.unwrap(),
12595			timer_ticks: 0,
12596			value,
12597			sender_intended_value: sender_intended_value.unwrap_or(value),
12598			total_value_received,
12599			total_msat: total_msat.unwrap(),
12600			onion_payload,
12601			cltv_expiry: cltv_expiry.0.unwrap(),
12602			counterparty_skimmed_fee_msat,
12603		})
12604	}
12605}
12606
12607impl Readable for HTLCSource {
12608	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
12609		let id: u8 = Readable::read(reader)?;
12610		match id {
12611			0 => {
12612				let mut session_priv: crate::util::ser::RequiredWrapper<SecretKey> = crate::util::ser::RequiredWrapper(None);
12613				let mut first_hop_htlc_msat: u64 = 0;
12614				let mut path_hops = Vec::new();
12615				let mut payment_id = None;
12616				let mut payment_params: Option<PaymentParameters> = None;
12617				let mut blinded_tail: Option<BlindedTail> = None;
12618				read_tlv_fields!(reader, {
12619					(0, session_priv, required),
12620					(1, payment_id, option),
12621					(2, first_hop_htlc_msat, required),
12622					(4, path_hops, required_vec),
12623					(5, payment_params, (option: ReadableArgs, 0)),
12624					(6, blinded_tail, option),
12625				});
12626				if payment_id.is_none() {
12627					// For backwards compat, if there was no payment_id written, use the session_priv bytes
12628					// instead.
12629					payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref()));
12630				}
12631				let path = Path { hops: path_hops, blinded_tail };
12632				if path.hops.len() == 0 {
12633					return Err(DecodeError::InvalidValue);
12634				}
12635				if let Some(params) = payment_params.as_mut() {
12636					if let Payee::Clear { ref mut final_cltv_expiry_delta, .. } = params.payee {
12637						if final_cltv_expiry_delta == &0 {
12638							*final_cltv_expiry_delta = path.final_cltv_expiry_delta().ok_or(DecodeError::InvalidValue)?;
12639						}
12640					}
12641				}
12642				Ok(HTLCSource::OutboundRoute {
12643					session_priv: session_priv.0.unwrap(),
12644					first_hop_htlc_msat,
12645					path,
12646					payment_id: payment_id.unwrap(),
12647				})
12648			}
12649			1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
12650			_ => Err(DecodeError::UnknownRequiredFeature),
12651		}
12652	}
12653}
12654
12655impl Writeable for HTLCSource {
12656	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), crate::io::Error> {
12657		match self {
12658			HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id } => {
12659				0u8.write(writer)?;
12660				let payment_id_opt = Some(payment_id);
12661				write_tlv_fields!(writer, {
12662					(0, session_priv, required),
12663					(1, payment_id_opt, option),
12664					(2, first_hop_htlc_msat, required),
12665					// 3 was previously used to write a PaymentSecret for the payment.
12666					(4, path.hops, required_vec),
12667					(5, None::<PaymentParameters>, option), // payment_params in LDK versions prior to 0.0.115
12668					(6, path.blinded_tail, option),
12669				 });
12670			}
12671			HTLCSource::PreviousHopData(ref field) => {
12672				1u8.write(writer)?;
12673				field.write(writer)?;
12674			}
12675		}
12676		Ok(())
12677	}
12678}
12679
12680impl_writeable_tlv_based!(PendingAddHTLCInfo, {
12681	(0, forward_info, required),
12682	(1, prev_user_channel_id, (default_value, 0)),
12683	(2, prev_short_channel_id, required),
12684	(4, prev_htlc_id, required),
12685	(6, prev_funding_outpoint, required),
12686	// Note that by the time we get past the required read for type 6 above, prev_funding_outpoint will be
12687	// filled in, so we can safely unwrap it here.
12688	(7, prev_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(prev_funding_outpoint.0.unwrap()))),
12689	(9, prev_counterparty_node_id, option),
12690});
12691
12692impl Writeable for HTLCForwardInfo {
12693	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
12694		const FAIL_HTLC_VARIANT_ID: u8 = 1;
12695		match self {
12696			Self::AddHTLC(info) => {
12697				0u8.write(w)?;
12698				info.write(w)?;
12699			},
12700			Self::FailHTLC { htlc_id, err_packet } => {
12701				FAIL_HTLC_VARIANT_ID.write(w)?;
12702				write_tlv_fields!(w, {
12703					(0, htlc_id, required),
12704					(2, err_packet, required),
12705				});
12706			},
12707			Self::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
12708				// Since this variant was added in 0.0.119, write this as `::FailHTLC` with an empty error
12709				// packet so older versions have something to fail back with, but serialize the real data as
12710				// optional TLVs for the benefit of newer versions.
12711				FAIL_HTLC_VARIANT_ID.write(w)?;
12712				let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
12713				write_tlv_fields!(w, {
12714					(0, htlc_id, required),
12715					(1, failure_code, required),
12716					(2, dummy_err_packet, required),
12717					(3, sha256_of_onion, required),
12718				});
12719			},
12720		}
12721		Ok(())
12722	}
12723}
12724
12725impl Readable for HTLCForwardInfo {
12726	fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
12727		let id: u8 = Readable::read(r)?;
12728		Ok(match id {
12729			0 => Self::AddHTLC(Readable::read(r)?),
12730			1 => {
12731				_init_and_read_len_prefixed_tlv_fields!(r, {
12732					(0, htlc_id, required),
12733					(1, malformed_htlc_failure_code, option),
12734					(2, err_packet, required),
12735					(3, sha256_of_onion, option),
12736				});
12737				if let Some(failure_code) = malformed_htlc_failure_code {
12738					Self::FailMalformedHTLC {
12739						htlc_id: _init_tlv_based_struct_field!(htlc_id, required),
12740						failure_code,
12741						sha256_of_onion: sha256_of_onion.ok_or(DecodeError::InvalidValue)?,
12742					}
12743				} else {
12744					Self::FailHTLC {
12745						htlc_id: _init_tlv_based_struct_field!(htlc_id, required),
12746						err_packet: _init_tlv_based_struct_field!(err_packet, required),
12747					}
12748				}
12749			},
12750			_ => return Err(DecodeError::InvalidValue),
12751		})
12752	}
12753}
12754
12755impl_writeable_tlv_based!(PendingInboundPayment, {
12756	(0, payment_secret, required),
12757	(2, expiry_time, required),
12758	(4, user_payment_id, required),
12759	(6, payment_preimage, required),
12760	(8, min_value_msat, required),
12761});
12762
12763impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> Writeable for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
12764where
12765	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
12766	T::Target: BroadcasterInterface,
12767	ES::Target: EntropySource,
12768	NS::Target: NodeSigner,
12769	SP::Target: SignerProvider,
12770	F::Target: FeeEstimator,
12771	R::Target: Router,
12772	MR::Target: MessageRouter,
12773	L::Target: Logger,
12774{
12775	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
12776		let _consistency_lock = self.total_consistency_lock.write().unwrap();
12777
12778		write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
12779
12780		self.chain_hash.write(writer)?;
12781		{
12782			let best_block = self.best_block.read().unwrap();
12783			best_block.height.write(writer)?;
12784			best_block.block_hash.write(writer)?;
12785		}
12786
12787		let per_peer_state = self.per_peer_state.write().unwrap();
12788
12789		let mut serializable_peer_count: u64 = 0;
12790		{
12791			let mut number_of_funded_channels = 0;
12792			for (_, peer_state_mutex) in per_peer_state.iter() {
12793				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
12794				let peer_state = &mut *peer_state_lock;
12795				if !peer_state.ok_to_remove(false) {
12796					serializable_peer_count += 1;
12797				}
12798
12799				number_of_funded_channels += peer_state.channel_by_id.iter().filter(
12800					|(_, phase)| if let ChannelPhase::Funded(chan) = phase { chan.context.is_funding_broadcast() } else { false }
12801				).count();
12802			}
12803
12804			(number_of_funded_channels as u64).write(writer)?;
12805
12806			for (_, peer_state_mutex) in per_peer_state.iter() {
12807				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
12808				let peer_state = &mut *peer_state_lock;
12809				for channel in peer_state.channel_by_id.iter().filter_map(
12810					|(_, phase)| if let ChannelPhase::Funded(channel) = phase {
12811						if channel.context.is_funding_broadcast() { Some(channel) } else { None }
12812					} else { None }
12813				) {
12814					channel.write(writer)?;
12815				}
12816			}
12817		}
12818
12819		{
12820			let forward_htlcs = self.forward_htlcs.lock().unwrap();
12821			(forward_htlcs.len() as u64).write(writer)?;
12822			for (short_channel_id, pending_forwards) in forward_htlcs.iter() {
12823				short_channel_id.write(writer)?;
12824				(pending_forwards.len() as u64).write(writer)?;
12825				for forward in pending_forwards {
12826					forward.write(writer)?;
12827				}
12828			}
12829		}
12830
12831		let mut decode_update_add_htlcs_opt = None;
12832		let decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
12833		if !decode_update_add_htlcs.is_empty() {
12834			decode_update_add_htlcs_opt = Some(decode_update_add_htlcs);
12835		}
12836
12837		let claimable_payments = self.claimable_payments.lock().unwrap();
12838		let pending_outbound_payments = self.pending_outbound_payments.pending_outbound_payments.lock().unwrap();
12839
12840		let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
12841		let mut htlc_onion_fields: Vec<&_> = Vec::new();
12842		(claimable_payments.claimable_payments.len() as u64).write(writer)?;
12843		for (payment_hash, payment) in claimable_payments.claimable_payments.iter() {
12844			payment_hash.write(writer)?;
12845			(payment.htlcs.len() as u64).write(writer)?;
12846			for htlc in payment.htlcs.iter() {
12847				htlc.write(writer)?;
12848			}
12849			htlc_purposes.push(&payment.purpose);
12850			htlc_onion_fields.push(&payment.onion_fields);
12851		}
12852
12853		let mut monitor_update_blocked_actions_per_peer = None;
12854		let mut peer_states = Vec::new();
12855		for (_, peer_state_mutex) in per_peer_state.iter() {
12856			// Because we're holding the owning `per_peer_state` write lock here there's no chance
12857			// of a lockorder violation deadlock - no other thread can be holding any
12858			// per_peer_state lock at all.
12859			peer_states.push(peer_state_mutex.unsafe_well_ordered_double_lock_self());
12860		}
12861
12862		(serializable_peer_count).write(writer)?;
12863		for ((peer_pubkey, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
12864			// Peers which we have no channels to should be dropped once disconnected. As we
12865			// disconnect all peers when shutting down and serializing the ChannelManager, we
12866			// consider all peers as disconnected here. There's therefore no need write peers with
12867			// no channels.
12868			if !peer_state.ok_to_remove(false) {
12869				peer_pubkey.write(writer)?;
12870				peer_state.latest_features.write(writer)?;
12871				if !peer_state.monitor_update_blocked_actions.is_empty() {
12872					monitor_update_blocked_actions_per_peer
12873						.get_or_insert_with(Vec::new)
12874						.push((*peer_pubkey, &peer_state.monitor_update_blocked_actions));
12875				}
12876			}
12877		}
12878
12879		let events = self.pending_events.lock().unwrap();
12880		// LDK versions prior to 0.0.115 don't support post-event actions, thus if there's no
12881		// actions at all, skip writing the required TLV. Otherwise, pre-0.0.115 versions will
12882		// refuse to read the new ChannelManager.
12883		let events_not_backwards_compatible = events.iter().any(|(_, action)| action.is_some());
12884		if events_not_backwards_compatible {
12885			// If we're gonna write a even TLV that will overwrite our events anyway we might as
12886			// well save the space and not write any events here.
12887			0u64.write(writer)?;
12888		} else {
12889			(events.len() as u64).write(writer)?;
12890			for (event, _) in events.iter() {
12891				event.write(writer)?;
12892			}
12893		}
12894
12895		// LDK versions prior to 0.0.116 wrote the `pending_background_events`
12896		// `MonitorUpdateRegeneratedOnStartup`s here, however there was never a reason to do so -
12897		// the closing monitor updates were always effectively replayed on startup (either directly
12898		// by calling `broadcast_latest_holder_commitment_txn` on a `ChannelMonitor` during
12899		// deserialization or, in 0.0.115, by regenerating the monitor update itself).
12900		0u64.write(writer)?;
12901
12902		// Prior to 0.0.111 we tracked node_announcement serials here, however that now happens in
12903		// `PeerManager`, and thus we simply write the `highest_seen_timestamp` twice, which is
12904		// likely to be identical.
12905		(self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?;
12906		(self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?;
12907
12908		// LDK versions prior to 0.0.104 wrote `pending_inbound_payments` here, with deprecated support
12909		// for stateful inbound payments maintained until 0.0.116, after which no further inbound
12910		// payments could have been written here.
12911		(0 as u64).write(writer)?;
12912
12913		// For backwards compat, write the session privs and their total length.
12914		let mut num_pending_outbounds_compat: u64 = 0;
12915		for (_, outbound) in pending_outbound_payments.iter() {
12916			if !outbound.is_fulfilled() && !outbound.abandoned() {
12917				num_pending_outbounds_compat += outbound.remaining_parts() as u64;
12918			}
12919		}
12920		num_pending_outbounds_compat.write(writer)?;
12921		for (_, outbound) in pending_outbound_payments.iter() {
12922			match outbound {
12923				PendingOutboundPayment::Legacy { session_privs } |
12924				PendingOutboundPayment::Retryable { session_privs, .. } => {
12925					for session_priv in session_privs.iter() {
12926						session_priv.write(writer)?;
12927					}
12928				}
12929				PendingOutboundPayment::AwaitingInvoice { .. } => {},
12930				PendingOutboundPayment::AwaitingOffer { .. } => {},
12931				PendingOutboundPayment::InvoiceReceived { .. } => {},
12932				PendingOutboundPayment::StaticInvoiceReceived { .. } => {},
12933				PendingOutboundPayment::Fulfilled { .. } => {},
12934				PendingOutboundPayment::Abandoned { .. } => {},
12935			}
12936		}
12937
12938		// Encode without retry info for 0.0.101 compatibility.
12939		let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = new_hash_map();
12940		for (id, outbound) in pending_outbound_payments.iter() {
12941			match outbound {
12942				PendingOutboundPayment::Legacy { session_privs } |
12943				PendingOutboundPayment::Retryable { session_privs, .. } => {
12944					pending_outbound_payments_no_retry.insert(*id, session_privs.clone());
12945				},
12946				_ => {},
12947			}
12948		}
12949
12950		let mut pending_intercepted_htlcs = None;
12951		let our_pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
12952		if our_pending_intercepts.len() != 0 {
12953			pending_intercepted_htlcs = Some(our_pending_intercepts);
12954		}
12955
12956		let mut pending_claiming_payments = Some(&claimable_payments.pending_claiming_payments);
12957		if pending_claiming_payments.as_ref().unwrap().is_empty() {
12958			// LDK versions prior to 0.0.113 do not know how to read the pending claimed payments
12959			// map. Thus, if there are no entries we skip writing a TLV for it.
12960			pending_claiming_payments = None;
12961		}
12962
12963		let mut in_flight_monitor_updates: Option<HashMap<(&PublicKey, &OutPoint), &Vec<ChannelMonitorUpdate>>> = None;
12964		for ((counterparty_id, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
12965			for (funding_outpoint, updates) in peer_state.in_flight_monitor_updates.iter() {
12966				if !updates.is_empty() {
12967					if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(new_hash_map()); }
12968					in_flight_monitor_updates.as_mut().unwrap().insert((counterparty_id, funding_outpoint), updates);
12969				}
12970			}
12971		}
12972
12973		write_tlv_fields!(writer, {
12974			(1, pending_outbound_payments_no_retry, required),
12975			(2, pending_intercepted_htlcs, option),
12976			(3, pending_outbound_payments, required),
12977			(4, pending_claiming_payments, option),
12978			(5, self.our_network_pubkey, required),
12979			(6, monitor_update_blocked_actions_per_peer, option),
12980			(7, self.fake_scid_rand_bytes, required),
12981			(8, if events_not_backwards_compatible { Some(&*events) } else { None }, option),
12982			(9, htlc_purposes, required_vec),
12983			(10, in_flight_monitor_updates, option),
12984			(11, self.probing_cookie_secret, required),
12985			(13, htlc_onion_fields, optional_vec),
12986			(14, decode_update_add_htlcs_opt, option),
12987			(15, self.inbound_payment_id_secret, required),
12988		});
12989
12990		Ok(())
12991	}
12992}
12993
12994impl Writeable for VecDeque<(Event, Option<EventCompletionAction>)> {
12995	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
12996		(self.len() as u64).write(w)?;
12997		for (event, action) in self.iter() {
12998			event.write(w)?;
12999			action.write(w)?;
13000			#[cfg(debug_assertions)] {
13001				// Events are MaybeReadable, in some cases indicating that they shouldn't actually
13002				// be persisted and are regenerated on restart. However, if such an event has a
13003				// post-event-handling action we'll write nothing for the event and would have to
13004				// either forget the action or fail on deserialization (which we do below). Thus,
13005				// check that the event is sane here.
13006				let event_encoded = event.encode();
13007				let event_read: Option<Event> =
13008					MaybeReadable::read(&mut &event_encoded[..]).unwrap();
13009				if action.is_some() { assert!(event_read.is_some()); }
13010			}
13011		}
13012		Ok(())
13013	}
13014}
13015impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
13016	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
13017		let len: u64 = Readable::read(reader)?;
13018		const MAX_ALLOC_SIZE: u64 = 1024 * 16;
13019		let mut events: Self = VecDeque::with_capacity(cmp::min(
13020			MAX_ALLOC_SIZE/mem::size_of::<(events::Event, Option<EventCompletionAction>)>() as u64,
13021			len) as usize);
13022		for _ in 0..len {
13023			let ev_opt = MaybeReadable::read(reader)?;
13024			let action = Readable::read(reader)?;
13025			if let Some(ev) = ev_opt {
13026				events.push_back((ev, action));
13027			} else if action.is_some() {
13028				return Err(DecodeError::InvalidValue);
13029			}
13030		}
13031		Ok(events)
13032	}
13033}
13034
13035/// Arguments for the creation of a ChannelManager that are not deserialized.
13036///
13037/// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
13038/// is:
13039/// 1) Deserialize all stored [`ChannelMonitor`]s.
13040/// 2) Deserialize the [`ChannelManager`] by filling in this struct and calling:
13041///    `<(BlockHash, ChannelManager)>::read(reader, args)`
13042///    This may result in closing some channels if the [`ChannelMonitor`] is newer than the stored
13043///    [`ChannelManager`] state to ensure no loss of funds. Thus, transactions may be broadcasted.
13044/// 3) If you are not fetching full blocks, register all relevant [`ChannelMonitor`] outpoints the
13045///    same way you would handle a [`chain::Filter`] call using
13046///    [`ChannelMonitor::get_outputs_to_watch`] and [`ChannelMonitor::get_funding_txo`].
13047/// 4) Disconnect/connect blocks on your [`ChannelMonitor`]s to get them in sync with the chain.
13048/// 5) Disconnect/connect blocks on the [`ChannelManager`] to get it in sync with the chain.
13049/// 6) Optionally re-persist the [`ChannelMonitor`]s to ensure the latest state is on disk.
13050///    This is important if you have replayed a nontrivial number of blocks in step (4), allowing
13051///    you to avoid having to replay the same blocks if you shut down quickly after startup. It is
13052///    otherwise not required.
13053///    Note that if you're using a [`ChainMonitor`] for your [`chain::Watch`] implementation, you
13054///    will likely accomplish this as a side-effect of calling [`chain::Watch::watch_channel`] in
13055///    the next step.
13056/// 7) Move the [`ChannelMonitor`]s into your local [`chain::Watch`]. If you're using a
13057///    [`ChainMonitor`], this is done by calling [`chain::Watch::watch_channel`].
13058///
13059/// Note that the ordering of #4-7 is not of importance, however all four must occur before you
13060/// call any other methods on the newly-deserialized [`ChannelManager`].
13061///
13062/// Note that because some channels may be closed during deserialization, it is critical that you
13063/// always deserialize only the latest version of a ChannelManager and ChannelMonitors available to
13064/// you. If you deserialize an old ChannelManager (during which force-closure transactions may be
13065/// broadcast), and then later deserialize a newer version of the same ChannelManager (which will
13066/// not force-close the same channels but consider them live), you may end up revoking a state for
13067/// which you've already broadcasted the transaction.
13068///
13069/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
13070pub struct ChannelManagerReadArgs<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
13071where
13072	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
13073	T::Target: BroadcasterInterface,
13074	ES::Target: EntropySource,
13075	NS::Target: NodeSigner,
13076	SP::Target: SignerProvider,
13077	F::Target: FeeEstimator,
13078	R::Target: Router,
13079	MR::Target: MessageRouter,
13080	L::Target: Logger,
13081{
13082	/// A cryptographically secure source of entropy.
13083	pub entropy_source: ES,
13084
13085	/// A signer that is able to perform node-scoped cryptographic operations.
13086	pub node_signer: NS,
13087
13088	/// The keys provider which will give us relevant keys. Some keys will be loaded during
13089	/// deserialization and KeysInterface::read_chan_signer will be used to read per-Channel
13090	/// signing data.
13091	pub signer_provider: SP,
13092
13093	/// The fee_estimator for use in the ChannelManager in the future.
13094	///
13095	/// No calls to the FeeEstimator will be made during deserialization.
13096	pub fee_estimator: F,
13097	/// The chain::Watch for use in the ChannelManager in the future.
13098	///
13099	/// No calls to the chain::Watch will be made during deserialization. It is assumed that
13100	/// you have deserialized ChannelMonitors separately and will add them to your
13101	/// chain::Watch after deserializing this ChannelManager.
13102	pub chain_monitor: M,
13103
13104	/// The BroadcasterInterface which will be used in the ChannelManager in the future and may be
13105	/// used to broadcast the latest local commitment transactions of channels which must be
13106	/// force-closed during deserialization.
13107	pub tx_broadcaster: T,
13108	/// The router which will be used in the ChannelManager in the future for finding routes
13109	/// on-the-fly for trampoline payments. Absent in private nodes that don't support forwarding.
13110	///
13111	/// No calls to the router will be made during deserialization.
13112	pub router: R,
13113	/// The [`MessageRouter`] used for constructing [`BlindedMessagePath`]s for [`Offer`]s,
13114	/// [`Refund`]s, and any reply paths.
13115	pub message_router: MR,
13116	/// The Logger for use in the ChannelManager and which may be used to log information during
13117	/// deserialization.
13118	pub logger: L,
13119	/// Default settings used for new channels. Any existing channels will continue to use the
13120	/// runtime settings which were stored when the ChannelManager was serialized.
13121	pub default_config: UserConfig,
13122
13123	/// A map from channel funding outpoints to ChannelMonitors for those channels (ie
13124	/// value.context.get_funding_txo() should be the key).
13125	///
13126	/// If a monitor is inconsistent with the channel state during deserialization the channel will
13127	/// be force-closed using the data in the ChannelMonitor and the channel will be dropped. This
13128	/// is true for missing channels as well. If there is a monitor missing for which we find
13129	/// channel data Err(DecodeError::InvalidValue) will be returned.
13130	///
13131	/// In such cases the latest local transactions will be sent to the tx_broadcaster included in
13132	/// this struct.
13133	///
13134	/// This is not exported to bindings users because we have no HashMap bindings
13135	pub channel_monitors: HashMap<OutPoint, &'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
13136}
13137
13138impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
13139		ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>
13140where
13141	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
13142	T::Target: BroadcasterInterface,
13143	ES::Target: EntropySource,
13144	NS::Target: NodeSigner,
13145	SP::Target: SignerProvider,
13146	F::Target: FeeEstimator,
13147	R::Target: Router,
13148	MR::Target: MessageRouter,
13149	L::Target: Logger,
13150{
13151	/// Simple utility function to create a ChannelManagerReadArgs which creates the monitor
13152	/// HashMap for you. This is primarily useful for C bindings where it is not practical to
13153	/// populate a HashMap directly from C.
13154	pub fn new(
13155		entropy_source: ES, node_signer: NS, signer_provider: SP, fee_estimator: F,
13156		chain_monitor: M, tx_broadcaster: T, router: R, message_router: MR, logger: L,
13157		default_config: UserConfig,
13158		mut channel_monitors: Vec<&'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
13159	) -> Self {
13160		Self {
13161			entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor,
13162			tx_broadcaster, router, message_router, logger, default_config,
13163			channel_monitors: hash_map_from_iter(
13164				channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) })
13165			),
13166		}
13167	}
13168}
13169
13170// Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the
13171// SipmleArcChannelManager type:
13172impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
13173	ReadableArgs<ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>> for (BlockHash, Arc<ChannelManager<M, T, ES, NS, SP, F, R, MR, L>>)
13174where
13175	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
13176	T::Target: BroadcasterInterface,
13177	ES::Target: EntropySource,
13178	NS::Target: NodeSigner,
13179	SP::Target: SignerProvider,
13180	F::Target: FeeEstimator,
13181	R::Target: Router,
13182	MR::Target: MessageRouter,
13183	L::Target: Logger,
13184{
13185	fn read<Reader: io::Read>(reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>) -> Result<Self, DecodeError> {
13186		let (blockhash, chan_manager) = <(BlockHash, ChannelManager<M, T, ES, NS, SP, F, R, MR, L>)>::read(reader, args)?;
13187		Ok((blockhash, Arc::new(chan_manager)))
13188	}
13189}
13190
13191impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
13192	ReadableArgs<ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>> for (BlockHash, ChannelManager<M, T, ES, NS, SP, F, R, MR, L>)
13193where
13194	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
13195	T::Target: BroadcasterInterface,
13196	ES::Target: EntropySource,
13197	NS::Target: NodeSigner,
13198	SP::Target: SignerProvider,
13199	F::Target: FeeEstimator,
13200	R::Target: Router,
13201	MR::Target: MessageRouter,
13202	L::Target: Logger,
13203{
13204	fn read<Reader: io::Read>(reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>) -> Result<Self, DecodeError> {
13205		let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
13206
13207		let chain_hash: ChainHash = Readable::read(reader)?;
13208		let best_block_height: u32 = Readable::read(reader)?;
13209		let best_block_hash: BlockHash = Readable::read(reader)?;
13210
13211		let empty_peer_state = || {
13212			PeerState {
13213				channel_by_id: new_hash_map(),
13214				inbound_channel_request_by_id: new_hash_map(),
13215				latest_features: InitFeatures::empty(),
13216				pending_msg_events: Vec::new(),
13217				in_flight_monitor_updates: BTreeMap::new(),
13218				monitor_update_blocked_actions: BTreeMap::new(),
13219				actions_blocking_raa_monitor_updates: BTreeMap::new(),
13220				closed_channel_monitor_update_ids: BTreeMap::new(),
13221				is_connected: false,
13222			}
13223		};
13224
13225		let mut failed_htlcs = Vec::new();
13226		let channel_count: u64 = Readable::read(reader)?;
13227		let mut funding_txo_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128));
13228		let mut per_peer_state = hash_map_with_capacity(cmp::min(channel_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
13229		let mut outpoint_to_peer = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
13230		let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
13231		let mut channel_closures = VecDeque::new();
13232		let mut close_background_events = Vec::new();
13233		let mut funding_txo_to_channel_id = hash_map_with_capacity(channel_count as usize);
13234		for _ in 0..channel_count {
13235			let mut channel: Channel<SP> = Channel::read(reader, (
13236				&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
13237			))?;
13238			let logger = WithChannelContext::from(&args.logger, &channel.context, None);
13239			let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
13240			funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id());
13241			funding_txo_set.insert(funding_txo.clone());
13242			if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
13243				if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
13244						channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() ||
13245						channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
13246						channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
13247					// But if the channel is behind of the monitor, close the channel:
13248					log_error!(logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
13249					log_error!(logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
13250					if channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
13251						log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
13252							&channel.context.channel_id(), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
13253					}
13254					if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() {
13255						log_error!(logger, " The ChannelMonitor for channel {} is at holder commitment number {} but the ChannelManager is at holder commitment number {}.",
13256							&channel.context.channel_id(), monitor.get_cur_holder_commitment_number(), channel.get_cur_holder_commitment_transaction_number());
13257					}
13258					if channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() {
13259						log_error!(logger, " The ChannelMonitor for channel {} is at revoked counterparty transaction number {} but the ChannelManager is at revoked counterparty transaction number {}.",
13260							&channel.context.channel_id(), monitor.get_min_seen_secret(), channel.get_revoked_counterparty_commitment_transaction_number());
13261					}
13262					if channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() {
13263						log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
13264							&channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
13265					}
13266					let mut shutdown_result = channel.context.force_shutdown(true, ClosureReason::OutdatedChannelManager);
13267					if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
13268						return Err(DecodeError::InvalidValue);
13269					}
13270					if let Some((counterparty_node_id, funding_txo, channel_id, mut update)) = shutdown_result.monitor_update {
13271						// Our channel information is out of sync with the `ChannelMonitor`, so
13272						// force the update to use the `ChannelMonitor`'s update_id for the close
13273						// update.
13274						let latest_update_id = monitor.get_latest_update_id().saturating_add(1);
13275						update.update_id = latest_update_id;
13276						per_peer_state.entry(counterparty_node_id)
13277							.or_insert_with(|| Mutex::new(empty_peer_state()))
13278							.lock().unwrap()
13279							.closed_channel_monitor_update_ids.entry(channel_id)
13280								.and_modify(|v| *v = cmp::max(latest_update_id, *v))
13281								.or_insert(latest_update_id);
13282
13283						close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13284							counterparty_node_id, funding_txo, channel_id, update
13285						});
13286					}
13287					failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
13288					channel_closures.push_back((events::Event::ChannelClosed {
13289						channel_id: channel.context.channel_id(),
13290						user_channel_id: channel.context.get_user_id(),
13291						reason: ClosureReason::OutdatedChannelManager,
13292						counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
13293						channel_capacity_sats: Some(channel.context.get_value_satoshis()),
13294						channel_funding_txo: channel.context.get_funding_txo(),
13295						last_local_balance_msat: Some(channel.context.get_value_to_self_msat()),
13296					}, None));
13297					for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
13298						let mut found_htlc = false;
13299						for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() {
13300							if *channel_htlc_source == monitor_htlc_source { found_htlc = true; break; }
13301						}
13302						if !found_htlc {
13303							// If we have some HTLCs in the channel which are not present in the newer
13304							// ChannelMonitor, they have been removed and should be failed back to
13305							// ensure we don't forget them entirely. Note that if the missing HTLC(s)
13306							// were actually claimed we'd have generated and ensured the previous-hop
13307							// claim update ChannelMonitor updates were persisted prior to persising
13308							// the ChannelMonitor update for the forward leg, so attempting to fail the
13309							// backwards leg of the HTLC will simply be rejected.
13310							let logger = WithChannelContext::from(&args.logger, &channel.context, Some(*payment_hash));
13311							log_info!(logger,
13312								"Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
13313								&channel.context.channel_id(), &payment_hash);
13314							failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.context.get_counterparty_node_id(), channel.context.channel_id()));
13315						}
13316					}
13317				} else {
13318					channel.on_startup_drop_completed_blocked_mon_updates_through(&logger, monitor.get_latest_update_id());
13319					log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {} with {} blocked updates",
13320						&channel.context.channel_id(), channel.context.get_latest_monitor_update_id(),
13321						monitor.get_latest_update_id(), channel.blocked_monitor_updates_pending());
13322					if let Some(short_channel_id) = channel.context.get_short_channel_id() {
13323						short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
13324					}
13325					if let Some(funding_txo) = channel.context.get_funding_txo() {
13326						outpoint_to_peer.insert(funding_txo, channel.context.get_counterparty_node_id());
13327					}
13328					per_peer_state.entry(channel.context.get_counterparty_node_id())
13329						.or_insert_with(|| Mutex::new(empty_peer_state()))
13330						.get_mut().unwrap()
13331						.channel_by_id.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
13332				}
13333			} else if channel.is_awaiting_initial_mon_persist() {
13334				// If we were persisted and shut down while the initial ChannelMonitor persistence
13335				// was in-progress, we never broadcasted the funding transaction and can still
13336				// safely discard the channel.
13337				let _ = channel.context.force_shutdown(false, ClosureReason::DisconnectedPeer);
13338				channel_closures.push_back((events::Event::ChannelClosed {
13339					channel_id: channel.context.channel_id(),
13340					user_channel_id: channel.context.get_user_id(),
13341					reason: ClosureReason::DisconnectedPeer,
13342					counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
13343					channel_capacity_sats: Some(channel.context.get_value_satoshis()),
13344					channel_funding_txo: channel.context.get_funding_txo(),
13345					last_local_balance_msat: Some(channel.context.get_value_to_self_msat()),
13346				}, None));
13347			} else {
13348				log_error!(logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id());
13349				log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
13350				log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
13351				log_error!(logger, " Without the ChannelMonitor we cannot continue without risking funds.");
13352				log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
13353				return Err(DecodeError::InvalidValue);
13354			}
13355		}
13356
13357		for (funding_txo, monitor) in args.channel_monitors.iter() {
13358			if !funding_txo_set.contains(funding_txo) {
13359				let mut should_queue_fc_update = false;
13360				if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
13361					// If the ChannelMonitor had any updates, we may need to update it further and
13362					// thus track it in `closed_channel_monitor_update_ids`. If the channel never
13363					// had any updates at all, there can't be any HTLCs pending which we need to
13364					// claim.
13365					// Note that a `ChannelMonitor` is created with `update_id` 0 and after we
13366					// provide it with a closure update its `update_id` will be at 1.
13367					if !monitor.offchain_closed() || monitor.get_latest_update_id() > 1 {
13368						should_queue_fc_update = !monitor.offchain_closed();
13369						let mut latest_update_id = monitor.get_latest_update_id();
13370						if should_queue_fc_update {
13371							latest_update_id += 1;
13372						}
13373						per_peer_state.entry(counterparty_node_id)
13374							.or_insert_with(|| Mutex::new(empty_peer_state()))
13375							.lock().unwrap()
13376							.closed_channel_monitor_update_ids.entry(monitor.channel_id())
13377								.and_modify(|v| *v = cmp::max(latest_update_id, *v))
13378								.or_insert(latest_update_id);
13379					}
13380				}
13381
13382				if !should_queue_fc_update {
13383					continue;
13384				}
13385
13386				let logger = WithChannelMonitor::from(&args.logger, monitor, None);
13387				let channel_id = monitor.channel_id();
13388				log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
13389					&channel_id);
13390				let mut monitor_update = ChannelMonitorUpdate {
13391					update_id: monitor.get_latest_update_id().saturating_add(1),
13392					counterparty_node_id: None,
13393					updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
13394					channel_id: Some(monitor.channel_id()),
13395				};
13396				if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
13397					let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13398						counterparty_node_id,
13399						funding_txo: *funding_txo,
13400						channel_id,
13401						update: monitor_update,
13402					};
13403					close_background_events.push(update);
13404				} else {
13405					// This is a fairly old `ChannelMonitor` that hasn't seen an update to its
13406					// off-chain state since LDK 0.0.118 (as in LDK 0.0.119 any off-chain
13407					// `ChannelMonitorUpdate` will set the counterparty ID).
13408					// Thus, we assume that it has no pending HTLCs and we will not need to
13409					// generate a `ChannelMonitorUpdate` for it aside from this
13410					// `ChannelForceClosed` one.
13411					monitor_update.update_id = u64::MAX;
13412					close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, channel_id, monitor_update)));
13413				}
13414			}
13415		}
13416
13417		const MAX_ALLOC_SIZE: usize = 1024 * 64;
13418		let forward_htlcs_count: u64 = Readable::read(reader)?;
13419		let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128));
13420		for _ in 0..forward_htlcs_count {
13421			let short_channel_id = Readable::read(reader)?;
13422			let pending_forwards_count: u64 = Readable::read(reader)?;
13423			let mut pending_forwards = Vec::with_capacity(cmp::min(pending_forwards_count as usize, MAX_ALLOC_SIZE/mem::size_of::<HTLCForwardInfo>()));
13424			for _ in 0..pending_forwards_count {
13425				pending_forwards.push(Readable::read(reader)?);
13426			}
13427			forward_htlcs.insert(short_channel_id, pending_forwards);
13428		}
13429
13430		let claimable_htlcs_count: u64 = Readable::read(reader)?;
13431		let mut claimable_htlcs_list = Vec::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
13432		for _ in 0..claimable_htlcs_count {
13433			let payment_hash = Readable::read(reader)?;
13434			let previous_hops_len: u64 = Readable::read(reader)?;
13435			let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, MAX_ALLOC_SIZE/mem::size_of::<ClaimableHTLC>()));
13436			for _ in 0..previous_hops_len {
13437				previous_hops.push(<ClaimableHTLC as Readable>::read(reader)?);
13438			}
13439			claimable_htlcs_list.push((payment_hash, previous_hops));
13440		}
13441
13442		let peer_count: u64 = Readable::read(reader)?;
13443		for _ in 0..peer_count {
13444			let peer_pubkey: PublicKey = Readable::read(reader)?;
13445			let latest_features = Readable::read(reader)?;
13446			if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) {
13447				peer_state.get_mut().unwrap().latest_features = latest_features;
13448			}
13449		}
13450
13451		let event_count: u64 = Readable::read(reader)?;
13452		let mut pending_events_read: VecDeque<(events::Event, Option<EventCompletionAction>)> =
13453			VecDeque::with_capacity(cmp::min(event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(events::Event, Option<EventCompletionAction>)>()));
13454		for _ in 0..event_count {
13455			match MaybeReadable::read(reader)? {
13456				Some(event) => pending_events_read.push_back((event, None)),
13457				None => continue,
13458			}
13459		}
13460
13461		let background_event_count: u64 = Readable::read(reader)?;
13462		for _ in 0..background_event_count {
13463			match <u8 as Readable>::read(reader)? {
13464				0 => {
13465					// LDK versions prior to 0.0.116 wrote pending `MonitorUpdateRegeneratedOnStartup`s here,
13466					// however we really don't (and never did) need them - we regenerate all
13467					// on-startup monitor updates.
13468					let _: OutPoint = Readable::read(reader)?;
13469					let _: ChannelMonitorUpdate = Readable::read(reader)?;
13470				}
13471				_ => return Err(DecodeError::InvalidValue),
13472			}
13473		}
13474
13475		let _last_node_announcement_serial: u32 = Readable::read(reader)?; // Only used < 0.0.111
13476		let highest_seen_timestamp: u32 = Readable::read(reader)?;
13477
13478		// The last version where a pending inbound payment may have been added was 0.0.116.
13479		let pending_inbound_payment_count: u64 = Readable::read(reader)?;
13480		for _ in 0..pending_inbound_payment_count {
13481			let payment_hash: PaymentHash = Readable::read(reader)?;
13482			let logger = WithContext::from(&args.logger, None, None, Some(payment_hash));
13483			let inbound: PendingInboundPayment = Readable::read(reader)?;
13484			log_warn!(logger, "Ignoring deprecated pending inbound payment with payment hash {}: {:?}", payment_hash, inbound);
13485		}
13486
13487		let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?;
13488		let mut pending_outbound_payments_compat: HashMap<PaymentId, PendingOutboundPayment> =
13489			hash_map_with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
13490		for _ in 0..pending_outbound_payments_count_compat {
13491			let session_priv = Readable::read(reader)?;
13492			let payment = PendingOutboundPayment::Legacy {
13493				session_privs: hash_set_from_iter([session_priv]),
13494			};
13495			if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() {
13496				return Err(DecodeError::InvalidValue)
13497			};
13498		}
13499
13500		// pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients.
13501		let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = None;
13502		let mut pending_outbound_payments = None;
13503		let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = Some(new_hash_map());
13504		let mut received_network_pubkey: Option<PublicKey> = None;
13505		let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
13506		let mut probing_cookie_secret: Option<[u8; 32]> = None;
13507		let mut claimable_htlc_purposes = None;
13508		let mut claimable_htlc_onion_fields = None;
13509		let mut pending_claiming_payments = Some(new_hash_map());
13510		let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> = Some(Vec::new());
13511		let mut events_override = None;
13512		let mut in_flight_monitor_updates: Option<HashMap<(PublicKey, OutPoint), Vec<ChannelMonitorUpdate>>> = None;
13513		let mut decode_update_add_htlcs: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> = None;
13514		let mut inbound_payment_id_secret = None;
13515		read_tlv_fields!(reader, {
13516			(1, pending_outbound_payments_no_retry, option),
13517			(2, pending_intercepted_htlcs, option),
13518			(3, pending_outbound_payments, option),
13519			(4, pending_claiming_payments, option),
13520			(5, received_network_pubkey, option),
13521			(6, monitor_update_blocked_actions_per_peer, option),
13522			(7, fake_scid_rand_bytes, option),
13523			(8, events_override, option),
13524			(9, claimable_htlc_purposes, optional_vec),
13525			(10, in_flight_monitor_updates, option),
13526			(11, probing_cookie_secret, option),
13527			(13, claimable_htlc_onion_fields, optional_vec),
13528			(14, decode_update_add_htlcs, option),
13529			(15, inbound_payment_id_secret, option),
13530		});
13531		let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map());
13532		if fake_scid_rand_bytes.is_none() {
13533			fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
13534		}
13535
13536		if probing_cookie_secret.is_none() {
13537			probing_cookie_secret = Some(args.entropy_source.get_secure_random_bytes());
13538		}
13539
13540		if inbound_payment_id_secret.is_none() {
13541			inbound_payment_id_secret = Some(args.entropy_source.get_secure_random_bytes());
13542		}
13543
13544		if let Some(events) = events_override {
13545			pending_events_read = events;
13546		}
13547
13548		if !channel_closures.is_empty() {
13549			pending_events_read.append(&mut channel_closures);
13550		}
13551
13552		if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
13553			pending_outbound_payments = Some(pending_outbound_payments_compat);
13554		} else if pending_outbound_payments.is_none() {
13555			let mut outbounds = new_hash_map();
13556			for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() {
13557				outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
13558			}
13559			pending_outbound_payments = Some(outbounds);
13560		}
13561		let pending_outbounds = OutboundPayments::new(pending_outbound_payments.unwrap());
13562
13563		// We have to replay (or skip, if they were completed after we wrote the `ChannelManager`)
13564		// each `ChannelMonitorUpdate` in `in_flight_monitor_updates`. After doing so, we have to
13565		// check that each channel we have isn't newer than the latest `ChannelMonitorUpdate`(s) we
13566		// replayed, and for each monitor update we have to replay we have to ensure there's a
13567		// `ChannelMonitor` for it.
13568		//
13569		// In order to do so we first walk all of our live channels (so that we can check their
13570		// state immediately after doing the update replays, when we have the `update_id`s
13571		// available) and then walk any remaining in-flight updates.
13572		//
13573		// Because the actual handling of the in-flight updates is the same, it's macro'ized here:
13574		let mut pending_background_events = Vec::new();
13575		macro_rules! handle_in_flight_updates {
13576			($counterparty_node_id: expr, $chan_in_flight_upds: expr, $funding_txo: expr,
13577			 $monitor: expr, $peer_state: expr, $logger: expr, $channel_info_log: expr
13578			) => { {
13579				let mut max_in_flight_update_id = 0;
13580				$chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
13581				for update in $chan_in_flight_upds.iter() {
13582					log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
13583						update.update_id, $channel_info_log, &$monitor.channel_id());
13584					max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
13585					pending_background_events.push(
13586						BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13587							counterparty_node_id: $counterparty_node_id,
13588							funding_txo: $funding_txo,
13589							channel_id: $monitor.channel_id(),
13590							update: update.clone(),
13591						});
13592				}
13593				if $chan_in_flight_upds.is_empty() {
13594					// We had some updates to apply, but it turns out they had completed before we
13595					// were serialized, we just weren't notified of that. Thus, we may have to run
13596					// the completion actions for any monitor updates, but otherwise are done.
13597					pending_background_events.push(
13598						BackgroundEvent::MonitorUpdatesComplete {
13599							counterparty_node_id: $counterparty_node_id,
13600							channel_id: $monitor.channel_id(),
13601						});
13602				} else {
13603					$peer_state.closed_channel_monitor_update_ids.entry($monitor.channel_id())
13604						.and_modify(|v| *v = cmp::max(max_in_flight_update_id, *v))
13605						.or_insert(max_in_flight_update_id);
13606				}
13607				if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() {
13608					log_error!($logger, "Duplicate in-flight monitor update set for the same channel!");
13609					return Err(DecodeError::InvalidValue);
13610				}
13611				max_in_flight_update_id
13612			} }
13613		}
13614
13615		for (counterparty_id, peer_state_mtx) in per_peer_state.iter_mut() {
13616			let mut peer_state_lock = peer_state_mtx.lock().unwrap();
13617			let peer_state = &mut *peer_state_lock;
13618			for phase in peer_state.channel_by_id.values() {
13619				if let ChannelPhase::Funded(chan) = phase {
13620					let logger = WithChannelContext::from(&args.logger, &chan.context, None);
13621
13622					// Channels that were persisted have to be funded, otherwise they should have been
13623					// discarded.
13624					let funding_txo = chan.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
13625					let monitor = args.channel_monitors.get(&funding_txo)
13626						.expect("We already checked for monitor presence when loading channels");
13627					let mut max_in_flight_update_id = monitor.get_latest_update_id();
13628					if let Some(in_flight_upds) = &mut in_flight_monitor_updates {
13629						if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, funding_txo)) {
13630							max_in_flight_update_id = cmp::max(max_in_flight_update_id,
13631								handle_in_flight_updates!(*counterparty_id, chan_in_flight_upds,
13632									funding_txo, monitor, peer_state, logger, ""));
13633						}
13634					}
13635					if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id {
13636						// If the channel is ahead of the monitor, return DangerousValue:
13637						log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
13638						log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
13639							chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id);
13640						log_error!(logger, " but the ChannelManager is at update_id {}.", chan.get_latest_unblocked_monitor_update_id());
13641						log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
13642						log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
13643						log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
13644						log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
13645						return Err(DecodeError::DangerousValue);
13646					}
13647				} else {
13648					// We shouldn't have persisted (or read) any unfunded channel types so none should have been
13649					// created in this `channel_by_id` map.
13650					debug_assert!(false);
13651					return Err(DecodeError::InvalidValue);
13652				}
13653			}
13654		}
13655
13656		if let Some(in_flight_upds) = in_flight_monitor_updates {
13657			for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds {
13658				let channel_id = funding_txo_to_channel_id.get(&funding_txo).copied();
13659				let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id, None);
13660				if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
13661					// Now that we've removed all the in-flight monitor updates for channels that are
13662					// still open, we need to replay any monitor updates that are for closed channels,
13663					// creating the neccessary peer_state entries as we go.
13664					let peer_state_mutex = per_peer_state.entry(counterparty_id).or_insert_with(|| {
13665						Mutex::new(empty_peer_state())
13666					});
13667					let mut peer_state = peer_state_mutex.lock().unwrap();
13668					handle_in_flight_updates!(counterparty_id, chan_in_flight_updates,
13669						funding_txo, monitor, peer_state, logger, "closed ");
13670				} else {
13671					log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
13672					log_error!(logger, " The ChannelMonitor for channel {} is missing.", if let Some(channel_id) =
13673						channel_id { channel_id.to_string() } else { format!("with outpoint {}", funding_txo) } );
13674					log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
13675					log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
13676					log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
13677					log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
13678					log_error!(logger, " Pending in-flight updates are: {:?}", chan_in_flight_updates);
13679					return Err(DecodeError::InvalidValue);
13680				}
13681			}
13682		}
13683
13684		// The newly generated `close_background_events` have to be added after any updates that
13685		// were already in-flight on shutdown, so we append them here.
13686		pending_background_events.reserve(close_background_events.len());
13687		'each_bg_event: for mut new_event in close_background_events {
13688			if let BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13689				counterparty_node_id, funding_txo, channel_id, update,
13690			} = &mut new_event {
13691				debug_assert_eq!(update.updates.len(), 1);
13692				debug_assert!(matches!(update.updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. }));
13693				let mut updated_id = false;
13694				for pending_event in pending_background_events.iter() {
13695					if let BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13696						counterparty_node_id: pending_cp, funding_txo: pending_funding,
13697						channel_id: pending_chan_id, update: pending_update,
13698					} = pending_event {
13699						let for_same_channel = counterparty_node_id == pending_cp
13700							&& funding_txo == pending_funding
13701							&& channel_id == pending_chan_id;
13702						if for_same_channel {
13703							debug_assert!(update.update_id >= pending_update.update_id);
13704							if pending_update.updates.iter().any(|upd| matches!(upd, ChannelMonitorUpdateStep::ChannelForceClosed { .. })) {
13705								// If the background event we're looking at is just
13706								// force-closing the channel which already has a pending
13707								// force-close update, no need to duplicate it.
13708								continue 'each_bg_event;
13709							}
13710							update.update_id = pending_update.update_id.saturating_add(1);
13711							updated_id = true;
13712						}
13713					}
13714				}
13715				let mut per_peer_state = per_peer_state.get(counterparty_node_id)
13716					.expect("If we have pending updates for a channel it must have an entry")
13717					.lock().unwrap();
13718				if updated_id {
13719					per_peer_state
13720						.closed_channel_monitor_update_ids.entry(*channel_id)
13721						.and_modify(|v| *v = cmp::max(update.update_id, *v))
13722						.or_insert(update.update_id);
13723				}
13724				let in_flight_updates = per_peer_state.in_flight_monitor_updates
13725					.entry(*funding_txo)
13726					.or_insert_with(Vec::new);
13727				debug_assert!(!in_flight_updates.iter().any(|upd| upd == update));
13728				in_flight_updates.push(update.clone());
13729			}
13730			pending_background_events.push(new_event);
13731		}
13732
13733		// If there's any preimages for forwarded HTLCs hanging around in ChannelMonitors we
13734		// should ensure we try them again on the inbound edge. We put them here and do so after we
13735		// have a fully-constructed `ChannelManager` at the end.
13736		let mut pending_claims_to_replay = Vec::new();
13737
13738		{
13739			// If we're tracking pending payments, ensure we haven't lost any by looking at the
13740			// ChannelMonitor data for any channels for which we do not have authorative state
13741			// (i.e. those for which we just force-closed above or we otherwise don't have a
13742			// corresponding `Channel` at all).
13743			// This avoids several edge-cases where we would otherwise "forget" about pending
13744			// payments which are still in-flight via their on-chain state.
13745			// We only rebuild the pending payments map if we were most recently serialized by
13746			// 0.0.102+
13747			for (_, monitor) in args.channel_monitors.iter() {
13748				let counterparty_opt = outpoint_to_peer.get(&monitor.get_funding_txo().0);
13749				if counterparty_opt.is_none() {
13750					for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
13751						let logger = WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash));
13752						if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source {
13753							if path.hops.is_empty() {
13754								log_error!(logger, "Got an empty path for a pending payment");
13755								return Err(DecodeError::InvalidValue);
13756							}
13757
13758							let mut session_priv_bytes = [0; 32];
13759							session_priv_bytes[..].copy_from_slice(&session_priv[..]);
13760							pending_outbounds.insert_from_monitor_on_startup(
13761								payment_id, htlc.payment_hash, session_priv_bytes, &path, best_block_height, logger
13762							);
13763						}
13764					}
13765					for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() {
13766						let logger = WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash));
13767						match htlc_source {
13768							HTLCSource::PreviousHopData(prev_hop_data) => {
13769								let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
13770									info.prev_funding_outpoint == prev_hop_data.outpoint &&
13771										info.prev_htlc_id == prev_hop_data.htlc_id
13772								};
13773								// The ChannelMonitor is now responsible for this HTLC's
13774								// failure/success and will let us know what its outcome is. If we
13775								// still have an entry for this HTLC in `forward_htlcs` or
13776								// `pending_intercepted_htlcs`, we were apparently not persisted after
13777								// the monitor was when forwarding the payment.
13778								decode_update_add_htlcs.retain(|scid, update_add_htlcs| {
13779									update_add_htlcs.retain(|update_add_htlc| {
13780										let matches = *scid == prev_hop_data.short_channel_id &&
13781											update_add_htlc.htlc_id == prev_hop_data.htlc_id;
13782										if matches {
13783											log_info!(logger, "Removing pending to-decode HTLC with hash {} as it was forwarded to the closed channel {}",
13784												&htlc.payment_hash, &monitor.channel_id());
13785										}
13786										!matches
13787									});
13788									!update_add_htlcs.is_empty()
13789								});
13790								forward_htlcs.retain(|_, forwards| {
13791									forwards.retain(|forward| {
13792										if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
13793											if pending_forward_matches_htlc(&htlc_info) {
13794												log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
13795													&htlc.payment_hash, &monitor.channel_id());
13796												false
13797											} else { true }
13798										} else { true }
13799									});
13800									!forwards.is_empty()
13801								});
13802								pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
13803									if pending_forward_matches_htlc(&htlc_info) {
13804										log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
13805											&htlc.payment_hash, &monitor.channel_id());
13806										pending_events_read.retain(|(event, _)| {
13807											if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
13808												intercepted_id != ev_id
13809											} else { true }
13810										});
13811										false
13812									} else { true }
13813								});
13814							},
13815							HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } => {
13816								if let Some(preimage) = preimage_opt {
13817									let pending_events = Mutex::new(pending_events_read);
13818									// Note that we set `from_onchain` to "false" here,
13819									// deliberately keeping the pending payment around forever.
13820									// Given it should only occur when we have a channel we're
13821									// force-closing for being stale that's okay.
13822									// The alternative would be to wipe the state when claiming,
13823									// generating a `PaymentPathSuccessful` event but regenerating
13824									// it and the `PaymentSent` on every restart until the
13825									// `ChannelMonitor` is removed.
13826									let compl_action =
13827										EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
13828											channel_funding_outpoint: monitor.get_funding_txo().0,
13829											channel_id: monitor.channel_id(),
13830											counterparty_node_id: path.hops[0].pubkey,
13831										};
13832									pending_outbounds.claim_htlc(payment_id, preimage, session_priv,
13833										path, false, compl_action, &pending_events, &&logger);
13834									pending_events_read = pending_events.into_inner().unwrap();
13835								}
13836							},
13837						}
13838					}
13839				}
13840
13841				// Whether the downstream channel was closed or not, try to re-apply any payment
13842				// preimages from it which may be needed in upstream channels for forwarded
13843				// payments.
13844				let mut fail_read = false;
13845				let outbound_claimed_htlcs_iter = monitor.get_all_current_outbound_htlcs()
13846					.into_iter()
13847					.filter_map(|(htlc_source, (htlc, preimage_opt))| {
13848						if let HTLCSource::PreviousHopData(prev_hop) = &htlc_source {
13849							if let Some(payment_preimage) = preimage_opt {
13850								let inbound_edge_monitor = args.channel_monitors.get(&prev_hop.outpoint);
13851								// Note that for channels which have gone to chain,
13852								// `get_all_current_outbound_htlcs` is never pruned and always returns
13853								// a constant set until the monitor is removed/archived. Thus, we
13854								// want to skip replaying claims that have definitely been resolved
13855								// on-chain.
13856
13857								// If the inbound monitor is not present, we assume it was fully
13858								// resolved and properly archived, implying this payment had plenty
13859								// of time to get claimed and we can safely skip any further
13860								// attempts to claim it (they wouldn't succeed anyway as we don't
13861								// have a monitor against which to do so).
13862								let inbound_edge_monitor = if let Some(monitor) = inbound_edge_monitor {
13863									monitor
13864								} else {
13865									return None;
13866								};
13867								// Second, if the inbound edge of the payment's monitor has been
13868								// fully claimed we've had at least `ANTI_REORG_DELAY` blocks to
13869								// get any PaymentForwarded event(s) to the user and assume that
13870								// there's no need to try to replay the claim just for that.
13871								let inbound_edge_balances = inbound_edge_monitor.get_claimable_balances();
13872								if inbound_edge_balances.is_empty() {
13873									return None;
13874								}
13875
13876								if prev_hop.counterparty_node_id.is_none() {
13877									// We no longer support claiming an HTLC where we don't have
13878									// the counterparty_node_id available if the claim has to go to
13879									// a closed channel. Its possible we can get away with it if
13880									// the channel is not yet closed, but its by no means a
13881									// guarantee.
13882
13883									// Thus, in this case we are a bit more aggressive with our
13884									// pruning - if we have no use for the claim (because the
13885									// inbound edge of the payment's monitor has already claimed
13886									// the HTLC) we skip trying to replay the claim.
13887									let htlc_payment_hash: PaymentHash = payment_preimage.into();
13888									let balance_could_incl_htlc = |bal| match bal {
13889										&Balance::ClaimableOnChannelClose { .. } => {
13890											// The channel is still open, assume we can still
13891											// claim against it
13892											true
13893										},
13894										&Balance::MaybePreimageClaimableHTLC { payment_hash, .. } => {
13895											payment_hash == htlc_payment_hash
13896										},
13897										_ => false,
13898									};
13899									let htlc_may_be_in_balances =
13900										inbound_edge_balances.iter().any(balance_could_incl_htlc);
13901									if !htlc_may_be_in_balances {
13902										return None;
13903									}
13904
13905									// First check if we're absolutely going to fail - if we need
13906									// to replay this claim to get the preimage into the inbound
13907									// edge monitor but the channel is closed (and thus we'll
13908									// immediately panic if we call claim_funds_from_hop).
13909									if short_to_chan_info.get(&prev_hop.short_channel_id).is_none() {
13910										log_error!(args.logger,
13911											"We need to replay the HTLC claim for payment_hash {} (preimage {}) but cannot do so as the HTLC was forwarded prior to LDK 0.0.124.\
13912											All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1",
13913											htlc_payment_hash,
13914											payment_preimage,
13915										);
13916										fail_read = true;
13917									}
13918
13919									// At this point we're confident we need the claim, but the
13920									// inbound edge channel is still live. As long as this remains
13921									// the case, we can conceivably proceed, but we run some risk
13922									// of panicking at runtime. The user ideally should have read
13923									// the release notes and we wouldn't be here, but we go ahead
13924									// and let things run in the hope that it'll all just work out.
13925									log_error!(args.logger,
13926										"We need to replay the HTLC claim for payment_hash {} (preimage {}) but don't have all the required information to do so reliably.\
13927										As long as the channel for the inbound edge of the forward remains open, this may work okay, but we may panic at runtime!\
13928										All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1\
13929										Continuing anyway, though panics may occur!",
13930										htlc_payment_hash,
13931										payment_preimage,
13932									);
13933								}
13934
13935								Some((htlc_source, payment_preimage, htlc.amount_msat,
13936									// Check if `counterparty_opt.is_none()` to see if the
13937									// downstream chan is closed (because we don't have a
13938									// channel_id -> peer map entry).
13939									counterparty_opt.is_none(),
13940									counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
13941									monitor.get_funding_txo().0, monitor.channel_id()))
13942							} else { None }
13943						} else {
13944							// If it was an outbound payment, we've handled it above - if a preimage
13945							// came in and we persisted the `ChannelManager` we either handled it and
13946							// are good to go or the channel force-closed - we don't have to handle the
13947							// channel still live case here.
13948							None
13949						}
13950					});
13951				for tuple in outbound_claimed_htlcs_iter {
13952					pending_claims_to_replay.push(tuple);
13953				}
13954				if fail_read {
13955					return Err(DecodeError::InvalidValue);
13956				}
13957			}
13958		}
13959
13960		if !forward_htlcs.is_empty() || !decode_update_add_htlcs.is_empty() || pending_outbounds.needs_abandon() {
13961			// If we have pending HTLCs to forward, assume we either dropped a
13962			// `PendingHTLCsForwardable` or the user received it but never processed it as they
13963			// shut down before the timer hit. Either way, set the time_forwardable to a small
13964			// constant as enough time has likely passed that we should simply handle the forwards
13965			// now, or at least after the user gets a chance to reconnect to our peers.
13966			pending_events_read.push_back((events::Event::PendingHTLCsForwardable {
13967				time_forwardable: Duration::from_secs(2),
13968			}, None));
13969		}
13970
13971		let expanded_inbound_key = args.node_signer.get_inbound_payment_key();
13972
13973		let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len());
13974		if let Some(purposes) = claimable_htlc_purposes {
13975			if purposes.len() != claimable_htlcs_list.len() {
13976				return Err(DecodeError::InvalidValue);
13977			}
13978			if let Some(onion_fields) = claimable_htlc_onion_fields {
13979				if onion_fields.len() != claimable_htlcs_list.len() {
13980					return Err(DecodeError::InvalidValue);
13981				}
13982				for (purpose, (onion, (payment_hash, htlcs))) in
13983					purposes.into_iter().zip(onion_fields.into_iter().zip(claimable_htlcs_list.into_iter()))
13984				{
13985					let existing_payment = claimable_payments.insert(payment_hash, ClaimablePayment {
13986						purpose, htlcs, onion_fields: onion,
13987					});
13988					if existing_payment.is_some() { return Err(DecodeError::InvalidValue); }
13989				}
13990			} else {
13991				for (purpose, (payment_hash, htlcs)) in purposes.into_iter().zip(claimable_htlcs_list.into_iter()) {
13992					let existing_payment = claimable_payments.insert(payment_hash, ClaimablePayment {
13993						purpose, htlcs, onion_fields: None,
13994					});
13995					if existing_payment.is_some() { return Err(DecodeError::InvalidValue); }
13996				}
13997			}
13998		} else {
13999			// LDK versions prior to 0.0.107 did not write a `pending_htlc_purposes`, but do
14000			// include a `_legacy_hop_data` in the `OnionPayload`.
14001			for (payment_hash, htlcs) in claimable_htlcs_list.drain(..) {
14002				if htlcs.is_empty() {
14003					return Err(DecodeError::InvalidValue);
14004				}
14005				let purpose = match &htlcs[0].onion_payload {
14006					OnionPayload::Invoice { _legacy_hop_data } => {
14007						if let Some(hop_data) = _legacy_hop_data {
14008							events::PaymentPurpose::Bolt11InvoicePayment {
14009								payment_preimage:
14010									match inbound_payment::verify(
14011										payment_hash, &hop_data, 0, &expanded_inbound_key, &args.logger
14012									) {
14013										Ok((payment_preimage, _)) => payment_preimage,
14014										Err(()) => {
14015											log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", &payment_hash);
14016											return Err(DecodeError::InvalidValue);
14017										}
14018									},
14019								payment_secret: hop_data.payment_secret,
14020							}
14021						} else { return Err(DecodeError::InvalidValue); }
14022					},
14023					OnionPayload::Spontaneous(payment_preimage) =>
14024						events::PaymentPurpose::SpontaneousPayment(*payment_preimage),
14025				};
14026				claimable_payments.insert(payment_hash, ClaimablePayment {
14027					purpose, htlcs, onion_fields: None,
14028				});
14029			}
14030		}
14031
14032		// Similar to the above cases for forwarded payments, if we have any pending inbound HTLCs
14033		// which haven't yet been claimed, we may be missing counterparty_node_id info and would
14034		// panic if we attempted to claim them at this point.
14035		for (payment_hash, payment) in claimable_payments.iter() {
14036			for htlc in payment.htlcs.iter() {
14037				if htlc.prev_hop.counterparty_node_id.is_some() {
14038					continue;
14039				}
14040				if short_to_chan_info.get(&htlc.prev_hop.short_channel_id).is_some() {
14041					log_error!(args.logger,
14042						"We do not have the required information to claim a pending payment with payment hash {} reliably.\
14043						As long as the channel for the inbound edge of the forward remains open, this may work okay, but we may panic at runtime!\
14044						All HTLCs that were received by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1\
14045						Continuing anyway, though panics may occur!",
14046						payment_hash,
14047					);
14048				} else {
14049					log_error!(args.logger,
14050						"We do not have the required information to claim a pending payment with payment hash {}.\
14051						All HTLCs that were received by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1",
14052						payment_hash,
14053					);
14054					return Err(DecodeError::InvalidValue);
14055				}
14056			}
14057		}
14058
14059		let mut secp_ctx = Secp256k1::new();
14060		secp_ctx.seeded_randomize(&args.entropy_source.get_secure_random_bytes());
14061
14062		let our_network_pubkey = match args.node_signer.get_node_id(Recipient::Node) {
14063			Ok(key) => key,
14064			Err(()) => return Err(DecodeError::InvalidValue)
14065		};
14066		if let Some(network_pubkey) = received_network_pubkey {
14067			if network_pubkey != our_network_pubkey {
14068				log_error!(args.logger, "Key that was generated does not match the existing key.");
14069				return Err(DecodeError::InvalidValue);
14070			}
14071		}
14072
14073		let mut outbound_scid_aliases = new_hash_set();
14074		for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() {
14075			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
14076			let peer_state = &mut *peer_state_lock;
14077			for (chan_id, phase) in peer_state.channel_by_id.iter_mut() {
14078				if let ChannelPhase::Funded(chan) = phase {
14079					let logger = WithChannelContext::from(&args.logger, &chan.context, None);
14080					if chan.context.outbound_scid_alias() == 0 {
14081						let mut outbound_scid_alias;
14082						loop {
14083							outbound_scid_alias = fake_scid::Namespace::OutboundAlias
14084								.get_fake_scid(best_block_height, &chain_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
14085							if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
14086						}
14087						chan.context.set_outbound_scid_alias(outbound_scid_alias);
14088					} else if !outbound_scid_aliases.insert(chan.context.outbound_scid_alias()) {
14089						// Note that in rare cases its possible to hit this while reading an older
14090						// channel if we just happened to pick a colliding outbound alias above.
14091						log_error!(logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
14092						return Err(DecodeError::InvalidValue);
14093					}
14094					if chan.context.is_usable() {
14095						if short_to_chan_info.insert(chan.context.outbound_scid_alias(), (chan.context.get_counterparty_node_id(), *chan_id)).is_some() {
14096							// Note that in rare cases its possible to hit this while reading an older
14097							// channel if we just happened to pick a colliding outbound alias above.
14098							log_error!(logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
14099							return Err(DecodeError::InvalidValue);
14100						}
14101					}
14102				} else {
14103					// We shouldn't have persisted (or read) any unfunded channel types so none should have been
14104					// created in this `channel_by_id` map.
14105					debug_assert!(false);
14106					return Err(DecodeError::InvalidValue);
14107				}
14108			}
14109		}
14110
14111		let bounded_fee_estimator = LowerBoundedFeeEstimator::new(args.fee_estimator);
14112
14113		for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
14114			if let Some(peer_state) = per_peer_state.get(&node_id) {
14115				for (channel_id, actions) in monitor_update_blocked_actions.iter() {
14116					let logger = WithContext::from(&args.logger, Some(node_id), Some(*channel_id), None);
14117					for action in actions.iter() {
14118						if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
14119							downstream_counterparty_and_funding_outpoint:
14120								Some(EventUnblockedChannel {
14121									counterparty_node_id: blocked_node_id,
14122									funding_txo: _,
14123									channel_id: blocked_channel_id,
14124									blocking_action,
14125								}), ..
14126						} = action {
14127							if let Some(blocked_peer_state) = per_peer_state.get(blocked_node_id) {
14128								log_trace!(logger,
14129									"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
14130									blocked_channel_id);
14131								blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
14132									.entry(*blocked_channel_id)
14133									.or_insert_with(Vec::new).push(blocking_action.clone());
14134							} else {
14135								// If the channel we were blocking has closed, we don't need to
14136								// worry about it - the blocked monitor update should never have
14137								// been released from the `Channel` object so it can't have
14138								// completed, and if the channel closed there's no reason to bother
14139								// anymore.
14140							}
14141						}
14142						if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately { .. } = action {
14143							debug_assert!(false, "Non-event-generating channel freeing should not appear in our queue");
14144						}
14145					}
14146				}
14147				peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
14148			} else {
14149				log_error!(WithContext::from(&args.logger, Some(node_id), None, None), "Got blocked actions without a per-peer-state for {}", node_id);
14150				return Err(DecodeError::InvalidValue);
14151			}
14152		}
14153
14154		let channel_manager = ChannelManager {
14155			chain_hash,
14156			fee_estimator: bounded_fee_estimator,
14157			chain_monitor: args.chain_monitor,
14158			tx_broadcaster: args.tx_broadcaster,
14159			router: args.router,
14160			message_router: args.message_router,
14161
14162			best_block: RwLock::new(BestBlock::new(best_block_hash, best_block_height)),
14163
14164			inbound_payment_key: expanded_inbound_key,
14165			pending_outbound_payments: pending_outbounds,
14166			pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
14167
14168			forward_htlcs: Mutex::new(forward_htlcs),
14169			decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs),
14170			claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap() }),
14171			outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
14172			outpoint_to_peer: Mutex::new(outpoint_to_peer),
14173			short_to_chan_info: FairRwLock::new(short_to_chan_info),
14174			fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(),
14175
14176			probing_cookie_secret: probing_cookie_secret.unwrap(),
14177			inbound_payment_id_secret: inbound_payment_id_secret.unwrap(),
14178
14179			our_network_pubkey,
14180			secp_ctx,
14181
14182			highest_seen_timestamp: AtomicUsize::new(highest_seen_timestamp as usize),
14183
14184			per_peer_state: FairRwLock::new(per_peer_state),
14185
14186			pending_events: Mutex::new(pending_events_read),
14187			pending_events_processor: AtomicBool::new(false),
14188			pending_background_events: Mutex::new(pending_background_events),
14189			total_consistency_lock: RwLock::new(()),
14190			background_events_processed_since_startup: AtomicBool::new(false),
14191
14192			event_persist_notifier: Notifier::new(),
14193			needs_persist_flag: AtomicBool::new(false),
14194
14195			funding_batch_states: Mutex::new(BTreeMap::new()),
14196
14197			pending_offers_messages: Mutex::new(Vec::new()),
14198			pending_async_payments_messages: Mutex::new(Vec::new()),
14199
14200			pending_broadcast_messages: Mutex::new(Vec::new()),
14201
14202			entropy_source: args.entropy_source,
14203			node_signer: args.node_signer,
14204			signer_provider: args.signer_provider,
14205
14206			last_days_feerates: Mutex::new(VecDeque::new()),
14207
14208			logger: args.logger,
14209			default_configuration: args.default_config,
14210
14211			#[cfg(feature = "dnssec")]
14212			hrn_resolver: OMNameResolver::new(highest_seen_timestamp, best_block_height),
14213			#[cfg(feature = "dnssec")]
14214			pending_dns_onion_messages: Mutex::new(Vec::new()),
14215
14216			#[cfg(feature = "_test_utils")]
14217			testing_dnssec_proof_offer_resolution_override: Mutex::new(new_hash_map()),
14218		};
14219
14220		let mut processed_claims: HashSet<Vec<MPPClaimHTLCSource>> = new_hash_set();
14221		for (_, monitor) in args.channel_monitors.iter() {
14222			for (payment_hash, (payment_preimage, payment_claims)) in monitor.get_stored_preimages() {
14223				if !payment_claims.is_empty() {
14224					for payment_claim in payment_claims {
14225						if processed_claims.contains(&payment_claim.mpp_parts) {
14226							// We might get the same payment a few times from different channels
14227							// that the MPP payment was received using. There's no point in trying
14228							// to claim the same payment again and again, so we check if the HTLCs
14229							// are the same and skip the payment here.
14230							continue;
14231						}
14232						if payment_claim.mpp_parts.is_empty() {
14233							return Err(DecodeError::InvalidValue);
14234						}
14235						let pending_claims = PendingMPPClaim {
14236							channels_without_preimage: payment_claim.mpp_parts.clone(),
14237							channels_with_preimage: Vec::new(),
14238						};
14239						let pending_claim_ptr_opt = Some(Arc::new(Mutex::new(pending_claims)));
14240
14241						// While it may be duplicative to generate a PaymentClaimed here, trying to
14242						// figure out if the user definitely saw it before shutdown would require some
14243						// nontrivial logic and may break as we move away from regularly persisting
14244						// ChannelManager. Instead, we rely on the users' event handler being
14245						// idempotent and just blindly generate one no matter what, letting the
14246						// preimages eventually timing out from ChannelMonitors to prevent us from
14247						// doing so forever.
14248
14249						let claim_found =
14250							channel_manager.claimable_payments.lock().unwrap().begin_claiming_payment(
14251								payment_hash, &channel_manager.node_signer, &channel_manager.logger,
14252								&channel_manager.inbound_payment_id_secret, true,
14253							);
14254						if claim_found.is_err() {
14255							let mut claimable_payments = channel_manager.claimable_payments.lock().unwrap();
14256							match claimable_payments.pending_claiming_payments.entry(payment_hash) {
14257								hash_map::Entry::Occupied(_) => {
14258									debug_assert!(false, "Entry was added in begin_claiming_payment");
14259									return Err(DecodeError::InvalidValue);
14260								},
14261								hash_map::Entry::Vacant(entry) => {
14262									entry.insert(payment_claim.claiming_payment);
14263								},
14264							}
14265						}
14266
14267						for part in payment_claim.mpp_parts.iter() {
14268							let pending_mpp_claim = pending_claim_ptr_opt.as_ref().map(|ptr| (
14269								part.counterparty_node_id, part.channel_id, part.htlc_id,
14270								PendingMPPClaimPointer(Arc::clone(&ptr))
14271							));
14272							let pending_claim_ptr = pending_claim_ptr_opt.as_ref().map(|ptr|
14273								RAAMonitorUpdateBlockingAction::ClaimedMPPPayment {
14274									pending_claim: PendingMPPClaimPointer(Arc::clone(&ptr)),
14275								}
14276							);
14277							// Note that we don't need to pass the `payment_info` here - its
14278							// already (clearly) durably on disk in the `ChannelMonitor` so there's
14279							// no need to worry about getting it into others.
14280							channel_manager.claim_mpp_part(
14281								part.into(), payment_preimage, None,
14282								|_, _|
14283									(Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim }), pending_claim_ptr)
14284							);
14285						}
14286						processed_claims.insert(payment_claim.mpp_parts);
14287					}
14288				} else {
14289					let per_peer_state = channel_manager.per_peer_state.read().unwrap();
14290					let mut claimable_payments = channel_manager.claimable_payments.lock().unwrap();
14291					let payment = claimable_payments.claimable_payments.remove(&payment_hash);
14292					mem::drop(claimable_payments);
14293					if let Some(payment) = payment {
14294						log_info!(channel_manager.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", &payment_hash);
14295						let mut claimable_amt_msat = 0;
14296						let mut receiver_node_id = Some(our_network_pubkey);
14297						let phantom_shared_secret = payment.htlcs[0].prev_hop.phantom_shared_secret;
14298						if phantom_shared_secret.is_some() {
14299							let phantom_pubkey = channel_manager.node_signer.get_node_id(Recipient::PhantomNode)
14300								.expect("Failed to get node_id for phantom node recipient");
14301							receiver_node_id = Some(phantom_pubkey)
14302						}
14303						for claimable_htlc in &payment.htlcs {
14304							claimable_amt_msat += claimable_htlc.value;
14305
14306							// Add a holding-cell claim of the payment to the Channel, which should be
14307							// applied ~immediately on peer reconnection. Because it won't generate a
14308							// new commitment transaction we can just provide the payment preimage to
14309							// the corresponding ChannelMonitor and nothing else.
14310							//
14311							// We do so directly instead of via the normal ChannelMonitor update
14312							// procedure as the ChainMonitor hasn't yet been initialized, implying
14313							// we're not allowed to call it directly yet. Further, we do the update
14314							// without incrementing the ChannelMonitor update ID as there isn't any
14315							// reason to.
14316							// If we were to generate a new ChannelMonitor update ID here and then
14317							// crash before the user finishes block connect we'd end up force-closing
14318							// this channel as well. On the flip side, there's no harm in restarting
14319							// without the new monitor persisted - we'll end up right back here on
14320							// restart.
14321							let previous_channel_id = claimable_htlc.prev_hop.channel_id;
14322							let peer_node_id_opt = channel_manager.outpoint_to_peer.lock().unwrap()
14323								.get(&claimable_htlc.prev_hop.outpoint).cloned();
14324							if let Some(peer_node_id) = peer_node_id_opt {
14325								let peer_state_mutex = per_peer_state.get(&peer_node_id).unwrap();
14326								let mut peer_state_lock = peer_state_mutex.lock().unwrap();
14327								let peer_state = &mut *peer_state_lock;
14328								if let Some(ChannelPhase::Funded(channel)) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
14329									let logger = WithChannelContext::from(&channel_manager.logger, &channel.context, Some(payment_hash));
14330									channel.claim_htlc_while_disconnected_dropping_mon_update_legacy(
14331										claimable_htlc.prev_hop.htlc_id, payment_preimage, &&logger
14332									);
14333								}
14334							}
14335							if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
14336								// Note that this is unsafe as we no longer require the
14337								// `ChannelMonitor`s to be re-persisted prior to this
14338								// `ChannelManager` being persisted after we get started running.
14339								// If this `ChannelManager` gets persisted first then we crash, we
14340								// won't have the `claimable_payments` entry we need to re-enter
14341								// this code block, causing us to not re-apply the preimage to this
14342								// `ChannelMonitor`.
14343								//
14344								// We should never be here with modern payment claims, however, as
14345								// they should always include the HTLC list. Instead, this is only
14346								// for nodes during upgrade, and we explicitly require the old
14347								// persistence semantics on upgrade in the release notes.
14348								previous_hop_monitor.provide_payment_preimage_unsafe_legacy(
14349									&payment_hash, &payment_preimage, &channel_manager.tx_broadcaster,
14350									&channel_manager.fee_estimator, &channel_manager.logger
14351								);
14352							}
14353						}
14354						let mut pending_events = channel_manager.pending_events.lock().unwrap();
14355						let payment_id = payment.inbound_payment_id(&inbound_payment_id_secret.unwrap());
14356						pending_events.push_back((events::Event::PaymentClaimed {
14357							receiver_node_id,
14358							payment_hash,
14359							purpose: payment.purpose,
14360							amount_msat: claimable_amt_msat,
14361							htlcs: payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(),
14362							sender_intended_total_msat: payment.htlcs.first().map(|htlc| htlc.total_msat),
14363							onion_fields: payment.onion_fields,
14364							payment_id: Some(payment_id),
14365						}, None));
14366					}
14367				}
14368			}
14369		}
14370
14371		for htlc_source in failed_htlcs.drain(..) {
14372			let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
14373			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
14374			let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
14375			channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
14376		}
14377
14378		for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding, downstream_channel_id) in pending_claims_to_replay {
14379			// We use `downstream_closed` in place of `from_onchain` here just as a guess - we
14380			// don't remember in the `ChannelMonitor` where we got a preimage from, but if the
14381			// channel is closed we just assume that it probably came from an on-chain claim.
14382			channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None,
14383				downstream_closed, true, downstream_node_id, downstream_funding,
14384				downstream_channel_id, None
14385			);
14386		}
14387
14388		//TODO: Broadcast channel update for closed channels, but only after we've made a
14389		//connection or two.
14390
14391		Ok((best_block_hash.clone(), channel_manager))
14392	}
14393}
14394
14395#[cfg(test)]
14396mod tests {
14397	use bitcoin::hashes::Hash;
14398	use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
14399	use core::sync::atomic::Ordering;
14400	use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
14401	use crate::ln::types::ChannelId;
14402	use crate::types::payment::{PaymentPreimage, PaymentHash, PaymentSecret};
14403	use crate::ln::channelmanager::{create_recv_pending_htlc_info, HTLCForwardInfo, inbound_payment, PaymentId, RecipientOnionFields, InterceptId};
14404	use crate::ln::functional_test_utils::*;
14405	use crate::ln::msgs::{self, ErrorAction};
14406	use crate::ln::msgs::ChannelMessageHandler;
14407	use crate::ln::outbound_payment::Retry;
14408	use crate::prelude::*;
14409	use crate::routing::router::{PaymentParameters, RouteParameters, find_route};
14410	use crate::util::errors::APIError;
14411	use crate::util::ser::Writeable;
14412	use crate::util::test_utils;
14413	use crate::util::config::{ChannelConfig, ChannelConfigUpdate};
14414	use crate::sign::EntropySource;
14415
14416	#[test]
14417	fn test_notify_limits() {
14418		// Check that a few cases which don't require the persistence of a new ChannelManager,
14419		// indeed, do not cause the persistence of a new ChannelManager.
14420		let chanmon_cfgs = create_chanmon_cfgs(3);
14421		let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
14422		let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
14423		let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
14424
14425		// All nodes start with a persistable update pending as `create_network` connects each node
14426		// with all other nodes to make most tests simpler.
14427		assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
14428		assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
14429		assert!(nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
14430
14431		let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1);
14432
14433		// We check that the channel info nodes have doesn't change too early, even though we try
14434		// to connect messages with new values
14435		chan.0.contents.fee_base_msat *= 2;
14436		chan.1.contents.fee_base_msat *= 2;
14437		let node_a_chan_info = nodes[0].node.list_channels_with_counterparty(
14438			&nodes[1].node.get_our_node_id()).pop().unwrap();
14439		let node_b_chan_info = nodes[1].node.list_channels_with_counterparty(
14440			&nodes[0].node.get_our_node_id()).pop().unwrap();
14441
14442		// The first two nodes (which opened a channel) should now require fresh persistence
14443		assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
14444		assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
14445		// ... but the last node should not.
14446		assert!(!nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
14447		// After persisting the first two nodes they should no longer need fresh persistence.
14448		assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
14449		assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
14450
14451		// Node 3, unrelated to the only channel, shouldn't care if it receives a channel_update
14452		// about the channel.
14453		nodes[2].node.handle_channel_update(nodes[1].node.get_our_node_id(), &chan.0);
14454		nodes[2].node.handle_channel_update(nodes[1].node.get_our_node_id(), &chan.1);
14455		assert!(!nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
14456
14457		// The nodes which are a party to the channel should also ignore messages from unrelated
14458		// parties.
14459		nodes[0].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.0);
14460		nodes[0].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.1);
14461		nodes[1].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.0);
14462		nodes[1].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.1);
14463		assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
14464		assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
14465
14466		// At this point the channel info given by peers should still be the same.
14467		assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
14468		assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
14469
14470		// An earlier version of handle_channel_update didn't check the directionality of the
14471		// update message and would always update the local fee info, even if our peer was
14472		// (spuriously) forwarding us our own channel_update.
14473		let as_node_one = nodes[0].node.get_our_node_id().serialize()[..] < nodes[1].node.get_our_node_id().serialize()[..];
14474		let as_update = if as_node_one == (chan.0.contents.channel_flags & 1 == 0 /* chan.0 is from node one */) { &chan.0 } else { &chan.1 };
14475		let bs_update = if as_node_one == (chan.0.contents.channel_flags & 1 == 0 /* chan.0 is from node one */) { &chan.1 } else { &chan.0 };
14476
14477		// First deliver each peers' own message, checking that the node doesn't need to be
14478		// persisted and that its channel info remains the same.
14479		nodes[0].node.handle_channel_update(nodes[1].node.get_our_node_id(), &as_update);
14480		nodes[1].node.handle_channel_update(nodes[0].node.get_our_node_id(), &bs_update);
14481		assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
14482		assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
14483		assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
14484		assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
14485
14486		// Finally, deliver the other peers' message, ensuring each node needs to be persisted and
14487		// the channel info has updated.
14488		nodes[0].node.handle_channel_update(nodes[1].node.get_our_node_id(), &bs_update);
14489		nodes[1].node.handle_channel_update(nodes[0].node.get_our_node_id(), &as_update);
14490		assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
14491		assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
14492		assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
14493		assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
14494	}
14495
14496	#[test]
14497	fn test_keysend_dup_hash_partial_mpp() {
14498		// Test that a keysend payment with a duplicate hash to an existing partial MPP payment fails as
14499		// expected.
14500		let chanmon_cfgs = create_chanmon_cfgs(2);
14501		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
14502		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
14503		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
14504		create_announced_chan_between_nodes(&nodes, 0, 1);
14505
14506		// First, send a partial MPP payment.
14507		let (route, our_payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
14508		let mut mpp_route = route.clone();
14509		mpp_route.paths.push(mpp_route.paths[0].clone());
14510
14511		let payment_id = PaymentId([42; 32]);
14512		// Use the utility function send_payment_along_path to send the payment with MPP data which
14513		// indicates there are more HTLCs coming.
14514		let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
14515		let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
14516			RecipientOnionFields::secret_only(payment_secret), payment_id, &mpp_route).unwrap();
14517		nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &our_payment_hash,
14518			RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
14519		check_added_monitors!(nodes[0], 1);
14520		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
14521		assert_eq!(events.len(), 1);
14522		pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
14523
14524		// Next, send a keysend payment with the same payment_hash and make sure it fails.
14525		nodes[0].node.send_spontaneous_payment(
14526			Some(payment_preimage), RecipientOnionFields::spontaneous_empty(),
14527			PaymentId(payment_preimage.0), route.route_params.clone().unwrap(), Retry::Attempts(0)
14528		).unwrap();
14529		check_added_monitors!(nodes[0], 1);
14530		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
14531		assert_eq!(events.len(), 1);
14532		let ev = events.drain(..).next().unwrap();
14533		let payment_event = SendEvent::from_event(ev);
14534		nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
14535		check_added_monitors!(nodes[1], 0);
14536		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
14537		expect_pending_htlcs_forwardable!(nodes[1]);
14538		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
14539		check_added_monitors!(nodes[1], 1);
14540		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
14541		assert!(updates.update_add_htlcs.is_empty());
14542		assert!(updates.update_fulfill_htlcs.is_empty());
14543		assert_eq!(updates.update_fail_htlcs.len(), 1);
14544		assert!(updates.update_fail_malformed_htlcs.is_empty());
14545		assert!(updates.update_fee.is_none());
14546		nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
14547		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
14548		expect_payment_failed!(nodes[0], our_payment_hash, true);
14549
14550		// Send the second half of the original MPP payment.
14551		nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &our_payment_hash,
14552			RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
14553		check_added_monitors!(nodes[0], 1);
14554		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
14555		assert_eq!(events.len(), 1);
14556		pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), true, None);
14557
14558		// Claim the full MPP payment. Note that we can't use a test utility like
14559		// claim_funds_along_route because the ordering of the messages causes the second half of the
14560		// payment to be put in the holding cell, which confuses the test utilities. So we exchange the
14561		// lightning messages manually.
14562		nodes[1].node.claim_funds(payment_preimage);
14563		expect_payment_claimed!(nodes[1], our_payment_hash, 200_000);
14564		check_added_monitors!(nodes[1], 2);
14565
14566		let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
14567		nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]);
14568		expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
14569		nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed);
14570		check_added_monitors!(nodes[0], 1);
14571		let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
14572		nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa);
14573		check_added_monitors!(nodes[1], 1);
14574		let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
14575		nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_first_cs);
14576		check_added_monitors!(nodes[1], 1);
14577		let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
14578		nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
14579		nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
14580		check_added_monitors!(nodes[0], 1);
14581		let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
14582		nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa);
14583		let as_second_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
14584		check_added_monitors!(nodes[0], 1);
14585		nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa);
14586		check_added_monitors!(nodes[1], 1);
14587		nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_second_updates.commitment_signed);
14588		check_added_monitors!(nodes[1], 1);
14589		let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
14590		nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_third_raa);
14591		check_added_monitors!(nodes[0], 1);
14592
14593		// Note that successful MPP payments will generate a single PaymentSent event upon the first
14594		// path's success and a PaymentPathSuccessful event for each path's success.
14595		let events = nodes[0].node.get_and_clear_pending_events();
14596		assert_eq!(events.len(), 2);
14597		match events[0] {
14598			Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
14599				assert_eq!(payment_id, *actual_payment_id);
14600				assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
14601				assert_eq!(route.paths[0], *path);
14602			},
14603			_ => panic!("Unexpected event"),
14604		}
14605		match events[1] {
14606			Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
14607				assert_eq!(payment_id, *actual_payment_id);
14608				assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
14609				assert_eq!(route.paths[0], *path);
14610			},
14611			_ => panic!("Unexpected event"),
14612		}
14613	}
14614
14615	#[test]
14616	fn test_keysend_dup_payment_hash() {
14617		// (1): Test that a keysend payment with a duplicate payment hash to an existing pending
14618		//      outbound regular payment fails as expected.
14619		// (2): Test that a regular payment with a duplicate payment hash to an existing keysend payment
14620		//      fails as expected.
14621		// (3): Test that a keysend payment with a duplicate payment hash to an existing keysend
14622		//      payment fails as expected. We only accept MPP keysends with payment secrets and reject
14623		//      otherwise.
14624		let chanmon_cfgs = create_chanmon_cfgs(2);
14625		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
14626		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
14627		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
14628		create_announced_chan_between_nodes(&nodes, 0, 1);
14629		let scorer = test_utils::TestScorer::new();
14630		let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
14631
14632		// To start (1), send a regular payment but don't claim it.
14633		let expected_route = [&nodes[1]];
14634		let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &expected_route, 100_000);
14635
14636		// Next, attempt a keysend payment and make sure it fails.
14637		let route_params = RouteParameters::from_payment_params_and_value(
14638			PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(),
14639			TEST_FINAL_CLTV, false), 100_000);
14640		nodes[0].node.send_spontaneous_payment(
14641			Some(payment_preimage), RecipientOnionFields::spontaneous_empty(),
14642			PaymentId(payment_preimage.0), route_params.clone(), Retry::Attempts(0)
14643		).unwrap();
14644		check_added_monitors!(nodes[0], 1);
14645		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
14646		assert_eq!(events.len(), 1);
14647		let ev = events.drain(..).next().unwrap();
14648		let payment_event = SendEvent::from_event(ev);
14649		nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
14650		check_added_monitors!(nodes[1], 0);
14651		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
14652		// We have to forward pending HTLCs twice - once tries to forward the payment forward (and
14653		// fails), the second will process the resulting failure and fail the HTLC backward
14654		expect_pending_htlcs_forwardable!(nodes[1]);
14655		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
14656		check_added_monitors!(nodes[1], 1);
14657		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
14658		assert!(updates.update_add_htlcs.is_empty());
14659		assert!(updates.update_fulfill_htlcs.is_empty());
14660		assert_eq!(updates.update_fail_htlcs.len(), 1);
14661		assert!(updates.update_fail_malformed_htlcs.is_empty());
14662		assert!(updates.update_fee.is_none());
14663		nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
14664		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
14665		expect_payment_failed!(nodes[0], payment_hash, true);
14666
14667		// Finally, claim the original payment.
14668		claim_payment(&nodes[0], &expected_route, payment_preimage);
14669
14670		// To start (2), send a keysend payment but don't claim it.
14671		let payment_preimage = PaymentPreimage([42; 32]);
14672		let route = find_route(
14673			&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
14674			None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
14675		).unwrap();
14676		let payment_hash = nodes[0].node.send_spontaneous_payment(
14677			Some(payment_preimage), RecipientOnionFields::spontaneous_empty(),
14678			PaymentId(payment_preimage.0), route.route_params.clone().unwrap(), Retry::Attempts(0)
14679		).unwrap();
14680		check_added_monitors!(nodes[0], 1);
14681		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
14682		assert_eq!(events.len(), 1);
14683		let event = events.pop().unwrap();
14684		let path = vec![&nodes[1]];
14685		pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
14686
14687		// Next, attempt a regular payment and make sure it fails.
14688		let payment_secret = PaymentSecret([43; 32]);
14689		nodes[0].node.send_payment_with_route(route.clone(), payment_hash,
14690			RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
14691		check_added_monitors!(nodes[0], 1);
14692		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
14693		assert_eq!(events.len(), 1);
14694		let ev = events.drain(..).next().unwrap();
14695		let payment_event = SendEvent::from_event(ev);
14696		nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
14697		check_added_monitors!(nodes[1], 0);
14698		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
14699		expect_pending_htlcs_forwardable!(nodes[1]);
14700		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
14701		check_added_monitors!(nodes[1], 1);
14702		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
14703		assert!(updates.update_add_htlcs.is_empty());
14704		assert!(updates.update_fulfill_htlcs.is_empty());
14705		assert_eq!(updates.update_fail_htlcs.len(), 1);
14706		assert!(updates.update_fail_malformed_htlcs.is_empty());
14707		assert!(updates.update_fee.is_none());
14708		nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
14709		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
14710		expect_payment_failed!(nodes[0], payment_hash, true);
14711
14712		// Finally, succeed the keysend payment.
14713		claim_payment(&nodes[0], &expected_route, payment_preimage);
14714
14715		// To start (3), send a keysend payment but don't claim it.
14716		let payment_id_1 = PaymentId([44; 32]);
14717		let payment_hash = nodes[0].node.send_spontaneous_payment(
14718			Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_1,
14719			route.route_params.clone().unwrap(), Retry::Attempts(0)
14720		).unwrap();
14721		check_added_monitors!(nodes[0], 1);
14722		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
14723		assert_eq!(events.len(), 1);
14724		let event = events.pop().unwrap();
14725		let path = vec![&nodes[1]];
14726		pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
14727
14728		// Next, attempt a keysend payment and make sure it fails.
14729		let route_params = RouteParameters::from_payment_params_and_value(
14730			PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV, false),
14731			100_000
14732		);
14733		let payment_id_2 = PaymentId([45; 32]);
14734		nodes[0].node.send_spontaneous_payment(
14735			Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_2, route_params,
14736			Retry::Attempts(0)
14737		).unwrap();
14738		check_added_monitors!(nodes[0], 1);
14739		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
14740		assert_eq!(events.len(), 1);
14741		let ev = events.drain(..).next().unwrap();
14742		let payment_event = SendEvent::from_event(ev);
14743		nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
14744		check_added_monitors!(nodes[1], 0);
14745		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
14746		expect_pending_htlcs_forwardable!(nodes[1]);
14747		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
14748		check_added_monitors!(nodes[1], 1);
14749		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
14750		assert!(updates.update_add_htlcs.is_empty());
14751		assert!(updates.update_fulfill_htlcs.is_empty());
14752		assert_eq!(updates.update_fail_htlcs.len(), 1);
14753		assert!(updates.update_fail_malformed_htlcs.is_empty());
14754		assert!(updates.update_fee.is_none());
14755		nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
14756		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
14757		expect_payment_failed!(nodes[0], payment_hash, true);
14758
14759		// Finally, claim the original payment.
14760		claim_payment(&nodes[0], &expected_route, payment_preimage);
14761	}
14762
14763	#[test]
14764	fn test_keysend_hash_mismatch() {
14765		// Test that if we receive a keysend `update_add_htlc` msg, we fail as expected if the keysend
14766		// preimage doesn't match the msg's payment hash.
14767		let chanmon_cfgs = create_chanmon_cfgs(2);
14768		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
14769		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
14770		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
14771
14772		let payer_pubkey = nodes[0].node.get_our_node_id();
14773		let payee_pubkey = nodes[1].node.get_our_node_id();
14774
14775		let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
14776		let route_params = RouteParameters::from_payment_params_and_value(
14777			PaymentParameters::for_keysend(payee_pubkey, 40, false), 10_000);
14778		let network_graph = nodes[0].network_graph;
14779		let first_hops = nodes[0].node.list_usable_channels();
14780		let scorer = test_utils::TestScorer::new();
14781		let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
14782		let route = find_route(
14783			&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
14784			nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
14785		).unwrap();
14786
14787		let test_preimage = PaymentPreimage([42; 32]);
14788		let mismatch_payment_hash = PaymentHash([43; 32]);
14789		let session_privs = nodes[0].node.test_add_new_pending_payment(mismatch_payment_hash,
14790			RecipientOnionFields::spontaneous_empty(), PaymentId(mismatch_payment_hash.0), &route).unwrap();
14791		nodes[0].node.test_send_payment_internal(&route, mismatch_payment_hash,
14792			RecipientOnionFields::spontaneous_empty(), Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap();
14793		check_added_monitors!(nodes[0], 1);
14794
14795		let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
14796		assert_eq!(updates.update_add_htlcs.len(), 1);
14797		assert!(updates.update_fulfill_htlcs.is_empty());
14798		assert!(updates.update_fail_htlcs.is_empty());
14799		assert!(updates.update_fail_malformed_htlcs.is_empty());
14800		assert!(updates.update_fee.is_none());
14801		nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
14802
14803		nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Payment preimage didn't match payment hash", 1);
14804	}
14805
14806	#[test]
14807	fn test_multi_hop_missing_secret() {
14808		let chanmon_cfgs = create_chanmon_cfgs(4);
14809		let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
14810		let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
14811		let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
14812
14813		let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
14814		let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
14815		let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
14816		let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
14817
14818		// Marshall an MPP route.
14819		let (mut route, payment_hash, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
14820		let path = route.paths[0].clone();
14821		route.paths.push(path);
14822		route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
14823		route.paths[0].hops[0].short_channel_id = chan_1_id;
14824		route.paths[0].hops[1].short_channel_id = chan_3_id;
14825		route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
14826		route.paths[1].hops[0].short_channel_id = chan_2_id;
14827		route.paths[1].hops[1].short_channel_id = chan_4_id;
14828
14829		nodes[0].node.send_payment_with_route(route, payment_hash,
14830			RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)).unwrap();
14831		let events = nodes[0].node.get_and_clear_pending_events();
14832		assert_eq!(events.len(), 1);
14833		match events[0] {
14834			Event::PaymentFailed { reason, .. } => {
14835				assert_eq!(reason.unwrap(), crate::events::PaymentFailureReason::UnexpectedError);
14836			}
14837			_ => panic!()
14838		}
14839		nodes[0].logger.assert_log_contains("lightning::ln::outbound_payment", "Payment secret is required for multi-path payments", 2);
14840		assert!(nodes[0].node.list_recent_payments().is_empty());
14841	}
14842
14843	#[test]
14844	fn test_channel_update_cached() {
14845		let chanmon_cfgs = create_chanmon_cfgs(3);
14846		let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
14847		let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
14848		let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
14849
14850		let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
14851
14852		nodes[0].node.force_close_channel_with_peer(&chan.2, &nodes[1].node.get_our_node_id(), None, true).unwrap();
14853		check_added_monitors!(nodes[0], 1);
14854		check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
14855
14856		// Confirm that the channel_update was not sent immediately to node[1] but was cached.
14857		let node_1_events = nodes[1].node.get_and_clear_pending_msg_events();
14858		assert_eq!(node_1_events.len(), 0);
14859
14860		{
14861			// Assert that ChannelUpdate message has been added to node[0] pending broadcast messages
14862			let pending_broadcast_messages= nodes[0].node.pending_broadcast_messages.lock().unwrap();
14863			assert_eq!(pending_broadcast_messages.len(), 1);
14864		}
14865
14866		// Test that we do not retrieve the pending broadcast messages when we are not connected to any peer
14867		nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
14868		nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
14869
14870		nodes[0].node.peer_disconnected(nodes[2].node.get_our_node_id());
14871		nodes[2].node.peer_disconnected(nodes[0].node.get_our_node_id());
14872
14873		let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
14874		assert_eq!(node_0_events.len(), 0);
14875
14876		// Now we reconnect to a peer
14877		nodes[0].node.peer_connected(nodes[2].node.get_our_node_id(), &msgs::Init {
14878			features: nodes[2].node.init_features(), networks: None, remote_network_address: None
14879		}, true).unwrap();
14880		nodes[2].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
14881			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
14882		}, false).unwrap();
14883
14884		// Confirm that get_and_clear_pending_msg_events correctly captures pending broadcast messages
14885		let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
14886		assert_eq!(node_0_events.len(), 1);
14887		match &node_0_events[0] {
14888			MessageSendEvent::BroadcastChannelUpdate { .. } => (),
14889			_ => panic!("Unexpected event"),
14890		}
14891		{
14892			// Assert that ChannelUpdate message has been cleared from nodes[0] pending broadcast messages
14893			let pending_broadcast_messages= nodes[0].node.pending_broadcast_messages.lock().unwrap();
14894			assert_eq!(pending_broadcast_messages.len(), 0);
14895		}
14896	}
14897
14898	#[test]
14899	fn test_drop_disconnected_peers_when_removing_channels() {
14900		let chanmon_cfgs = create_chanmon_cfgs(2);
14901		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
14902		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
14903		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
14904
14905		create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
14906
14907		nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
14908		nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
14909		let chan_id = nodes[0].node.list_channels()[0].channel_id;
14910		let error_message = "Channel force-closed";
14911		nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
14912		check_added_monitors!(nodes[0], 1);
14913		check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 1_000_000);
14914
14915		{
14916			// Assert that nodes[1] is awaiting removal for nodes[0] once nodes[1] has been
14917			// disconnected and the channel between has been force closed.
14918			let nodes_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
14919			// Assert that nodes[1] isn't removed before `timer_tick_occurred` has been executed.
14920			assert_eq!(nodes_0_per_peer_state.len(), 1);
14921			assert!(nodes_0_per_peer_state.get(&nodes[1].node.get_our_node_id()).is_some());
14922		}
14923
14924		nodes[0].node.timer_tick_occurred();
14925
14926		{
14927			// Assert that nodes[1] has now been removed.
14928			assert_eq!(nodes[0].node.per_peer_state.read().unwrap().len(), 0);
14929		}
14930	}
14931
14932	#[test]
14933	fn test_drop_peers_when_removing_unfunded_channels() {
14934		let chanmon_cfgs = create_chanmon_cfgs(2);
14935		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
14936		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
14937		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
14938
14939		exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
14940		let events = nodes[0].node.get_and_clear_pending_events();
14941		assert_eq!(events.len(), 1, "Unexpected events {:?}", events);
14942		match events[0] {
14943			Event::FundingGenerationReady { .. } => {}
14944			_ => panic!("Unexpected event {:?}", events),
14945		}
14946
14947		nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
14948		nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
14949		check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer, [nodes[1].node.get_our_node_id()], 1_000_000);
14950		check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer, [nodes[0].node.get_our_node_id()], 1_000_000);
14951
14952		// At this point the state for the peers should have been removed.
14953		assert_eq!(nodes[0].node.per_peer_state.read().unwrap().len(), 0);
14954		assert_eq!(nodes[1].node.per_peer_state.read().unwrap().len(), 0);
14955	}
14956
14957	#[test]
14958	fn bad_inbound_payment_hash() {
14959		// Add coverage for checking that a user-provided payment hash matches the payment secret.
14960		let chanmon_cfgs = create_chanmon_cfgs(2);
14961		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
14962		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
14963		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
14964
14965		let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[0]);
14966		let payment_data = msgs::FinalOnionHopData {
14967			payment_secret,
14968			total_msat: 100_000,
14969		};
14970
14971		// Ensure that if the payment hash given to `inbound_payment::verify` differs from the original,
14972		// payment verification fails as expected.
14973		let mut bad_payment_hash = payment_hash.clone();
14974		bad_payment_hash.0[0] += 1;
14975		match inbound_payment::verify(bad_payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger) {
14976			Ok(_) => panic!("Unexpected ok"),
14977			Err(()) => {
14978				nodes[0].logger.assert_log_contains("lightning::ln::inbound_payment", "Failing HTLC with user-generated payment_hash", 1);
14979			}
14980		}
14981
14982		// Check that using the original payment hash succeeds.
14983		assert!(inbound_payment::verify(payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger).is_ok());
14984	}
14985
14986	#[test]
14987	fn test_outpoint_to_peer_coverage() {
14988		// Test that the `ChannelManager:outpoint_to_peer` contains channels which have been assigned
14989		// a `channel_id` (i.e. have had the funding tx created), and that they are removed once
14990		// the channel is successfully closed.
14991		let chanmon_cfgs = create_chanmon_cfgs(2);
14992		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
14993		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
14994		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
14995
14996		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
14997		let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
14998		nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel);
14999		let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
15000		nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel);
15001
15002		let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
15003		let channel_id = ChannelId::from_bytes(tx.compute_txid().to_byte_array());
15004		{
15005			// Ensure that the `outpoint_to_peer` map is empty until either party has received the
15006			// funding transaction, and have the real `channel_id`.
15007			assert_eq!(nodes[0].node.outpoint_to_peer.lock().unwrap().len(), 0);
15008			assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
15009		}
15010
15011		nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
15012		{
15013			// Assert that `nodes[0]`'s `outpoint_to_peer` map is populated with the channel as soon as
15014			// as it has the funding transaction.
15015			let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
15016			assert_eq!(nodes_0_lock.len(), 1);
15017			assert!(nodes_0_lock.contains_key(&funding_output));
15018		}
15019
15020		assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
15021
15022		let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
15023
15024		nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg);
15025		{
15026			let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
15027			assert_eq!(nodes_0_lock.len(), 1);
15028			assert!(nodes_0_lock.contains_key(&funding_output));
15029		}
15030		expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
15031
15032		{
15033			// Assert that `nodes[1]`'s `outpoint_to_peer` map is populated with the channel as
15034			// soon as it has the funding transaction.
15035			let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
15036			assert_eq!(nodes_1_lock.len(), 1);
15037			assert!(nodes_1_lock.contains_key(&funding_output));
15038		}
15039		check_added_monitors!(nodes[1], 1);
15040		let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
15041		nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed);
15042		check_added_monitors!(nodes[0], 1);
15043		expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
15044		let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
15045		let (announcement, nodes_0_update, nodes_1_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
15046		update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &nodes_0_update, &nodes_1_update);
15047
15048		nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
15049		nodes[1].node.handle_shutdown(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
15050		let nodes_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
15051		nodes[0].node.handle_shutdown(nodes[1].node.get_our_node_id(), &nodes_1_shutdown);
15052
15053		let closing_signed_node_0 = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
15054		nodes[1].node.handle_closing_signed(nodes[0].node.get_our_node_id(), &closing_signed_node_0);
15055		{
15056			// Assert that the channel is kept in the `outpoint_to_peer` map for both nodes until the
15057			// channel can be fully closed by both parties (i.e. no outstanding htlcs exists, the
15058			// fee for the closing transaction has been negotiated and the parties has the other
15059			// party's signature for the fee negotiated closing transaction.)
15060			let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
15061			assert_eq!(nodes_0_lock.len(), 1);
15062			assert!(nodes_0_lock.contains_key(&funding_output));
15063		}
15064
15065		{
15066			// At this stage, `nodes[1]` has proposed a fee for the closing transaction in the
15067			// `handle_closing_signed` call above. As `nodes[1]` has not yet received the signature
15068			// from `nodes[0]` for the closing transaction with the proposed fee, the channel is
15069			// kept in the `nodes[1]`'s `outpoint_to_peer` map.
15070			let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
15071			assert_eq!(nodes_1_lock.len(), 1);
15072			assert!(nodes_1_lock.contains_key(&funding_output));
15073		}
15074
15075		nodes[0].node.handle_closing_signed(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
15076		{
15077			// `nodes[0]` accepts `nodes[1]`'s proposed fee for the closing transaction, and
15078			// therefore has all it needs to fully close the channel (both signatures for the
15079			// closing transaction).
15080			// Assert that the channel is removed from `nodes[0]`'s `outpoint_to_peer` map as it can be
15081			// fully closed by `nodes[0]`.
15082			assert_eq!(nodes[0].node.outpoint_to_peer.lock().unwrap().len(), 0);
15083
15084			// Assert that the channel is still in `nodes[1]`'s  `outpoint_to_peer` map, as `nodes[1]`
15085			// doesn't have `nodes[0]`'s signature for the closing transaction yet.
15086			let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
15087			assert_eq!(nodes_1_lock.len(), 1);
15088			assert!(nodes_1_lock.contains_key(&funding_output));
15089		}
15090
15091		let (_nodes_0_update, closing_signed_node_0) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
15092
15093		nodes[1].node.handle_closing_signed(nodes[0].node.get_our_node_id(), &closing_signed_node_0.unwrap());
15094		{
15095			// Assert that the channel has now been removed from both parties `outpoint_to_peer` map once
15096			// they both have everything required to fully close the channel.
15097			assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
15098		}
15099		let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
15100
15101		check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
15102		check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
15103	}
15104
15105	fn check_not_connected_to_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
15106		let expected_message = format!("Not connected to node: {}", expected_public_key);
15107		check_api_error_message(expected_message, res_err)
15108	}
15109
15110	fn check_unkown_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
15111		let expected_message = format!("Can't find a peer matching the passed counterparty node_id {}", expected_public_key);
15112		check_api_error_message(expected_message, res_err)
15113	}
15114
15115	fn check_channel_unavailable_error<T>(res_err: Result<T, APIError>, expected_channel_id: ChannelId, peer_node_id: PublicKey) {
15116		let expected_message = format!("Channel with id {} not found for the passed counterparty node_id {}", expected_channel_id, peer_node_id);
15117		check_api_error_message(expected_message, res_err)
15118	}
15119
15120	fn check_api_misuse_error<T>(res_err: Result<T, APIError>) {
15121		let expected_message = "No such channel awaiting to be accepted.".to_string();
15122		check_api_error_message(expected_message, res_err)
15123	}
15124
15125	fn check_api_error_message<T>(expected_err_message: String, res_err: Result<T, APIError>) {
15126		match res_err {
15127			Err(APIError::APIMisuseError { err }) => {
15128				assert_eq!(err, expected_err_message);
15129			},
15130			Err(APIError::ChannelUnavailable { err }) => {
15131				assert_eq!(err, expected_err_message);
15132			},
15133			Ok(_) => panic!("Unexpected Ok"),
15134			Err(_) => panic!("Unexpected Error"),
15135		}
15136	}
15137
15138	#[test]
15139	fn test_api_calls_with_unkown_counterparty_node() {
15140		// Tests that our API functions that expects a `counterparty_node_id` as input, behaves as
15141		// expected if the `counterparty_node_id` is an unkown peer in the
15142		// `ChannelManager::per_peer_state` map.
15143		let chanmon_cfg = create_chanmon_cfgs(2);
15144		let node_cfg = create_node_cfgs(2, &chanmon_cfg);
15145		let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
15146		let nodes = create_network(2, &node_cfg, &node_chanmgr);
15147
15148		// Dummy values
15149		let channel_id = ChannelId::from_bytes([4; 32]);
15150		let unkown_public_key = PublicKey::from_secret_key(&Secp256k1::signing_only(), &SecretKey::from_slice(&[42; 32]).unwrap());
15151		let intercept_id = InterceptId([0; 32]);
15152		let error_message = "Channel force-closed";
15153
15154		// Test the API functions.
15155		check_not_connected_to_peer_error(nodes[0].node.create_channel(unkown_public_key, 1_000_000, 500_000_000, 42, None, None), unkown_public_key);
15156
15157		check_unkown_peer_error(nodes[0].node.accept_inbound_channel(&channel_id, &unkown_public_key, 42), unkown_public_key);
15158
15159		check_unkown_peer_error(nodes[0].node.close_channel(&channel_id, &unkown_public_key), unkown_public_key);
15160
15161		check_unkown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unkown_public_key, error_message.to_string()), unkown_public_key);
15162
15163		check_unkown_peer_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &unkown_public_key, error_message.to_string()), unkown_public_key);
15164
15165		check_unkown_peer_error(nodes[0].node.forward_intercepted_htlc(intercept_id, &channel_id, unkown_public_key, 1_000_000), unkown_public_key);
15166
15167		check_unkown_peer_error(nodes[0].node.update_channel_config(&unkown_public_key, &[channel_id], &ChannelConfig::default()), unkown_public_key);
15168	}
15169
15170	#[test]
15171	fn test_api_calls_with_unavailable_channel() {
15172		// Tests that our API functions that expects a `counterparty_node_id` and a `channel_id`
15173		// as input, behaves as expected if the `counterparty_node_id` is a known peer in the
15174		// `ChannelManager::per_peer_state` map, but the peer state doesn't contain a channel with
15175		// the given `channel_id`.
15176		let chanmon_cfg = create_chanmon_cfgs(2);
15177		let node_cfg = create_node_cfgs(2, &chanmon_cfg);
15178		let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
15179		let nodes = create_network(2, &node_cfg, &node_chanmgr);
15180
15181		let counterparty_node_id = nodes[1].node.get_our_node_id();
15182
15183		// Dummy values
15184		let channel_id = ChannelId::from_bytes([4; 32]);
15185		let error_message = "Channel force-closed";
15186
15187		// Test the API functions.
15188		check_api_misuse_error(nodes[0].node.accept_inbound_channel(&channel_id, &counterparty_node_id, 42));
15189
15190		check_channel_unavailable_error(nodes[0].node.close_channel(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
15191
15192		check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id, error_message.to_string()), channel_id, counterparty_node_id);
15193
15194		check_channel_unavailable_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id, error_message.to_string()), channel_id, counterparty_node_id);
15195
15196		check_channel_unavailable_error(nodes[0].node.forward_intercepted_htlc(InterceptId([0; 32]), &channel_id, counterparty_node_id, 1_000_000), channel_id, counterparty_node_id);
15197
15198		check_channel_unavailable_error(nodes[0].node.update_channel_config(&counterparty_node_id, &[channel_id], &ChannelConfig::default()), channel_id, counterparty_node_id);
15199	}
15200
15201	#[test]
15202	fn test_connection_limiting() {
15203		// Test that we limit un-channel'd peers and un-funded channels properly.
15204		let chanmon_cfgs = create_chanmon_cfgs(2);
15205		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
15206		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
15207		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
15208
15209		// Note that create_network connects the nodes together for us
15210
15211		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
15212		let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
15213
15214		let mut funding_tx = None;
15215		for idx in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
15216			nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
15217			let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
15218
15219			if idx == 0 {
15220				nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel);
15221				let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
15222				funding_tx = Some(tx.clone());
15223				nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx).unwrap();
15224				let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
15225
15226				nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg);
15227				check_added_monitors!(nodes[1], 1);
15228				expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
15229
15230				let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
15231
15232				nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed);
15233				check_added_monitors!(nodes[0], 1);
15234				expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
15235			}
15236			open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
15237		}
15238
15239		// A MAX_UNFUNDED_CHANS_PER_PEER + 1 channel will be summarily rejected
15240		open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(
15241			&nodes[0].keys_manager);
15242		nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
15243		assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
15244			open_channel_msg.common_fields.temporary_channel_id);
15245
15246		// Further, because all of our channels with nodes[0] are inbound, and none of them funded,
15247		// it doesn't count as a "protected" peer, i.e. it counts towards the MAX_NO_CHANNEL_PEERS
15248		// limit.
15249		let mut peer_pks = Vec::with_capacity(super::MAX_NO_CHANNEL_PEERS);
15250		for _ in 1..super::MAX_NO_CHANNEL_PEERS {
15251			let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
15252				&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
15253			peer_pks.push(random_pk);
15254			nodes[1].node.peer_connected(random_pk, &msgs::Init {
15255				features: nodes[0].node.init_features(), networks: None, remote_network_address: None
15256			}, true).unwrap();
15257		}
15258		let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
15259			&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
15260		nodes[1].node.peer_connected(last_random_pk, &msgs::Init {
15261			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
15262		}, true).unwrap_err();
15263
15264		// Also importantly, because nodes[0] isn't "protected", we will refuse a reconnection from
15265		// them if we have too many un-channel'd peers.
15266		nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
15267		let chan_closed_events = nodes[1].node.get_and_clear_pending_events();
15268		assert_eq!(chan_closed_events.len(), super::MAX_UNFUNDED_CHANS_PER_PEER - 1);
15269		for ev in chan_closed_events {
15270			if let Event::ChannelClosed { .. } = ev { } else { panic!(); }
15271		}
15272		nodes[1].node.peer_connected(last_random_pk, &msgs::Init {
15273			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
15274		}, true).unwrap();
15275		nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
15276			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
15277		}, true).unwrap_err();
15278
15279		// but of course if the connection is outbound its allowed...
15280		nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
15281			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
15282		}, false).unwrap();
15283		nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
15284
15285		// Now nodes[0] is disconnected but still has a pending, un-funded channel lying around.
15286		// Even though we accept one more connection from new peers, we won't actually let them
15287		// open channels.
15288		assert!(peer_pks.len() > super::MAX_UNFUNDED_CHANNEL_PEERS - 1);
15289		for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
15290			nodes[1].node.handle_open_channel(peer_pks[i], &open_channel_msg);
15291			get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]);
15292			open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
15293		}
15294		nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
15295		assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
15296			open_channel_msg.common_fields.temporary_channel_id);
15297
15298		// Of course, however, outbound channels are always allowed
15299		nodes[1].node.create_channel(last_random_pk, 100_000, 0, 42, None, None).unwrap();
15300		get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, last_random_pk);
15301
15302		// If we fund the first channel, nodes[0] has a live on-chain channel with us, it is now
15303		// "protected" and can connect again.
15304		mine_transaction(&nodes[1], funding_tx.as_ref().unwrap());
15305		nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
15306			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
15307		}, true).unwrap();
15308		get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
15309
15310		// Further, because the first channel was funded, we can open another channel with
15311		// last_random_pk.
15312		nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
15313		get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
15314	}
15315
15316	#[test]
15317	fn test_outbound_chans_unlimited() {
15318		// Test that we never refuse an outbound channel even if a peer is unfuned-channel-limited
15319		let chanmon_cfgs = create_chanmon_cfgs(2);
15320		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
15321		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
15322		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
15323
15324		// Note that create_network connects the nodes together for us
15325
15326		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
15327		let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
15328
15329		for _ in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
15330			nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
15331			get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
15332			open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
15333		}
15334
15335		// Once we have MAX_UNFUNDED_CHANS_PER_PEER unfunded channels, new inbound channels will be
15336		// rejected.
15337		nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
15338		assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
15339			open_channel_msg.common_fields.temporary_channel_id);
15340
15341		// but we can still open an outbound channel.
15342		nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
15343		get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
15344
15345		// but even with such an outbound channel, additional inbound channels will still fail.
15346		nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
15347		assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
15348			open_channel_msg.common_fields.temporary_channel_id);
15349	}
15350
15351	#[test]
15352	fn test_0conf_limiting() {
15353		// Tests that we properly limit inbound channels when we have the manual-channel-acceptance
15354		// flag set and (sometimes) accept channels as 0conf.
15355		let chanmon_cfgs = create_chanmon_cfgs(2);
15356		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
15357		let mut settings = test_default_channel_config();
15358		settings.manually_accept_inbound_channels = true;
15359		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(settings)]);
15360		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
15361
15362		// Note that create_network connects the nodes together for us
15363
15364		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
15365		let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
15366
15367		// First, get us up to MAX_UNFUNDED_CHANNEL_PEERS so we can test at the edge
15368		for _ in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
15369			let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
15370				&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
15371			nodes[1].node.peer_connected(random_pk, &msgs::Init {
15372				features: nodes[0].node.init_features(), networks: None, remote_network_address: None
15373			}, true).unwrap();
15374
15375			nodes[1].node.handle_open_channel(random_pk, &open_channel_msg);
15376			let events = nodes[1].node.get_and_clear_pending_events();
15377			match events[0] {
15378				Event::OpenChannelRequest { temporary_channel_id, .. } => {
15379					nodes[1].node.accept_inbound_channel(&temporary_channel_id, &random_pk, 23).unwrap();
15380				}
15381				_ => panic!("Unexpected event"),
15382			}
15383			get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, random_pk);
15384			open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
15385		}
15386
15387		// If we try to accept a channel from another peer non-0conf it will fail.
15388		let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
15389			&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
15390		nodes[1].node.peer_connected(last_random_pk, &msgs::Init {
15391			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
15392		}, true).unwrap();
15393		nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
15394		let events = nodes[1].node.get_and_clear_pending_events();
15395		match events[0] {
15396			Event::OpenChannelRequest { temporary_channel_id, .. } => {
15397				match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &last_random_pk, 23) {
15398					Err(APIError::APIMisuseError { err }) =>
15399						assert_eq!(err, "Too many peers with unfunded channels, refusing to accept new ones"),
15400					_ => panic!(),
15401				}
15402			}
15403			_ => panic!("Unexpected event"),
15404		}
15405		assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
15406			open_channel_msg.common_fields.temporary_channel_id);
15407
15408		// ...however if we accept the same channel 0conf it should work just fine.
15409		nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
15410		let events = nodes[1].node.get_and_clear_pending_events();
15411		match events[0] {
15412			Event::OpenChannelRequest { temporary_channel_id, .. } => {
15413				nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &last_random_pk, 23).unwrap();
15414			}
15415			_ => panic!("Unexpected event"),
15416		}
15417		get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
15418	}
15419
15420	#[test]
15421	fn reject_excessively_underpaying_htlcs() {
15422		let chanmon_cfg = create_chanmon_cfgs(1);
15423		let node_cfg = create_node_cfgs(1, &chanmon_cfg);
15424		let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
15425		let node = create_network(1, &node_cfg, &node_chanmgr);
15426		let sender_intended_amt_msat = 100;
15427		let extra_fee_msat = 10;
15428		let hop_data = msgs::InboundOnionPayload::Receive {
15429			sender_intended_htlc_amt_msat: 100,
15430			cltv_expiry_height: 42,
15431			payment_metadata: None,
15432			keysend_preimage: None,
15433			payment_data: Some(msgs::FinalOnionHopData {
15434				payment_secret: PaymentSecret([0; 32]), total_msat: sender_intended_amt_msat,
15435			}),
15436			custom_tlvs: Vec::new(),
15437		};
15438		// Check that if the amount we received + the penultimate hop extra fee is less than the sender
15439		// intended amount, we fail the payment.
15440		let current_height: u32 = node[0].node.best_block.read().unwrap().height;
15441		if let Err(crate::ln::channelmanager::InboundHTLCErr { err_code, .. }) =
15442			create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
15443				sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat),
15444				current_height)
15445		{
15446			assert_eq!(err_code, 19);
15447		} else { panic!(); }
15448
15449		// If amt_received + extra_fee is equal to the sender intended amount, we're fine.
15450		let hop_data = msgs::InboundOnionPayload::Receive { // This is the same payload as above, InboundOnionPayload doesn't implement Clone
15451			sender_intended_htlc_amt_msat: 100,
15452			cltv_expiry_height: 42,
15453			payment_metadata: None,
15454			keysend_preimage: None,
15455			payment_data: Some(msgs::FinalOnionHopData {
15456				payment_secret: PaymentSecret([0; 32]), total_msat: sender_intended_amt_msat,
15457			}),
15458			custom_tlvs: Vec::new(),
15459		};
15460		let current_height: u32 = node[0].node.best_block.read().unwrap().height;
15461		assert!(create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
15462			sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat),
15463			current_height).is_ok());
15464	}
15465
15466	#[test]
15467	fn test_final_incorrect_cltv(){
15468		let chanmon_cfg = create_chanmon_cfgs(1);
15469		let node_cfg = create_node_cfgs(1, &chanmon_cfg);
15470		let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
15471		let node = create_network(1, &node_cfg, &node_chanmgr);
15472
15473		let current_height: u32 = node[0].node.best_block.read().unwrap().height;
15474		let result = create_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive {
15475			sender_intended_htlc_amt_msat: 100,
15476			cltv_expiry_height: 22,
15477			payment_metadata: None,
15478			keysend_preimage: None,
15479			payment_data: Some(msgs::FinalOnionHopData {
15480				payment_secret: PaymentSecret([0; 32]), total_msat: 100,
15481			}),
15482			custom_tlvs: Vec::new(),
15483		}, [0; 32], PaymentHash([0; 32]), 100, 23, None, true, None, current_height);
15484
15485		// Should not return an error as this condition:
15486		// https://github.com/lightning/bolts/blob/4dcc377209509b13cf89a4b91fde7d478f5b46d8/04-onion-routing.md?plain=1#L334
15487		// is not satisfied.
15488		assert!(result.is_ok());
15489	}
15490
15491	#[test]
15492	fn test_inbound_anchors_manual_acceptance() {
15493		// Tests that we properly limit inbound channels when we have the manual-channel-acceptance
15494		// flag set and (sometimes) accept channels as 0conf.
15495		let mut anchors_cfg = test_default_channel_config();
15496		anchors_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
15497
15498		let mut anchors_manual_accept_cfg = anchors_cfg.clone();
15499		anchors_manual_accept_cfg.manually_accept_inbound_channels = true;
15500
15501		let chanmon_cfgs = create_chanmon_cfgs(3);
15502		let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
15503		let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs,
15504			&[Some(anchors_cfg.clone()), Some(anchors_cfg.clone()), Some(anchors_manual_accept_cfg.clone())]);
15505		let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
15506
15507		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
15508		let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
15509
15510		nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
15511		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
15512		let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
15513		match &msg_events[0] {
15514			MessageSendEvent::HandleError { node_id, action } => {
15515				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
15516				match action {
15517					ErrorAction::SendErrorMessage { msg } =>
15518						assert_eq!(msg.data, "No channels with anchor outputs accepted".to_owned()),
15519					_ => panic!("Unexpected error action"),
15520				}
15521			}
15522			_ => panic!("Unexpected event"),
15523		}
15524
15525		nodes[2].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
15526		let events = nodes[2].node.get_and_clear_pending_events();
15527		match events[0] {
15528			Event::OpenChannelRequest { temporary_channel_id, .. } =>
15529				nodes[2].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23).unwrap(),
15530			_ => panic!("Unexpected event"),
15531		}
15532		get_event_msg!(nodes[2], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
15533	}
15534
15535	#[test]
15536	fn test_anchors_zero_fee_htlc_tx_fallback() {
15537		// Tests that if both nodes support anchors, but the remote node does not want to accept
15538		// anchor channels at the moment, an error it sent to the local node such that it can retry
15539		// the channel without the anchors feature.
15540		let chanmon_cfgs = create_chanmon_cfgs(2);
15541		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
15542		let mut anchors_config = test_default_channel_config();
15543		anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
15544		anchors_config.manually_accept_inbound_channels = true;
15545		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config.clone())]);
15546		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
15547		let error_message = "Channel force-closed";
15548
15549		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 0, None, None).unwrap();
15550		let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
15551		assert!(open_channel_msg.common_fields.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx());
15552
15553		nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
15554		let events = nodes[1].node.get_and_clear_pending_events();
15555		match events[0] {
15556			Event::OpenChannelRequest { temporary_channel_id, .. } => {
15557				nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
15558			}
15559			_ => panic!("Unexpected event"),
15560		}
15561
15562		let error_msg = get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id());
15563		nodes[0].node.handle_error(nodes[1].node.get_our_node_id(), &error_msg);
15564
15565		let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
15566		assert!(!open_channel_msg.common_fields.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());
15567
15568		// Since nodes[1] should not have accepted the channel, it should
15569		// not have generated any events.
15570		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
15571	}
15572
15573	#[test]
15574	fn test_update_channel_config() {
15575		let chanmon_cfg = create_chanmon_cfgs(2);
15576		let node_cfg = create_node_cfgs(2, &chanmon_cfg);
15577		let mut user_config = test_default_channel_config();
15578		let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
15579		let nodes = create_network(2, &node_cfg, &node_chanmgr);
15580		let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
15581		let channel = &nodes[0].node.list_channels()[0];
15582
15583		nodes[0].node.update_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &user_config.channel_config).unwrap();
15584		let events = nodes[0].node.get_and_clear_pending_msg_events();
15585		assert_eq!(events.len(), 0);
15586
15587		user_config.channel_config.forwarding_fee_base_msat += 10;
15588		nodes[0].node.update_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &user_config.channel_config).unwrap();
15589		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_base_msat, user_config.channel_config.forwarding_fee_base_msat);
15590		let events = nodes[0].node.get_and_clear_pending_msg_events();
15591		assert_eq!(events.len(), 1);
15592		match &events[0] {
15593			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
15594			_ => panic!("expected BroadcastChannelUpdate event"),
15595		}
15596
15597		nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate::default()).unwrap();
15598		let events = nodes[0].node.get_and_clear_pending_msg_events();
15599		assert_eq!(events.len(), 0);
15600
15601		let new_cltv_expiry_delta = user_config.channel_config.cltv_expiry_delta + 6;
15602		nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate {
15603			cltv_expiry_delta: Some(new_cltv_expiry_delta),
15604			..Default::default()
15605		}).unwrap();
15606		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().cltv_expiry_delta, new_cltv_expiry_delta);
15607		let events = nodes[0].node.get_and_clear_pending_msg_events();
15608		assert_eq!(events.len(), 1);
15609		match &events[0] {
15610			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
15611			_ => panic!("expected BroadcastChannelUpdate event"),
15612		}
15613
15614		let new_fee = user_config.channel_config.forwarding_fee_proportional_millionths + 100;
15615		nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate {
15616			forwarding_fee_proportional_millionths: Some(new_fee),
15617			..Default::default()
15618		}).unwrap();
15619		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().cltv_expiry_delta, new_cltv_expiry_delta);
15620		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, new_fee);
15621		let events = nodes[0].node.get_and_clear_pending_msg_events();
15622		assert_eq!(events.len(), 1);
15623		match &events[0] {
15624			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
15625			_ => panic!("expected BroadcastChannelUpdate event"),
15626		}
15627
15628		// If we provide a channel_id not associated with the peer, we should get an error and no updates
15629		// should be applied to ensure update atomicity as specified in the API docs.
15630		let bad_channel_id = ChannelId::v1_from_funding_txid(&[10; 32], 10);
15631		let current_fee = nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths;
15632		let new_fee = current_fee + 100;
15633		assert!(
15634			matches!(
15635				nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id, bad_channel_id], &ChannelConfigUpdate {
15636					forwarding_fee_proportional_millionths: Some(new_fee),
15637					..Default::default()
15638				}),
15639				Err(APIError::ChannelUnavailable { err: _ }),
15640			)
15641		);
15642		// Check that the fee hasn't changed for the channel that exists.
15643		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, current_fee);
15644		let events = nodes[0].node.get_and_clear_pending_msg_events();
15645		assert_eq!(events.len(), 0);
15646	}
15647
15648	#[test]
15649	fn test_payment_display() {
15650		let payment_id = PaymentId([42; 32]);
15651		assert_eq!(format!("{}", &payment_id), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a");
15652		let payment_hash = PaymentHash([42; 32]);
15653		assert_eq!(format!("{}", &payment_hash), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a");
15654		let payment_preimage = PaymentPreimage([42; 32]);
15655		assert_eq!(format!("{}", &payment_preimage), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a");
15656	}
15657
15658	#[test]
15659	fn test_trigger_lnd_force_close() {
15660		let chanmon_cfg = create_chanmon_cfgs(2);
15661		let node_cfg = create_node_cfgs(2, &chanmon_cfg);
15662		let user_config = test_default_channel_config();
15663		let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
15664		let nodes = create_network(2, &node_cfg, &node_chanmgr);
15665		let error_message = "Channel force-closed";
15666
15667		// Open a channel, immediately disconnect each other, and broadcast Alice's latest state.
15668		let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
15669		nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
15670		nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
15671		nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
15672		check_closed_broadcast(&nodes[0], 1, true);
15673		check_added_monitors(&nodes[0], 1);
15674		check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
15675		{
15676			let txn = nodes[0].tx_broadcaster.txn_broadcast();
15677			assert_eq!(txn.len(), 1);
15678			check_spends!(txn[0], funding_tx);
15679		}
15680
15681		// Since they're disconnected, Bob won't receive Alice's `Error` message. Reconnect them
15682		// such that Bob sends a `ChannelReestablish` to Alice since the channel is still open from
15683		// their side.
15684		nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init {
15685			features: nodes[1].node.init_features(), networks: None, remote_network_address: None
15686		}, true).unwrap();
15687		nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
15688			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
15689		}, false).unwrap();
15690		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
15691		let channel_reestablish = get_event_msg!(
15692			nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()
15693		);
15694		nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &channel_reestablish);
15695
15696		// Alice should respond with an error since the channel isn't known, but a bogus
15697		// `ChannelReestablish` should be sent first, such that we actually trigger Bob to force
15698		// close even if it was an lnd node.
15699		let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
15700		assert_eq!(msg_events.len(), 2);
15701		if let MessageSendEvent::SendChannelReestablish { node_id, msg } = &msg_events[0] {
15702			assert_eq!(*node_id, nodes[1].node.get_our_node_id());
15703			assert_eq!(msg.next_local_commitment_number, 0);
15704			assert_eq!(msg.next_remote_commitment_number, 0);
15705			nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &msg);
15706		} else { panic!() };
15707		check_closed_broadcast(&nodes[1], 1, true);
15708		check_added_monitors(&nodes[1], 1);
15709		let expected_close_reason = ClosureReason::ProcessingError {
15710			err: "Peer sent an invalid channel_reestablish to force close in a non-standard way".to_string()
15711		};
15712		check_closed_event!(nodes[1], 1, expected_close_reason, [nodes[0].node.get_our_node_id()], 100000);
15713		{
15714			let txn = nodes[1].tx_broadcaster.txn_broadcast();
15715			assert_eq!(txn.len(), 1);
15716			check_spends!(txn[0], funding_tx);
15717		}
15718	}
15719
15720	#[test]
15721	fn test_malformed_forward_htlcs_ser() {
15722		// Ensure that `HTLCForwardInfo::FailMalformedHTLC`s are (de)serialized properly.
15723		let chanmon_cfg = create_chanmon_cfgs(1);
15724		let node_cfg = create_node_cfgs(1, &chanmon_cfg);
15725		let persister;
15726		let chain_monitor;
15727		let chanmgrs = create_node_chanmgrs(1, &node_cfg, &[None]);
15728		let deserialized_chanmgr;
15729		let mut nodes = create_network(1, &node_cfg, &chanmgrs);
15730
15731		let dummy_failed_htlc = |htlc_id| {
15732			HTLCForwardInfo::FailHTLC { htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }, }
15733		};
15734		let dummy_malformed_htlc = |htlc_id| {
15735			HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code: 0x4000, sha256_of_onion: [0; 32] }
15736		};
15737
15738		let dummy_htlcs_1: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| {
15739			if htlc_id % 2 == 0 {
15740				dummy_failed_htlc(htlc_id)
15741			} else {
15742				dummy_malformed_htlc(htlc_id)
15743			}
15744		}).collect();
15745
15746		let dummy_htlcs_2: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| {
15747			if htlc_id % 2 == 1 {
15748				dummy_failed_htlc(htlc_id)
15749			} else {
15750				dummy_malformed_htlc(htlc_id)
15751			}
15752		}).collect();
15753
15754
15755		let (scid_1, scid_2) = (42, 43);
15756		let mut forward_htlcs = new_hash_map();
15757		forward_htlcs.insert(scid_1, dummy_htlcs_1.clone());
15758		forward_htlcs.insert(scid_2, dummy_htlcs_2.clone());
15759
15760		let mut chanmgr_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
15761		*chanmgr_fwd_htlcs = forward_htlcs.clone();
15762		core::mem::drop(chanmgr_fwd_htlcs);
15763
15764		reload_node!(nodes[0], nodes[0].node.encode(), &[], persister, chain_monitor, deserialized_chanmgr);
15765
15766		let mut deserialized_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
15767		for scid in [scid_1, scid_2].iter() {
15768			let deserialized_htlcs = deserialized_fwd_htlcs.remove(scid).unwrap();
15769			assert_eq!(forward_htlcs.remove(scid).unwrap(), deserialized_htlcs);
15770		}
15771		assert!(deserialized_fwd_htlcs.is_empty());
15772		core::mem::drop(deserialized_fwd_htlcs);
15773
15774		expect_pending_htlcs_forwardable!(nodes[0]);
15775	}
15776}
15777
15778#[cfg(ldk_bench)]
15779pub mod bench {
15780	use crate::chain::Listen;
15781	use crate::chain::chainmonitor::{ChainMonitor, Persist};
15782	use crate::sign::{KeysManager, InMemorySigner};
15783	use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider};
15784	use crate::ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId, RecipientOnionFields, Retry};
15785	use crate::ln::functional_test_utils::*;
15786	use crate::ln::msgs::{ChannelMessageHandler, Init};
15787	use crate::routing::gossip::NetworkGraph;
15788	use crate::routing::router::{PaymentParameters, RouteParameters};
15789	use crate::util::test_utils;
15790	use crate::util::config::{UserConfig, MaxDustHTLCExposure};
15791
15792	use bitcoin::amount::Amount;
15793	use bitcoin::locktime::absolute::LockTime;
15794	use bitcoin::hashes::Hash;
15795	use bitcoin::hashes::sha256::Hash as Sha256;
15796	use bitcoin::{Transaction, TxOut};
15797	use bitcoin::transaction::Version;
15798
15799	use crate::sync::{Arc, Mutex, RwLock};
15800
15801	use criterion::Criterion;
15802
15803	type Manager<'a, P> = ChannelManager<
15804		&'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource,
15805			&'a test_utils::TestBroadcaster, &'a test_utils::TestFeeEstimator,
15806			&'a test_utils::TestLogger, &'a P>,
15807		&'a test_utils::TestBroadcaster, &'a KeysManager, &'a KeysManager, &'a KeysManager,
15808		&'a test_utils::TestFeeEstimator, &'a test_utils::TestRouter<'a>,
15809		&'a test_utils::TestMessageRouter<'a>, &'a test_utils::TestLogger>;
15810
15811	struct ANodeHolder<'node_cfg, 'chan_mon_cfg: 'node_cfg, P: Persist<InMemorySigner>> {
15812		node: &'node_cfg Manager<'chan_mon_cfg, P>,
15813	}
15814	impl<'node_cfg, 'chan_mon_cfg: 'node_cfg, P: Persist<InMemorySigner>> NodeHolder for ANodeHolder<'node_cfg, 'chan_mon_cfg, P> {
15815		type CM = Manager<'chan_mon_cfg, P>;
15816		#[inline]
15817		fn node(&self) -> &Manager<'chan_mon_cfg, P> { self.node }
15818		#[inline]
15819		fn chain_monitor(&self) -> Option<&test_utils::TestChainMonitor> { None }
15820	}
15821
15822	pub fn bench_sends(bench: &mut Criterion) {
15823		bench_two_sends(bench, "bench_sends", test_utils::TestPersister::new(), test_utils::TestPersister::new());
15824	}
15825
15826	pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Criterion, bench_name: &str, persister_a: P, persister_b: P) {
15827		// Do a simple benchmark of sending a payment back and forth between two nodes.
15828		// Note that this is unrealistic as each payment send will require at least two fsync
15829		// calls per node.
15830		let network = bitcoin::Network::Testnet;
15831		let genesis_block = bitcoin::constants::genesis_block(network);
15832
15833		let tx_broadcaster = test_utils::TestBroadcaster::new(network);
15834		let fee_estimator = test_utils::TestFeeEstimator::new(253);
15835		let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
15836		let scorer = RwLock::new(test_utils::TestScorer::new());
15837		let entropy = test_utils::TestKeysInterface::new(&[0u8; 32], network);
15838		let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &logger_a, &scorer);
15839		let message_router = test_utils::TestMessageRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &entropy);
15840
15841		let mut config: UserConfig = Default::default();
15842		config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253);
15843		config.channel_handshake_config.minimum_depth = 1;
15844
15845		let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a);
15846		let seed_a = [1u8; 32];
15847		let keys_manager_a = KeysManager::new(&seed_a, 42, 42);
15848		let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &router, &message_router, &logger_a, &keys_manager_a, &keys_manager_a, &keys_manager_a, config.clone(), ChainParameters {
15849			network,
15850			best_block: BestBlock::from_network(network),
15851		}, genesis_block.header.time);
15852		let node_a_holder = ANodeHolder { node: &node_a };
15853
15854		let logger_b = test_utils::TestLogger::with_id("node a".to_owned());
15855		let chain_monitor_b = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_b);
15856		let seed_b = [2u8; 32];
15857		let keys_manager_b = KeysManager::new(&seed_b, 42, 42);
15858		let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &router, &message_router, &logger_b, &keys_manager_b, &keys_manager_b, &keys_manager_b, config.clone(), ChainParameters {
15859			network,
15860			best_block: BestBlock::from_network(network),
15861		}, genesis_block.header.time);
15862		let node_b_holder = ANodeHolder { node: &node_b };
15863
15864		node_a.peer_connected(node_b.get_our_node_id(), &Init {
15865			features: node_b.init_features(), networks: None, remote_network_address: None
15866		}, true).unwrap();
15867		node_b.peer_connected(node_a.get_our_node_id(), &Init {
15868			features: node_a.init_features(), networks: None, remote_network_address: None
15869		}, false).unwrap();
15870		node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None, None).unwrap();
15871		node_b.handle_open_channel(node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
15872		node_a.handle_accept_channel(node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
15873
15874		let tx;
15875		if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) {
15876			tx = Transaction { version: Version::TWO, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
15877				value: Amount::from_sat(8_000_000), script_pubkey: output_script,
15878			}]};
15879			node_a.funding_transaction_generated(temporary_channel_id, node_b.get_our_node_id(), tx.clone()).unwrap();
15880		} else { panic!(); }
15881
15882		node_b.handle_funding_created(node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingCreated, node_b.get_our_node_id()));
15883		let events_b = node_b.get_and_clear_pending_events();
15884		assert_eq!(events_b.len(), 1);
15885		match events_b[0] {
15886			Event::ChannelPending{ ref counterparty_node_id, .. } => {
15887				assert_eq!(*counterparty_node_id, node_a.get_our_node_id());
15888			},
15889			_ => panic!("Unexpected event"),
15890		}
15891
15892		node_a.handle_funding_signed(node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingSigned, node_a.get_our_node_id()));
15893		let events_a = node_a.get_and_clear_pending_events();
15894		assert_eq!(events_a.len(), 1);
15895		match events_a[0] {
15896			Event::ChannelPending{ ref counterparty_node_id, .. } => {
15897				assert_eq!(*counterparty_node_id, node_b.get_our_node_id());
15898			},
15899			_ => panic!("Unexpected event"),
15900		}
15901
15902		assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
15903
15904		let block = create_dummy_block(BestBlock::from_network(network).block_hash, 42, vec![tx]);
15905		Listen::block_connected(&node_a, &block, 1);
15906		Listen::block_connected(&node_b, &block, 1);
15907
15908		node_a.handle_channel_ready(node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendChannelReady, node_a.get_our_node_id()));
15909		let msg_events = node_a.get_and_clear_pending_msg_events();
15910		assert_eq!(msg_events.len(), 2);
15911		match msg_events[0] {
15912			MessageSendEvent::SendChannelReady { ref msg, .. } => {
15913				node_b.handle_channel_ready(node_a.get_our_node_id(), msg);
15914				get_event_msg!(node_b_holder, MessageSendEvent::SendChannelUpdate, node_a.get_our_node_id());
15915			},
15916			_ => panic!(),
15917		}
15918		match msg_events[1] {
15919			MessageSendEvent::SendChannelUpdate { .. } => {},
15920			_ => panic!(),
15921		}
15922
15923		let events_a = node_a.get_and_clear_pending_events();
15924		assert_eq!(events_a.len(), 1);
15925		match events_a[0] {
15926			Event::ChannelReady{ ref counterparty_node_id, .. } => {
15927				assert_eq!(*counterparty_node_id, node_b.get_our_node_id());
15928			},
15929			_ => panic!("Unexpected event"),
15930		}
15931
15932		let events_b = node_b.get_and_clear_pending_events();
15933		assert_eq!(events_b.len(), 1);
15934		match events_b[0] {
15935			Event::ChannelReady{ ref counterparty_node_id, .. } => {
15936				assert_eq!(*counterparty_node_id, node_a.get_our_node_id());
15937			},
15938			_ => panic!("Unexpected event"),
15939		}
15940
15941		let mut payment_count: u64 = 0;
15942		macro_rules! send_payment {
15943			($node_a: expr, $node_b: expr) => {
15944				let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id(), TEST_FINAL_CLTV)
15945					.with_bolt11_features($node_b.bolt11_invoice_features()).unwrap();
15946				let mut payment_preimage = PaymentPreimage([0; 32]);
15947				payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes());
15948				payment_count += 1;
15949				let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
15950				let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
15951
15952				$node_a.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
15953					PaymentId(payment_hash.0),
15954					RouteParameters::from_payment_params_and_value(payment_params, 10_000),
15955					Retry::Attempts(0)).unwrap();
15956				let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
15957				$node_b.handle_update_add_htlc($node_a.get_our_node_id(), &payment_event.msgs[0]);
15958				$node_b.handle_commitment_signed($node_a.get_our_node_id(), &payment_event.commitment_msg);
15959				let (raa, cs) = get_revoke_commit_msgs(&ANodeHolder { node: &$node_b }, &$node_a.get_our_node_id());
15960				$node_a.handle_revoke_and_ack($node_b.get_our_node_id(), &raa);
15961				$node_a.handle_commitment_signed($node_b.get_our_node_id(), &cs);
15962				$node_b.handle_revoke_and_ack($node_a.get_our_node_id(), &get_event_msg!(ANodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));
15963
15964				expect_pending_htlcs_forwardable!(ANodeHolder { node: &$node_b });
15965				expect_payment_claimable!(ANodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
15966				$node_b.claim_funds(payment_preimage);
15967				expect_payment_claimed!(ANodeHolder { node: &$node_b }, payment_hash, 10_000);
15968
15969				match $node_b.get_and_clear_pending_msg_events().pop().unwrap() {
15970					MessageSendEvent::UpdateHTLCs { node_id, updates } => {
15971						assert_eq!(node_id, $node_a.get_our_node_id());
15972						$node_a.handle_update_fulfill_htlc($node_b.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
15973						$node_a.handle_commitment_signed($node_b.get_our_node_id(), &updates.commitment_signed);
15974					},
15975					_ => panic!("Failed to generate claim event"),
15976				}
15977
15978				let (raa, cs) = get_revoke_commit_msgs(&ANodeHolder { node: &$node_a }, &$node_b.get_our_node_id());
15979				$node_b.handle_revoke_and_ack($node_a.get_our_node_id(), &raa);
15980				$node_b.handle_commitment_signed($node_a.get_our_node_id(), &cs);
15981				$node_a.handle_revoke_and_ack($node_b.get_our_node_id(), &get_event_msg!(ANodeHolder { node: &$node_b }, MessageSendEvent::SendRevokeAndACK, $node_a.get_our_node_id()));
15982
15983				expect_payment_sent!(ANodeHolder { node: &$node_a }, payment_preimage);
15984			}
15985		}
15986
15987		bench.bench_function(bench_name, |b| b.iter(|| {
15988			send_payment!(node_a, node_b);
15989			send_payment!(node_b, node_a);
15990		}));
15991	}
15992}