1use bitcoin::block::Header;
21use bitcoin::transaction::{Transaction, TxIn};
22use bitcoin::constants::ChainHash;
23use bitcoin::key::constants::SECRET_KEY_SIZE;
24use bitcoin::network::Network;
25
26use bitcoin::hashes::{Hash, HashEngine, HmacEngine};
27use bitcoin::hashes::hmac::Hmac;
28use bitcoin::hashes::sha256::Hash as Sha256;
29use bitcoin::hash_types::{BlockHash, Txid};
30
31use bitcoin::secp256k1::{SecretKey,PublicKey};
32use bitcoin::secp256k1::Secp256k1;
33use bitcoin::{secp256k1, Sequence, Weight};
34
35use crate::events::FundingInfo;
36use crate::blinded_path::message::{AsyncPaymentsContext, MessageContext, OffersContext};
37use crate::blinded_path::NodeIdLookUp;
38use crate::blinded_path::message::{BlindedMessagePath, MessageForwardNode};
39use crate::blinded_path::payment::{BlindedPaymentPath, Bolt12OfferContext, Bolt12RefundContext, PaymentConstraints, PaymentContext, UnauthenticatedReceiveTlvs};
40use crate::chain;
41use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
42use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
43use crate::chain::channelmonitor::{Balance, ChannelMonitor, ChannelMonitorUpdate, WithChannelMonitor, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent};
44use crate::chain::transaction::{OutPoint, TransactionData};
45use crate::events::{self, Event, EventHandler, EventsProvider, InboundChannelFunds, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination, PaymentFailureReason, ReplayEvent};
46use crate::ln::inbound_payment;
49use crate::ln::types::ChannelId;
50use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret};
51use crate::ln::channel::{self, Channel, ChannelPhase, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext, InteractivelyFunded as _};
52#[cfg(any(dual_funding, splicing))]
53use crate::ln::channel::InboundV2Channel;
54use crate::ln::channel_state::ChannelDetails;
55use crate::types::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
56#[cfg(any(feature = "_test_utils", test))]
57use crate::types::features::Bolt11InvoiceFeatures;
58use crate::routing::router::{BlindedTail, FixedRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
59use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundHTLCErr, NextPacketDetails};
60use crate::ln::msgs;
61use crate::ln::onion_utils;
62use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING};
63use crate::ln::msgs::{ChannelMessageHandler, CommitmentUpdate, DecodeError, LightningError};
64#[cfg(test)]
65use crate::ln::outbound_payment;
66use crate::ln::outbound_payment::{OutboundPayments, PendingOutboundPayment, RetryableInvoiceRequest, SendAlongPathArgs, StaleExpiration};
67use crate::offers::invoice::{Bolt12Invoice, DEFAULT_RELATIVE_EXPIRY, DerivedSigningPubkey, ExplicitSigningPubkey, InvoiceBuilder, UnsignedBolt12Invoice};
68use crate::offers::invoice_error::InvoiceError;
69use crate::offers::invoice_request::{InvoiceRequest, InvoiceRequestBuilder};
70use crate::offers::nonce::Nonce;
71use crate::offers::offer::{Offer, OfferBuilder};
72use crate::offers::parse::Bolt12SemanticError;
73use crate::offers::refund::{Refund, RefundBuilder};
74use crate::offers::signer;
75#[cfg(async_payments)]
76use crate::offers::static_invoice::StaticInvoice;
77use crate::onion_message::async_payments::{AsyncPaymentsMessage, HeldHtlcAvailable, ReleaseHeldHtlc, AsyncPaymentsMessageHandler};
78use crate::onion_message::dns_resolution::HumanReadableName;
79use crate::onion_message::messenger::{Destination, MessageRouter, Responder, ResponseInstruction, MessageSendInstructions};
80use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
81use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
82use crate::sign::ecdsa::EcdsaChannelSigner;
83use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
84use crate::util::wakers::{Future, Notifier};
85use crate::util::scid_utils::fake_scid;
86use crate::util::string::UntrustedString;
87use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
88use crate::util::ser::TransactionU16LenLimited;
89use crate::util::logger::{Level, Logger, WithContext};
90use crate::util::errors::APIError;
91
92#[cfg(feature = "dnssec")]
93use crate::blinded_path::message::DNSResolverContext;
94#[cfg(feature = "dnssec")]
95use crate::onion_message::dns_resolution::{DNSResolverMessage, DNSResolverMessageHandler, DNSSECQuery, DNSSECProof, OMNameResolver};
96
97#[cfg(not(c_bindings))]
98use {
99 crate::offers::offer::DerivedMetadata,
100 crate::onion_message::messenger::DefaultMessageRouter,
101 crate::routing::router::DefaultRouter,
102 crate::routing::gossip::NetworkGraph,
103 crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters},
104 crate::sign::KeysManager,
105};
106#[cfg(c_bindings)]
107use {
108 crate::offers::offer::OfferWithDerivedMetadataBuilder,
109 crate::offers::refund::RefundMaybeWithDerivedMetadataBuilder,
110};
111
112use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, CreationError, Currency, Description, InvoiceBuilder as Bolt11InvoiceBuilder, SignOrCreationError, DEFAULT_EXPIRY_TIME};
113
114use alloc::collections::{btree_map, BTreeMap};
115
116use crate::io;
117use crate::prelude::*;
118use core::{cmp, mem};
119use core::borrow::Borrow;
120use core::cell::RefCell;
121use crate::io::Read;
122use crate::sync::{Arc, Mutex, RwLock, RwLockReadGuard, FairRwLock, LockTestExt, LockHeldState};
123use core::sync::atomic::{AtomicUsize, AtomicBool, Ordering};
124use core::time::Duration;
125use core::ops::Deref;
126use bitcoin::hex::impl_fmt_traits;
127pub use crate::ln::outbound_payment::{Bolt12PaymentError, ProbeSendFailure, Retry, RetryableSendFailure, RecipientOnionFields};
129#[cfg(test)]
130pub(crate) use crate::ln::outbound_payment::PaymentSendFailure;
131use crate::ln::script::ShutdownScript;
132
/// Routing info parsed from a received HTLC's onion, describing what should be done with it:
/// forwarded onward, claimed as a regular (invoice-based) receive, or claimed as a keysend.
#[derive(Clone)] #[cfg_attr(test, derive(Debug, PartialEq))]
pub enum PendingHTLCRouting {
	/// The HTLC should be forwarded on to another node.
	Forward {
		/// The onion packet to include in the outbound HTLC for the next hop.
		onion_packet: msgs::OnionPacket,
		/// The SCID of the channel we were instructed to forward over.
		short_channel_id: u64,
		/// Set if this HTLC is being forwarded as part of a blinded path.
		blinded: Option<BlindedForward>,
		/// The absolute CLTV expiry of the inbound HTLC, if available.
		incoming_cltv_expiry: Option<u32>,
	},
	/// The HTLC terminates here as a payment to us, claimed against an invoice.
	Receive {
		/// Final-hop onion data (payment secret and total amount).
		payment_data: msgs::FinalOnionHopData,
		/// Optional metadata the sender attached for us.
		payment_metadata: Option<Vec<u8>>,
		/// Context from a blinded path, if the payment came over one.
		payment_context: Option<PaymentContext>,
		/// The absolute CLTV expiry of the inbound HTLC.
		incoming_cltv_expiry: u32,
		/// Shared secret for a phantom-node receive, if applicable.
		phantom_shared_secret: Option<[u8; 32]>,
		/// Sender-provided custom TLVs from the final onion hop.
		custom_tlvs: Vec<(u64, Vec<u8>)>,
		/// True if failures must be reported as blinded-path failures (we are a blinded-path
		/// recipient and must not leak info via error contents).
		requires_blinded_error: bool,
	},
	/// The HTLC terminates here as a spontaneous (keysend) payment carrying its own preimage.
	ReceiveKeysend {
		/// Final-hop onion data, if the sender included it alongside the keysend preimage.
		payment_data: Option<msgs::FinalOnionHopData>,
		/// The preimage included in the onion, letting us claim without an invoice.
		payment_preimage: PaymentPreimage,
		/// Optional metadata the sender attached for us.
		payment_metadata: Option<Vec<u8>>,
		/// The absolute CLTV expiry of the inbound HTLC.
		incoming_cltv_expiry: u32,
		/// Sender-provided custom TLVs from the final onion hop.
		custom_tlvs: Vec<(u64, Vec<u8>)>,
		/// True if failures must be reported as blinded-path failures.
		requires_blinded_error: bool,
		/// Whether the payment secret was created by the recipient — TODO(review) confirm
		/// the exact semantics against the code that sets this.
		has_recipient_created_payment_secret: bool,
	},
}
242
/// Information about an HTLC being forwarded within a blinded path.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct BlindedForward {
	/// The blinding point carried on the inbound HTLC.
	pub inbound_blinding_point: PublicKey,
	/// Whether we fail back as the introduction node or as a node within the blinded path.
	pub failure: BlindedFailure,
	/// A blinding point override to use on the outbound HTLC, if any.
	pub next_blinding_override: Option<PublicKey>,
}
258
259impl PendingHTLCRouting {
260 fn blinded_failure(&self) -> Option<BlindedFailure> {
262 match self {
263 Self::Forward { blinded: Some(BlindedForward { failure, .. }), .. } => Some(*failure),
264 Self::Receive { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
265 Self::ReceiveKeysend { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
266 _ => None,
267 }
268 }
269
270 fn incoming_cltv_expiry(&self) -> Option<u32> {
271 match self {
272 Self::Forward { incoming_cltv_expiry, .. } => *incoming_cltv_expiry,
273 Self::Receive { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry),
274 Self::ReceiveKeysend { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry),
275 }
276 }
277}
278
/// Fully-decoded information about a pending inbound HTLC, including where it should go next.
#[derive(Clone)] #[cfg_attr(test, derive(Debug, PartialEq))]
pub struct PendingHTLCInfo {
	/// What to do with the HTLC (forward or claim).
	pub routing: PendingHTLCRouting,
	/// The onion shared secret for the incoming HTLC, used to encrypt failure packets.
	pub incoming_shared_secret: [u8; 32],
	/// The payment hash of the HTLC.
	pub payment_hash: PaymentHash,
	/// The inbound amount in msat, if known.
	pub incoming_amt_msat: Option<u64>,
	/// The amount in msat to forward (or claim) on the outgoing side.
	pub outgoing_amt_msat: u64,
	/// The CLTV value to use on the outgoing side.
	pub outgoing_cltv_value: u32,
	/// Fee we skimmed off the top (e.g. as an LSP), if any.
	pub skimmed_fee_msat: Option<u64>,
}
323
/// The wire message with which we fail an HTLC back: either a regular encrypted failure or
/// a "malformed" failure when we could not parse the onion at all.
#[derive(Clone)] pub(super) enum HTLCFailureMsg {
	Relay(msgs::UpdateFailHTLC),
	Malformed(msgs::UpdateFailMalformedHTLC),
}
329
/// Outcome of processing a received HTLC's onion: either accept it for forwarding/claiming,
/// or fail it back with the given message.
#[derive(Clone)] pub(super) enum PendingHTLCStatus {
	Forward(PendingHTLCInfo),
	Fail(HTLCFailureMsg),
}
336
/// A pending HTLC we have accepted, plus the details of the inbound channel/HTLC it arrived
/// on (needed to fail it backwards or claim it later).
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
pub(super) struct PendingAddHTLCInfo {
	pub(super) forward_info: PendingHTLCInfo,

	// Details of the previous (inbound) hop:
	prev_short_channel_id: u64,
	prev_htlc_id: u64,
	prev_counterparty_node_id: Option<PublicKey>,
	prev_channel_id: ChannelId,
	prev_funding_outpoint: OutPoint,
	prev_user_channel_id: u128,
}
354
/// A queued HTLC operation against some channel: add a forwarded HTLC, or fail an HTLC we
/// previously received (regular or malformed failure).
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
pub(super) enum HTLCForwardInfo {
	AddHTLC(PendingAddHTLCInfo),
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
	FailMalformedHTLC {
		htlc_id: u64,
		failure_code: u16,
		// SHA256 of the onion we could not parse, as required for malformed failures.
		sha256_of_onion: [u8; 32],
	},
}
368
/// For a blinded-path HTLC, our role when generating failures: the introduction node may
/// fail normally, while nodes inside the blinded path must fail in a privacy-preserving way.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub enum BlindedFailure {
	/// We are the blinded path's introduction node.
	FromIntroductionNode,
	/// We are a node within the blinded path (not the introduction node).
	FromBlindedNode,
}
380
/// Identifies an inbound HTLC (by channel and HTLC id) together with everything needed to
/// fail it backwards or claim it: the onion shared secret, blinded-path info, etc.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub(crate) struct HTLCPreviousHopData {
	// The SCID of the channel the HTLC arrived on.
	short_channel_id: u64,
	// The user_channel_id of that channel, if known.
	user_channel_id: Option<u128>,
	// The HTLC id within that channel.
	htlc_id: u64,
	// Onion shared secret, used to encrypt failure packets back to the sender.
	incoming_packet_shared_secret: [u8; 32],
	// Set if this was a phantom-node receive.
	phantom_shared_secret: Option<[u8; 32]>,
	// Set if the HTLC came over a blinded path.
	blinded_failure: Option<BlindedFailure>,
	channel_id: ChannelId,

	// The funding outpoint of the inbound channel.
	outpoint: OutPoint,
	counterparty_node_id: Option<PublicKey>,
	// The absolute CLTV expiry of the inbound HTLC, if known.
	cltv_expiry: Option<u32>,
}
401
/// The terminal payload of a claimable HTLC: an invoice-based payment or a spontaneous
/// (keysend) payment carrying its own preimage.
#[derive(PartialEq, Eq)]
enum OnionPayload {
	Invoice {
		// Final-hop data from legacy (pre-TLV) onions, kept only for serialization compat.
		_legacy_hop_data: Option<msgs::FinalOnionHopData>,
	},
	Spontaneous(PaymentPreimage),
}
413
/// An HTLC which terminates at us and is (or will become) claimable, possibly one part of a
/// multi-part payment.
#[derive(PartialEq, Eq)]
struct ClaimableHTLC {
	prev_hop: HTLCPreviousHopData,
	cltv_expiry: u32,
	// The amount (msat) we received, possibly less than the sender intended.
	value: u64,
	// The amount (msat) the sender intended us to receive for this part.
	sender_intended_value: u64,
	onion_payload: OnionPayload,
	// Ticks this HTLC has sat unclaimed; used to time out stale MPP parts.
	timer_ticks: u8,
	// Total value received across all parts, once all parts have arrived.
	total_value_received: Option<u64>,
	// The sender-claimed total amount of the (possibly multi-part) payment.
	total_msat: u64,
	// Fee skimmed by our counterparty, if any.
	counterparty_skimmed_fee_msat: Option<u64>,
}
434
435impl From<&ClaimableHTLC> for events::ClaimedHTLC {
436 fn from(val: &ClaimableHTLC) -> Self {
437 events::ClaimedHTLC {
438 channel_id: val.prev_hop.channel_id,
439 user_channel_id: val.prev_hop.user_channel_id.unwrap_or(0),
440 cltv_expiry: val.cltv_expiry,
441 value_msat: val.value,
442 counterparty_skimmed_fee_msat: val.counterparty_skimmed_fee_msat.unwrap_or(0),
443 }
444 }
445}
446
impl PartialOrd for ClaimableHTLC {
	// Delegates to the total order defined in `Ord` below.
	fn partial_cmp(&self, other: &ClaimableHTLC) -> Option<cmp::Ordering> {
		Some(self.cmp(other))
	}
}
452impl Ord for ClaimableHTLC {
453 fn cmp(&self, other: &ClaimableHTLC) -> cmp::Ordering {
454 let res = (self.prev_hop.channel_id, self.prev_hop.htlc_id).cmp(
455 &(other.prev_hop.channel_id, other.prev_hop.htlc_id)
456 );
457 if res.is_eq() {
458 debug_assert!(self == other, "ClaimableHTLCs from the same source should be identical");
459 }
460 res
461 }
462}
463
/// A type which can authenticate itself with an HMAC keyed off our [`inbound_payment::ExpandedKey`]
/// and a [`Nonce`], and later verify such an HMAC.
pub trait Verification {
	/// Constructs an HMAC over this value for use in an offer payment context.
	fn hmac_for_offer_payment(
		&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
	) -> Hmac<Sha256>;

	/// Verifies that `hmac` matches this value for the given `nonce` and key.
	fn verify_for_offer_payment(
		&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
	) -> Result<(), ()>;
}
477
// Delegates to the payment-hash HMAC helpers in `crate::offers::signer`.
impl Verification for PaymentHash {
	fn hmac_for_offer_payment(
		&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
	) -> Hmac<Sha256> {
		signer::hmac_for_payment_hash(*self, nonce, expanded_key)
	}

	fn verify_for_offer_payment(
		&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
	) -> Result<(), ()> {
		signer::verify_payment_hash(*self, hmac, nonce, expanded_key)
	}
}
495
// Delegates to the payment-TLVs HMAC helpers in `crate::offers::signer`.
impl Verification for UnauthenticatedReceiveTlvs {
	fn hmac_for_offer_payment(
		&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
	) -> Hmac<Sha256> {
		signer::hmac_for_payment_tlvs(self, nonce, expanded_key)
	}

	fn verify_for_offer_payment(
		&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
	) -> Result<(), ()> {
		signer::verify_payment_tlvs(self, hmac, nonce, expanded_key)
	}
}
509
/// A user-provided (or LDK-derived) 32-byte identifier for a payment, used to track it and
/// prevent duplicate sends.
#[derive(Hash, Copy, Clone, PartialEq, Eq)]
pub struct PaymentId(pub [u8; Self::LENGTH]);
516
impl PaymentId {
	/// Number of bytes in a [`PaymentId`].
	pub const LENGTH: usize = 32;

	/// Constructs an HMAC over this payment id for authenticating an async payment,
	/// delegating to `crate::offers::signer`.
	#[cfg(async_payments)]
	pub fn hmac_for_async_payment(
		&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
	) -> Hmac<Sha256> {
		signer::hmac_for_async_payment_id(*self, nonce, expanded_key)
	}

	/// Verifies an HMAC previously produced by [`Self::hmac_for_async_payment`].
	#[cfg(async_payments)]
	pub fn verify_for_async_payment(
		&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
	) -> Result<(), ()> {
		signer::verify_async_payment_id(*self, hmac, nonce, expanded_key)
	}
}
539
// Delegates to the offer-payment-id HMAC helpers in `crate::offers::signer`.
impl Verification for PaymentId {
	fn hmac_for_offer_payment(
		&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
	) -> Hmac<Sha256> {
		signer::hmac_for_offer_payment_id(*self, nonce, expanded_key)
	}

	fn verify_for_offer_payment(
		&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
	) -> Result<(), ()> {
		signer::verify_offer_payment_id(*self, hmac, nonce, expanded_key)
	}
}
557
558impl PaymentId {
559 fn for_inbound_from_htlcs<I: Iterator<Item=(ChannelId, u64)>>(key: &[u8; 32], htlcs: I) -> PaymentId {
560 let mut prev_pair = None;
561 let mut hasher = HmacEngine::new(key);
562 for (channel_id, htlc_id) in htlcs {
563 hasher.input(&channel_id.0);
564 hasher.input(&htlc_id.to_le_bytes());
565 if let Some(prev) = prev_pair {
566 debug_assert!(prev < (channel_id, htlc_id), "HTLCs should be sorted");
567 }
568 prev_pair = Some((channel_id, htlc_id));
569 }
570 PaymentId(Hmac::<Sha256>::from_engine(hasher).to_byte_array())
571 }
572}
573
// Lets a PaymentId be used wherever a byte-slice key is expected.
impl Borrow<[u8]> for PaymentId {
	fn borrow(&self) -> &[u8] {
		&self.0[..]
	}
}
579
// Hex Display/Debug/FromStr impls for PaymentId via rust-bitcoin's helper macro.
impl_fmt_traits! {
	impl fmt_traits for PaymentId {
		const LENGTH: usize = 32;
	}
}
585
// Serialized as the raw 32 bytes.
impl Writeable for PaymentId {
	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
		self.0.write(w)
	}
}
591
// Deserialized as the raw 32 bytes.
impl Readable for PaymentId {
	fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
		let buf: [u8; 32] = Readable::read(r)?;
		Ok(PaymentId(buf))
	}
}
598
/// A 32-byte identifier for an intercepted HTLC, used to later forward or fail it.
#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
pub struct InterceptId(pub [u8; 32]);
604
// Serialized as the raw 32 bytes.
impl Writeable for InterceptId {
	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
		self.0.write(w)
	}
}
610
// Deserialized as the raw 32 bytes.
impl Readable for InterceptId {
	fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
		let buf: [u8; 32] = Readable::read(r)?;
		Ok(InterceptId(buf))
	}
}
617
/// A compact, hashable identifier for an HTLC we sent: either the inbound hop it was
/// forwarded from, or the session key of a payment we originated.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub(crate) enum SentHTLCId {
	PreviousHopData { short_channel_id: u64, htlc_id: u64 },
	OutboundRoute { session_priv: [u8; SECRET_KEY_SIZE] },
}
624impl SentHTLCId {
625 pub(crate) fn from_source(source: &HTLCSource) -> Self {
626 match source {
627 HTLCSource::PreviousHopData(hop_data) => Self::PreviousHopData {
628 short_channel_id: hop_data.short_channel_id,
629 htlc_id: hop_data.htlc_id,
630 },
631 HTLCSource::OutboundRoute { session_priv, .. } =>
632 Self::OutboundRoute { session_priv: session_priv.secret_bytes() },
633 }
634 }
635}
// TLV serialization for SentHTLCId; type numbers are part of the on-disk format and must
// never change.
impl_writeable_tlv_based_enum!(SentHTLCId,
	(0, PreviousHopData) => {
		(0, short_channel_id, required),
		(2, htlc_id, required),
	},
	(2, OutboundRoute) => {
		(0, session_priv, required),
	},
);
645
646
/// The source of an HTLC we offered: either an inbound HTLC we are forwarding, or a payment
/// we ourselves are sending along a route.
#[allow(clippy::derive_hash_xor_eq)] #[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum HTLCSource {
	PreviousHopData(HTLCPreviousHopData),
	OutboundRoute {
		path: Path,
		// Per-payment-path ephemeral key used to build the onion.
		session_priv: SecretKey,
		// The amount of the first hop's HTLC; used to correlate with commitment outputs.
		first_hop_htlc_msat: u64,
		payment_id: PaymentId,
	},
}
// Manual Hash impl because SecretKey does not implement Hash; we hash its raw bytes.
// A variant tag byte is hashed first so the two variants cannot collide.
#[allow(clippy::derive_hash_xor_eq)] impl core::hash::Hash for HTLCSource {
	fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
		match self {
			HTLCSource::PreviousHopData(prev_hop_data) => {
				0u8.hash(hasher);
				prev_hop_data.hash(hasher);
			},
			HTLCSource::OutboundRoute { path, session_priv, payment_id, first_hop_htlc_msat } => {
				1u8.hash(hasher);
				path.hash(hasher);
				session_priv[..].hash(hasher);
				payment_id.hash(hasher);
				first_hop_htlc_msat.hash(hasher);
			},
		}
	}
}
impl HTLCSource {
	/// A fixed dummy outbound source for test vectors. The session key is constant, which is
	/// why this is incompatible with signature grinding.
	#[cfg(all(ldk_test_vectors, test))]
	pub fn dummy() -> Self {
		assert!(cfg!(not(feature = "grind_signatures")));
		HTLCSource::OutboundRoute {
			path: Path { hops: Vec::new(), blinded_tail: None },
			session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
			first_hop_htlc_msat: 0,
			payment_id: PaymentId([2; 32]),
		}
	}

	/// Checks whether this source could plausibly correspond to the given commitment-tx HTLC
	/// output by comparing amounts; forwarded HTLCs always return true (no amount to check
	/// against here).
	#[cfg(debug_assertions)]
	pub(crate) fn possibly_matches_output(&self, htlc: &super::chan_utils::HTLCOutputInCommitment) -> bool {
		if let HTLCSource::OutboundRoute { first_hop_htlc_msat, .. } = self {
			*first_hop_htlc_msat == htlc.amount_msat
		} else {
			true
		}
	}

	/// The CLTV expiry of the inbound HTLC this source forwards, if it is a forward and the
	/// expiry was recorded.
	pub(crate) fn inbound_htlc_expiry(&self) -> Option<u32> {
		match self {
			Self::PreviousHopData(HTLCPreviousHopData { cltv_expiry, .. }) => *cltv_expiry,
			_ => None,
		}
	}
}
712
/// The reason an HTLC is being failed back, mapping onto BOLT 4 failure codes.
#[derive(Clone, Copy)]
pub enum FailureCode {
	/// A transient node-level failure.
	TemporaryNodeFailure,
	/// The HTLC requires a node feature we do not support.
	RequiredNodeFeatureMissing,
	/// The payment details were wrong or unknown (also used to avoid leaking whether a
	/// payment hash exists).
	IncorrectOrUnknownPaymentDetails,
	/// The onion payload was invalid; optionally carries the offending TLV type and the
	/// byte offset of the failure.
	InvalidOnionPayload(Option<(u64, u16)>),
}
737
738impl Into<u16> for FailureCode {
739 fn into(self) -> u16 {
740 match self {
741 FailureCode::TemporaryNodeFailure => 0x2000 | 2,
742 FailureCode::RequiredNodeFeatureMissing => 0x4000 | 0x2000 | 3,
743 FailureCode::IncorrectOrUnknownPaymentDetails => 0x4000 | 15,
744 FailureCode::InvalidOnionPayload(_) => 0x4000 | 22,
745 }
746 }
747}
748
/// Internal error type for message handling, carrying the wire-level error to send plus
/// whether (and how) the channel is being closed as a result.
struct MsgHandleErrInternal {
	err: msgs::LightningError,
	// True if handling this error closes the channel.
	closes_channel: bool,
	// If the channel was shut down, the shutdown result and a final channel_update to
	// broadcast, if any.
	shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
}
impl MsgHandleErrInternal {
	/// Builds an error which sends an `error` message for `channel_id` without closing any
	/// channel.
	#[inline]
	fn send_err_msg_no_close(err: String, channel_id: ChannelId) -> Self {
		Self {
			err: LightningError {
				err: err.clone(),
				action: msgs::ErrorAction::SendErrorMessage {
					msg: msgs::ErrorMessage {
						channel_id,
						data: err
					},
				},
			},
			closes_channel: false,
			shutdown_finish: None,
		}
	}
	/// Wraps an existing [`msgs::LightningError`] without closing any channel.
	#[inline]
	fn from_no_close(err: msgs::LightningError) -> Self {
		Self { err, closes_channel: false, shutdown_finish: None }
	}
	/// Builds an error for a channel that has been shut down, carrying the shutdown result
	/// (and final `channel_update`, if any) for the caller to act on.
	#[inline]
	fn from_finish_shutdown(err: String, channel_id: ChannelId, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
		let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
		// If the shutdown still has a monitor update pending we disconnect the peer rather
		// than only sending an error message.
		let action = if shutdown_res.monitor_update.is_some() {
			msgs::ErrorAction::DisconnectPeer { msg: Some(err_msg) }
		} else {
			msgs::ErrorAction::SendErrorMessage { msg: err_msg }
		};
		Self {
			err: LightningError { err, action },
			closes_channel: true,
			shutdown_finish: Some((shutdown_res, channel_update)),
		}
	}
	/// Maps a [`ChannelError`] to the matching wire action (warning / ignore / error message)
	/// without closing the channel.
	#[inline]
	fn from_chan_no_close(err: ChannelError, channel_id: ChannelId) -> Self {
		Self {
			err: match err {
				ChannelError::Warn(msg) => LightningError {
					err: msg.clone(),
					action: msgs::ErrorAction::SendWarningMessage {
						msg: msgs::WarningMessage {
							channel_id,
							data: msg
						},
						log_level: Level::Warn,
					},
				},
				ChannelError::Ignore(msg) => LightningError {
					err: msg,
					action: msgs::ErrorAction::IgnoreError,
				},
				ChannelError::Close((msg, _reason)) => LightningError {
					err: msg.clone(),
					action: msgs::ErrorAction::SendErrorMessage {
						msg: msgs::ErrorMessage {
							channel_id,
							data: msg
						},
					},
				},
			},
			closes_channel: false,
			shutdown_finish: None,
		}
	}

	/// Whether handling this error closes the channel.
	fn closes_channel(&self) -> bool {
		self.closes_channel
	}
}
834
/// Minimum time (in milliseconds) HTLCs sit in the holding cell before being relayed.
pub(super) const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;
840
/// When resuming a channel, the order in which a pending `commitment_signed` and
/// `revoke_and_ack` must be sent to the peer.
#[derive(Clone, PartialEq, Debug)]
pub(super) enum RAACommitmentOrder {
	/// Send the `commitment_signed` first.
	CommitmentFirst,
	/// Send the `revoke_and_ack` first.
	RevokeAndACKFirst,
}
852
/// A payment we have started claiming; persisted so a `PaymentClaimed` event can be
/// (re)generated even across restarts.
#[derive(Clone, Debug, PartialEq, Eq)]
struct ClaimingPayment {
	// Total amount (msat) across all HTLC parts being claimed.
	amount_msat: u64,
	payment_purpose: events::PaymentPurpose,
	// Our node id (or phantom node id) which received the payment.
	receiver_node_id: PublicKey,
	htlcs: Vec<events::ClaimedHTLC>,
	// Amount the sender intended to send, taken from the first part.
	sender_intended_value: Option<u64>,
	onion_fields: Option<RecipientOnionFields>,
	payment_id: Option<PaymentId>,
}
// TLV serialization; type numbers are part of the persisted format and must not change.
impl_writeable_tlv_based!(ClaimingPayment, {
	(0, amount_msat, required),
	(2, payment_purpose, required),
	(4, receiver_node_id, required),
	(5, htlcs, optional_vec),
	(7, sender_intended_value, option),
	(9, onion_fields, option),
	(11, payment_id, option),
});
873
/// An inbound payment (possibly multi-part) which is claimable but not yet being claimed.
struct ClaimablePayment {
	purpose: events::PaymentPurpose,
	onion_fields: Option<RecipientOnionFields>,
	// The HTLC parts making up the payment.
	htlcs: Vec<ClaimableHTLC>,
}
879
impl ClaimablePayment {
	/// Derives a deterministic [`PaymentId`] for this inbound payment from its HTLC sources,
	/// keyed by the node-local `secret`.
	fn inbound_payment_id(&self, secret: &[u8; 32]) -> PaymentId {
		PaymentId::for_inbound_from_htlcs(
			secret,
			self.htlcs.iter().map(|htlc| (htlc.prev_hop.channel_id, htlc.prev_hop.htlc_id))
		)
	}
}
888
/// How a channel's funding was provided to us: the full transaction (which we validated and
/// will broadcast), or just an outpoint (the user broadcasts manually).
enum FundingType {
	Checked(Transaction),
	Unchecked(OutPoint),
}
905
906impl FundingType {
907 fn txid(&self) -> Txid {
908 match self {
909 FundingType::Checked(tx) => tx.compute_txid(),
910 FundingType::Unchecked(outp) => outp.txid,
911 }
912 }
913
914 fn transaction_or_dummy(&self) -> Transaction {
915 match self {
916 FundingType::Checked(tx) => tx.clone(),
917 FundingType::Unchecked(_) => Transaction {
918 version: bitcoin::transaction::Version::TWO,
919 lock_time: bitcoin::absolute::LockTime::ZERO,
920 input: Vec::new(),
921 output: Vec::new(),
922 },
923 }
924 }
925
926 fn is_manual_broadcast(&self) -> bool {
927 match self {
928 FundingType::Checked(_) => false,
929 FundingType::Unchecked(_) => true,
930 }
931 }
932}
933
/// All inbound payments we can claim: those still waiting for the user to claim them, and
/// those the user has started claiming (pending monitor persistence).
struct ClaimablePayments {
	// Payments claimable by the user but not yet being claimed, keyed by payment hash.
	claimable_payments: HashMap<PaymentHash, ClaimablePayment>,

	// Payments currently being claimed, kept until the claim fully completes.
	pending_claiming_payments: HashMap<PaymentHash, ClaimingPayment>,
}
951
impl ClaimablePayments {
	/// Moves a payment from `claimable_payments` into `pending_claiming_payments` and
	/// returns its HTLCs plus the [`ClaimingPayment`] record.
	///
	/// Returns `Err` with the payment's HTLCs (to be failed back) if the payment carries
	/// unknown even custom TLVs and `custom_tlvs_known` is false, or `Err(vec![])` if no
	/// payment with `payment_hash` exists.
	fn begin_claiming_payment<L: Deref, S: Deref>(
		&mut self, payment_hash: PaymentHash, node_signer: &S, logger: &L,
		inbound_payment_id_secret: &[u8; 32], custom_tlvs_known: bool,
	) -> Result<(Vec<ClaimableHTLC>, ClaimingPayment), Vec<ClaimableHTLC>>
		where L::Target: Logger, S::Target: NodeSigner,
	{
		match self.claimable_payments.remove(&payment_hash) {
			Some(payment) => {
				let mut receiver_node_id = node_signer.get_node_id(Recipient::Node)
					.expect("Failed to get node_id for node recipient");
				// If any part was received as a phantom-node payment, report the phantom
				// node id as the receiver instead of our own.
				for htlc in payment.htlcs.iter() {
					if htlc.prev_hop.phantom_shared_secret.is_some() {
						let phantom_pubkey = node_signer.get_node_id(Recipient::PhantomNode)
							.expect("Failed to get node_id for phantom node recipient");
						receiver_node_id = phantom_pubkey;
						break;
					}
				}

				// Even-typed TLVs are "it's ok to be odd" mandatory: reject the payment if
				// the user hasn't indicated they understand them.
				if let Some(RecipientOnionFields { custom_tlvs, .. }) = &payment.onion_fields {
					if !custom_tlvs_known && custom_tlvs.iter().any(|(typ, _)| typ % 2 == 0) {
						log_info!(logger, "Rejecting payment with payment hash {} as we cannot accept payment with unknown even TLVs: {}",
							&payment_hash, log_iter!(custom_tlvs.iter().map(|(typ, _)| typ).filter(|typ| *typ % 2 == 0)));
						return Err(payment.htlcs);
					}
				}

				let payment_id = payment.inbound_payment_id(inbound_payment_id_secret);
				// A pending claim for this hash should never already exist; if it does we
				// log loudly (and debug-assert) but keep the existing record.
				let claiming_payment = self.pending_claiming_payments
					.entry(payment_hash)
					.and_modify(|_| {
						debug_assert!(false, "Shouldn't get a duplicate pending claim event ever");
						log_error!(logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug",
							&payment_hash);
					})
					.or_insert_with(|| {
						let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect();
						let sender_intended_value = payment.htlcs.first().map(|htlc| htlc.total_msat);
						ClaimingPayment {
							amount_msat: payment.htlcs.iter().map(|source| source.value).sum(),
							payment_purpose: payment.purpose,
							receiver_node_id,
							htlcs,
							sender_intended_value,
							onion_fields: payment.onion_fields,
							payment_id: Some(payment_id),
						}
					}).clone();

				Ok((payment.htlcs, claiming_payment))
			},
			None => Err(Vec::new())
		}
	}
}
1018
/// Work queued during deserialization/startup to be processed in the background once the
/// `ChannelManager` is running.
#[derive(Debug)]
enum BackgroundEvent {
	/// A monitor update regenerated on startup for a channel that is already closed.
	ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelId, ChannelMonitorUpdate)),
	/// A monitor update regenerated on startup for a live channel with a known counterparty.
	MonitorUpdateRegeneratedOnStartup {
		counterparty_node_id: PublicKey,
		funding_txo: OutPoint,
		channel_id: ChannelId,
		update: ChannelMonitorUpdate
	},
	/// Marks that all regenerated monitor updates for the given channel have completed.
	MonitorUpdatesComplete {
		counterparty_node_id: PublicKey,
		channel_id: ChannelId,
	},
}
1060
/// Identifies a channel whose RAA monitor update was blocked on an event, plus the blocking
/// action to release once the event has been handled.
#[derive(Debug)]
pub(crate) struct EventUnblockedChannel {
	counterparty_node_id: PublicKey,
	funding_txo: OutPoint,
	channel_id: ChannelId,
	blocking_action: RAAMonitorUpdateBlockingAction,
}
1069
impl Writeable for EventUnblockedChannel {
	// Fields are written in declaration order; `MaybeReadable` below reads them in the same
	// order.
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		self.counterparty_node_id.write(writer)?;
		self.funding_txo.write(writer)?;
		self.channel_id.write(writer)?;
		self.blocking_action.write(writer)
	}
}
1078
1079impl MaybeReadable for EventUnblockedChannel {
1080 fn read<R: Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
1081 let counterparty_node_id = Readable::read(reader)?;
1082 let funding_txo = Readable::read(reader)?;
1083 let channel_id = Readable::read(reader)?;
1084 let blocking_action = match RAAMonitorUpdateBlockingAction::read(reader)? {
1085 Some(blocking_action) => blocking_action,
1086 None => return Ok(None),
1087 };
1088 Ok(Some(EventUnblockedChannel {
1089 counterparty_node_id,
1090 funding_txo,
1091 channel_id,
1092 blocking_action,
1093 }))
1094 }
1095}
1096
/// Actions to run once an in-flight `ChannelMonitorUpdate` completes.
#[derive(Debug)]
pub(crate) enum MonitorUpdateCompletionAction {
	/// Surface the payment-claimed state, possibly finishing an MPP claim across channels.
	PaymentClaimed {
		payment_hash: PaymentHash,
		pending_mpp_claim: Option<(PublicKey, ChannelId, u64, PendingMPPClaimPointer)>,
	},
	/// Emit an event and, optionally, unblock a downstream channel's monitor updates.
	EmitEventAndFreeOtherChannel {
		event: events::Event,
		downstream_counterparty_and_funding_outpoint: Option<EventUnblockedChannel>,
	},
	/// Immediately unblock another channel's monitor updates (no event to emit first).
	FreeOtherChannelImmediately {
		downstream_counterparty_node_id: PublicKey,
		downstream_funding_outpoint: OutPoint,
		blocking_action: RAAMonitorUpdateBlockingAction,
		downstream_channel_id: ChannelId,
	},
}
1141
// Upgradable TLV serialization. Note the deliberately-huge type for `pending_mpp_claim`
// (never written, always None on read) and the default derivations of channel ids from
// funding outpoints for entries written before those fields existed.
impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
	(0, PaymentClaimed) => {
		(0, payment_hash, required),
		(9999999999, pending_mpp_claim, (static_value, None)),
	},
	(1, FreeOtherChannelImmediately) => {
		(0, downstream_counterparty_node_id, required),
		(2, downstream_funding_outpoint, required),
		(4, blocking_action, upgradable_required),
		(5, downstream_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(downstream_funding_outpoint.0.unwrap()))),
	},
	(2, EmitEventAndFreeOtherChannel) => {
		(0, event, upgradable_required),
		(1, downstream_counterparty_and_funding_outpoint, upgradable_option),
	},
);
1167
/// Actions to run once a user-visible event has been handled.
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum EventCompletionAction {
	/// Release a `ChannelMonitorUpdate` that was held back pending event processing.
	ReleaseRAAChannelMonitorUpdate {
		counterparty_node_id: PublicKey,
		channel_funding_outpoint: OutPoint,
		channel_id: ChannelId,
	},
}
// TLV serialization; `channel_id` defaults from the funding outpoint for entries written
// before the field existed.
impl_writeable_tlv_based_enum!(EventCompletionAction,
	(0, ReleaseRAAChannelMonitorUpdate) => {
		(0, channel_funding_outpoint, required),
		(2, counterparty_node_id, required),
		(3, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(channel_funding_outpoint.0.unwrap()))),
	}
);
1185
/// Identifies a specific HTLC (by channel and HTLC id) that we are claiming.
struct HTLCClaimSource {
	// May be None for HTLCs read from very old serialized state.
	counterparty_node_id: Option<PublicKey>,
	funding_txo: OutPoint,
	channel_id: ChannelId,
	htlc_id: u64,
}
1198
1199impl From<&MPPClaimHTLCSource> for HTLCClaimSource {
1200 fn from(o: &MPPClaimHTLCSource) -> HTLCClaimSource {
1201 HTLCClaimSource {
1202 counterparty_node_id: Some(o.counterparty_node_id),
1203 funding_txo: o.funding_txo,
1204 channel_id: o.channel_id,
1205 htlc_id: o.htlc_id,
1206 }
1207 }
1208}
1209
/// One HTLC part of a multi-part payment being claimed, identified by channel and HTLC id.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
struct MPPClaimHTLCSource {
	counterparty_node_id: PublicKey,
	funding_txo: OutPoint,
	channel_id: ChannelId,
	htlc_id: u64,
}
1220
// TLV serialization; type numbers are part of the persisted format and must not change.
impl_writeable_tlv_based!(MPPClaimHTLCSource, {
	(0, counterparty_node_id, required),
	(2, funding_txo, required),
	(4, channel_id, required),
	(6, htlc_id, required),
});
1227
/// Tracks the progress of claiming a multi-part payment: which HTLC parts still need the
/// preimage written to their monitor, and which already have it.
#[derive(Debug)]
pub(crate) struct PendingMPPClaim {
	channels_without_preimage: Vec<MPPClaimHTLCSource>,
	channels_with_preimage: Vec<MPPClaimHTLCSource>,
}
1233
/// Everything needed to replay a payment claim: all of its MPP parts plus the pending
/// [`ClaimingPayment`] record.
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct PaymentClaimDetails {
	mpp_parts: Vec<MPPClaimHTLCSource>,
	claiming_payment: ClaimingPayment,
}
1245
// TLV serialization; type numbers are part of the persisted format and must not change.
impl_writeable_tlv_based!(PaymentClaimDetails, {
	(0, mpp_parts, required_vec),
	(2, claiming_payment, required),
});
1250
/// A shared, mutex-guarded handle to a [`PendingMPPClaim`].
#[derive(Clone)]
pub(crate) struct PendingMPPClaimPointer(Arc<Mutex<PendingMPPClaim>>);

// Equality is pointer identity on the Arc — two pointers are equal only if they refer to
// the very same claim object.
impl PartialEq for PendingMPPClaimPointer {
	fn eq(&self, o: &Self) -> bool { Arc::ptr_eq(&self.0, &o.0) }
}
impl Eq for PendingMPPClaimPointer {}

impl core::fmt::Debug for PendingMPPClaimPointer {
	// Note: takes the inner lock to format, so do not format while holding it elsewhere.
	fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
		self.0.lock().unwrap().fmt(f)
	}
}
1264
/// Reasons a channel's RAA-triggered monitor update is being held back.
#[derive(Clone, PartialEq, Eq, Debug)]
pub(crate) enum RAAMonitorUpdateBlockingAction {
	/// An inbound claim of a forwarded payment is in flight on the identified HTLC.
	ForwardedPaymentInboundClaim {
		channel_id: ChannelId,
		htlc_id: u64,
	},
	/// An MPP payment claim spanning multiple channels has not yet completed.
	ClaimedMPPPayment {
		pending_claim: PendingMPPClaimPointer,
	}
}
1289
impl RAAMonitorUpdateBlockingAction {
	/// Builds the blocking action corresponding to a claim of the HTLC identified by
	/// `prev_hop`.
	fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
		Self::ForwardedPaymentInboundClaim {
			channel_id: prev_hop.channel_id,
			htlc_id: prev_hop.htlc_id,
		}
	}
}
1298
// Upgradable TLV serialization; `ClaimedMPPPayment` is intentionally never written
// (declared as an unread variant).
impl_writeable_tlv_based_enum_upgradable!(RAAMonitorUpdateBlockingAction,
	(0, ForwardedPaymentInboundClaim) => { (0, channel_id, required), (2, htlc_id, required) },
	unread_variants: ClaimedMPPPayment
);
1303
// The upgradable enum reads to Option already (None for unknown variants), so Option reads
// are a direct delegation.
impl Readable for Option<RAAMonitorUpdateBlockingAction> {
	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
		Ok(RAAMonitorUpdateBlockingAction::read(reader)?)
	}
}
1309
/// Everything we track per peer: its channels (in all phases), pending messages, and
/// bookkeeping for in-flight and blocked monitor updates.
pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
	/// All channels with this peer, in any phase, keyed by channel id.
	pub(super) channel_by_id: HashMap<ChannelId, ChannelPhase<SP>>,
	/// Inbound channel requests awaiting manual acceptance by the user.
	pub(super) inbound_channel_request_by_id: HashMap<ChannelId, InboundChannelRequest>,
	// The features the peer advertised in its latest `init` message.
	latest_features: InitFeatures,
	/// Messages queued to be sent to this peer.
	pub(super) pending_msg_events: Vec<MessageSendEvent>,
	// Monitor updates sent to the chain::Watch but not yet confirmed complete, per funding
	// outpoint, in order.
	in_flight_monitor_updates: BTreeMap<OutPoint, Vec<ChannelMonitorUpdate>>,
	// Completion actions to run once a channel's blocked monitor updates finish.
	monitor_update_blocked_actions: BTreeMap<ChannelId, Vec<MonitorUpdateCompletionAction>>,
	// Actions currently blocking RAA-triggered monitor updates, per channel.
	actions_blocking_raa_monitor_updates: BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
	// Latest monitor update ids for channels which have been closed but whose monitors we
	// still update.
	closed_channel_monitor_update_ids: BTreeMap<ChannelId, u64>,
	/// Whether the peer is currently connected to us.
	pub is_connected: bool,
}
1375
1376impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
1377 fn ok_to_remove(&self, require_disconnected: bool) -> bool {
1381 if require_disconnected && self.is_connected {
1382 return false
1383 }
1384 for (_, updates) in self.in_flight_monitor_updates.iter() {
1385 if !updates.is_empty() {
1386 return false;
1387 }
1388 }
1389 !self.channel_by_id.iter().any(|(_, phase)|
1390 match phase {
1391 ChannelPhase::Funded(_) | ChannelPhase::UnfundedOutboundV1(_) => true,
1392 ChannelPhase::UnfundedInboundV1(_) => false,
1393 ChannelPhase::UnfundedOutboundV2(_) => true,
1394 ChannelPhase::UnfundedInboundV2(_) => false,
1395 }
1396 )
1397 && self.monitor_update_blocked_actions.is_empty()
1398 && self.closed_channel_monitor_update_ids.is_empty()
1399 }
1400
1401 fn total_channel_count(&self) -> usize {
1403 self.channel_by_id.len() + self.inbound_channel_request_by_id.len()
1404 }
1405
1406 fn has_channel(&self, channel_id: &ChannelId) -> bool {
1408 self.channel_by_id.contains_key(channel_id) ||
1409 self.inbound_channel_request_by_id.contains_key(channel_id)
1410 }
1411}
1412
/// An owned open-channel message: the V1 protocol message, or (behind the `dual_funding`
/// cfg) the V2 variant.
#[derive(Clone)]
pub(super) enum OpenChannelMessage {
	V1(msgs::OpenChannel),
	#[cfg(dual_funding)]
	V2(msgs::OpenChannelV2),
}
1419
/// A borrowed open-channel message, mirroring [`OpenChannelMessage`] without requiring a
/// clone at the message-handling boundary.
pub(super) enum OpenChannelMessageRef<'a> {
	V1(&'a msgs::OpenChannel),
	#[cfg(dual_funding)]
	V2(&'a msgs::OpenChannelV2),
}
1425
/// An inbound open-channel request on which the accept/reject decision has been deferred
/// (e.g. awaiting a user response), stored in `PeerState::inbound_channel_request_by_id`.
pub(super) struct InboundChannelRequest {
	/// The original open-channel message, kept so the request can be acted on later.
	pub open_channel_msg: OpenChannelMessage,
	/// Timer ticks remaining before the unaccepted request is dropped (see
	/// `UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS`).
	pub ticks_remaining: i32,
}
1434
/// Number of timer ticks an unaccepted inbound channel request is kept before removal
/// (initial value of [`InboundChannelRequest::ticks_remaining`]).
const UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS: i32 = 2;

/// Number of blocks of feerate history retained (144 blocks is roughly one day).
/// NOTE(review): presumably bounds `ChannelManager::last_days_feerates` — confirm at its
/// use sites, which are outside this chunk.
pub(super) const FEERATE_TRACKING_BLOCKS: usize = 144;
1443
/// State for an inbound payment we have handed out a payment secret for but not yet received.
#[derive(Debug)]
struct PendingInboundPayment {
	/// The payment secret the sender must include for us to accept the payment.
	payment_secret: PaymentSecret,
	/// Absolute expiry, in seconds. NOTE(review): the epoch used (UNIX time vs.
	/// highest-seen block timestamp) is not visible here — confirm at the use sites.
	expiry_time: u64,
	/// An opaque, caller-provided identifier associated with this payment.
	user_payment_id: u64,
	/// The payment preimage, if we know it (absent when only the hash was provided).
	payment_preimage: Option<PaymentPreimage>,
	/// The minimum value, in msat, we will accept for this payment, if any.
	min_value_msat: Option<u64>,
}
1465
/// A [`ChannelManager`] parameterized over `Arc`s for its dependencies, using
/// [`KeysManager`] for signing and the default router/message-router/scorer stack. Useful
/// when the manager itself is shared across threads behind an `Arc`.
#[cfg(not(c_bindings))]
pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
	Arc<M>,
	Arc<T>,
	Arc<KeysManager>,
	Arc<KeysManager>,
	Arc<KeysManager>,
	Arc<F>,
	Arc<DefaultRouter<
		Arc<NetworkGraph<Arc<L>>>,
		Arc<L>,
		Arc<KeysManager>,
		Arc<RwLock<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
		ProbabilisticScoringFeeParameters,
		ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
	>>,
	Arc<DefaultMessageRouter<
		Arc<NetworkGraph<Arc<L>>>,
		Arc<L>,
		Arc<KeysManager>,
	>>,
	Arc<L>
>;
1499
/// The plain-reference counterpart of [`SimpleArcChannelManager`]: a [`ChannelManager`]
/// parameterized over borrowed dependencies, avoiding `Arc` overhead where the caller can
/// guarantee the referenced objects outlive the manager.
#[cfg(not(c_bindings))]
pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, 'i, M, T, F, L> =
	ChannelManager<
		&'a M,
		&'b T,
		&'c KeysManager,
		&'c KeysManager,
		&'c KeysManager,
		&'d F,
		&'e DefaultRouter<
			&'f NetworkGraph<&'g L>,
			&'g L,
			&'c KeysManager,
			&'h RwLock<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>,
			ProbabilisticScoringFeeParameters,
			ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>
		>,
		&'i DefaultMessageRouter<
			&'f NetworkGraph<&'g L>,
			&'g L,
			&'c KeysManager,
		>,
		&'g L
	>;
1535
/// A trait implemented by anything that is, or wraps, a [`ChannelManager`], letting generic
/// code reach the concrete manager via [`AChannelManager::get_cm`]. Each `X`/`XTarget`
/// associated-type pair names a dependency and a `Deref` to it, mirroring the manager's
/// generic parameters.
pub trait AChannelManager {
	/// A type implementing [`chain::Watch`].
	type Watch: chain::Watch<Self::Signer> + ?Sized;
	/// A type that may be dereferenced to [`Self::Watch`].
	type M: Deref<Target = Self::Watch>;
	/// A type implementing [`BroadcasterInterface`].
	type Broadcaster: BroadcasterInterface + ?Sized;
	/// A type that may be dereferenced to [`Self::Broadcaster`].
	type T: Deref<Target = Self::Broadcaster>;
	/// A type implementing [`EntropySource`].
	type EntropySource: EntropySource + ?Sized;
	/// A type that may be dereferenced to [`Self::EntropySource`].
	type ES: Deref<Target = Self::EntropySource>;
	/// A type implementing [`NodeSigner`].
	type NodeSigner: NodeSigner + ?Sized;
	/// A type that may be dereferenced to [`Self::NodeSigner`].
	type NS: Deref<Target = Self::NodeSigner>;
	/// The channel signer produced by [`Self::SignerProvider`].
	type Signer: EcdsaChannelSigner + Sized;
	/// A type implementing [`SignerProvider`] for [`Self::Signer`].
	type SignerProvider: SignerProvider<EcdsaSigner= Self::Signer> + ?Sized;
	/// A type that may be dereferenced to [`Self::SignerProvider`].
	type SP: Deref<Target = Self::SignerProvider>;
	/// A type implementing [`FeeEstimator`].
	type FeeEstimator: FeeEstimator + ?Sized;
	/// A type that may be dereferenced to [`Self::FeeEstimator`].
	type F: Deref<Target = Self::FeeEstimator>;
	/// A type implementing [`Router`].
	type Router: Router + ?Sized;
	/// A type that may be dereferenced to [`Self::Router`].
	type R: Deref<Target = Self::Router>;
	/// A type implementing [`MessageRouter`].
	type MessageRouter: MessageRouter + ?Sized;
	/// A type that may be dereferenced to [`Self::MessageRouter`].
	type MR: Deref<Target = Self::MessageRouter>;
	/// A type implementing [`Logger`].
	type Logger: Logger + ?Sized;
	/// A type that may be dereferenced to [`Self::Logger`].
	type L: Deref<Target = Self::Logger>;
	/// Returns a reference to the underlying [`ChannelManager`].
	fn get_cm(&self) -> &ChannelManager<Self::M, Self::T, Self::ES, Self::NS, Self::SP, Self::F, Self::R, Self::MR, Self::L>;
}
1582
// A `ChannelManager` is trivially its own `AChannelManager`: each associated type maps
// directly to the corresponding generic parameter (or its `Deref` target), and `get_cm`
// returns `self`.
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> AChannelManager
for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	type Watch = M::Target;
	type M = M;
	type Broadcaster = T::Target;
	type T = T;
	type EntropySource = ES::Target;
	type ES = ES;
	type NodeSigner = NS::Target;
	type NS = NS;
	type Signer = <SP::Target as SignerProvider>::EcdsaSigner;
	type SignerProvider = SP::Target;
	type SP = SP;
	type FeeEstimator = F::Target;
	type F = F;
	type Router = R::Target;
	type R = R;
	type MessageRouter = MR::Target;
	type MR = MR;
	type Logger = L::Target;
	type L = L;
	fn get_cm(&self) -> &ChannelManager<M, T, ES, NS, SP, F, R, MR, L> { self }
}
1617
/// The core lightning channel state machine: tracks channels per peer, HTLC forwarding
/// state, claimable/outbound payments, and the various queues of messages and events to
/// surface to peers and users.
/// NOTE(review): the original (extensive) documentation for this type is not visible in
/// this chunk; the field notes below are grounded only in the visible code.
pub struct ChannelManager<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	// --- Configuration and external interfaces ---
	default_configuration: UserConfig,
	// Identifies the chain (genesis hash) this manager operates on.
	chain_hash: ChainHash,
	fee_estimator: LowerBoundedFeeEstimator<F>,
	chain_monitor: M,
	tx_broadcaster: T,
	router: R,
	message_router: MR,

	// The best chain tip we are aware of (`pub(super)` in tests for direct inspection).
	#[cfg(test)]
	pub(super) best_block: RwLock<BestBlock>,
	#[cfg(not(test))]
	best_block: RwLock<BestBlock>,
	// secp256k1 context used for all crypto operations.
	secp_ctx: Secp256k1<secp256k1::All>,

	// State of all outbound payments we have initiated.
	pending_outbound_payments: OutboundPayments,

	// HTLCs pending forwarding, keyed by (presumably) the outgoing short channel id.
	#[cfg(test)]
	pub(super) forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
	#[cfg(not(test))]
	forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
	// HTLCs intercepted on forward, awaiting user action, keyed by intercept id.
	pending_intercepted_htlcs: Mutex<HashMap<InterceptId, PendingAddHTLCInfo>>,

	// `update_add_htlc` messages awaiting onion decoding, keyed by a u64 channel identifier
	// (presumably a short channel id — confirm at `push_decode_update_add_htlcs`).
	decode_update_add_htlcs: Mutex<HashMap<u64, Vec<msgs::UpdateAddHTLC>>>,

	// Inbound payments which we can claim (or are in the process of claiming).
	claimable_payments: Mutex<ClaimablePayments>,

	// Outbound SCID aliases currently in use, to avoid collisions.
	outbound_scid_aliases: Mutex<HashSet<u64>>,

	// Map from funding outpoint to the owning peer, for channel lookup by outpoint.
	#[cfg(not(test))]
	outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,
	#[cfg(test)]
	pub(crate) outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,

	// Map from SCID (real or alias) to (peer, channel id) for forwarding lookups.
	#[cfg(test)]
	pub(super) short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, ChannelId)>>,
	#[cfg(not(test))]
	short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, ChannelId)>>,

	// Our own node id.
	our_network_pubkey: PublicKey,

	// Key material for generating/validating inbound payment secrets.
	inbound_payment_key: inbound_payment::ExpandedKey,

	// Secret randomness used to derive fake SCIDs.
	fake_scid_rand_bytes: [u8; 32],

	// Secret used to authenticate probing cookies.
	probing_cookie_secret: [u8; 32],

	// Secret used to derive inbound payment ids.
	inbound_payment_id_secret: [u8; 32],

	// Highest block timestamp we've seen, stored as a usize for atomic access.
	highest_seen_timestamp: AtomicUsize,

	// Per-peer state. The outer `FairRwLock` guards the peer map; each peer's state has its
	// own `Mutex`.
	#[cfg(not(any(test, feature = "_test_utils")))]
	per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<SP>>>>,
	#[cfg(any(test, feature = "_test_utils"))]
	pub(super) per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<SP>>>>,

	// Events pending surfacing to the user, each with an optional action to run once the
	// user has handled it.
	#[cfg(not(any(test, feature = "_test_utils")))]
	pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,

	// Guards against reentrant/concurrent event processing (see `process_events_body!`).
	pending_events_processor: AtomicBool,

	// Internal events to replay (e.g. monitor updates regenerated on startup).
	pending_background_events: Mutex<Vec<BackgroundEvent>>,
	// Read-held during normal operation; presumably write-held around (de)serialization to
	// quiesce all other activity.
	total_consistency_lock: RwLock<()>,
	// Per-batch funding state: (channel, peer, initial-monitor-update-complete) triplets,
	// keyed by the shared funding txid (see `handle_monitor_update_completion!`).
	funding_batch_states: Mutex<BTreeMap<Txid, Vec<(ChannelId, PublicKey, bool)>>>,

	// Set once startup background events have been processed; gates direct monitor updates
	// (see `handle_new_monitor_update!`).
	background_events_processed_since_startup: AtomicBool,

	// Wakes persisters/event handlers; paired with `needs_persist_flag`.
	event_persist_notifier: Notifier,
	needs_persist_flag: AtomicBool,

	// Queued BOLT12 offers messages awaiting an onion-message send.
	#[cfg(not(any(test, feature = "_test_utils")))]
	pending_offers_messages: Mutex<Vec<(OffersMessage, MessageSendInstructions)>>,
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) pending_offers_messages: Mutex<Vec<(OffersMessage, MessageSendInstructions)>>,
	// Queued async-payments onion messages.
	pending_async_payments_messages: Mutex<Vec<(AsyncPaymentsMessage, MessageSendInstructions)>>,

	// Gossip messages to broadcast to all peers (e.g. channel updates on force-close).
	pending_broadcast_messages: Mutex<Vec<MessageSendEvent>>,

	// Recent feerate samples as (u32, u32) pairs; presumably (block height, feerate) bounded
	// by `FEERATE_TRACKING_BLOCKS` — confirm at the push sites.
	last_days_feerates: Mutex<VecDeque<(u32, u32)>>,

	// BIP 353 human-readable-name resolution state (DNSSEC feature only).
	#[cfg(feature = "dnssec")]
	hrn_resolver: OMNameResolver,
	#[cfg(feature = "dnssec")]
	pending_dns_onion_messages: Mutex<Vec<(DNSResolverMessage, MessageSendInstructions)>>,

	// Test hook: overrides offer resolution for given human-readable names.
	#[cfg(feature = "_test_utils")]
	pub testing_dnssec_proof_offer_resolution_override: Mutex<HashMap<HumanReadableName, Offer>>,

	// --- Key/signing interfaces and logging ---
	#[cfg(test)]
	pub(super) entropy_source: ES,
	#[cfg(not(test))]
	entropy_source: ES,
	node_signer: NS,
	#[cfg(test)]
	pub(super) signer_provider: SP,
	#[cfg(not(test))]
	signer_provider: SP,

	logger: L,
}
2669
/// Chain-related parameters used when constructing a new `ChannelManager`.
#[derive(Clone, Copy, PartialEq)]
pub struct ChainParameters {
	/// The network we are operating on.
	pub network: Network,

	/// The best block known at construction time.
	pub best_block: BestBlock,
}
2685
/// What a `PersistenceNotifierGuard` should do when dropped: persist (and wake waiters),
/// skip persisting but still wake event handlers, or do nothing.
#[derive(Copy, Clone, PartialEq)]
#[must_use]
enum NotifyOption {
	DoPersist,
	SkipPersistHandleEvents,
	SkipPersistNoEvents,
}
2693
/// RAII guard which holds a read lock on `ChannelManager::total_consistency_lock` for its
/// lifetime and, on drop, runs `should_persist` to decide whether to set the
/// needs-persist flag and/or notify waiters (see the `Drop` impl below).
struct PersistenceNotifierGuard<'a, F: FnMut() -> NotifyOption> {
	event_persist_notifier: &'a Notifier,
	needs_persist_flag: &'a AtomicBool,
	should_persist: F,
	// Held only for its RAII effect; released when the guard is dropped.
	_read_guard: RwLockReadGuard<'a, ()>,
}
2711
2712impl<'a> PersistenceNotifierGuard<'a, fn() -> NotifyOption> { fn notify_on_drop<C: AChannelManager>(cm: &'a C) -> PersistenceNotifierGuard<'a, impl FnMut() -> NotifyOption> {
2720 Self::optionally_notify(cm, || -> NotifyOption { NotifyOption::DoPersist })
2721 }
2722
2723 fn optionally_notify<F: FnMut() -> NotifyOption, C: AChannelManager>(cm: &'a C, mut persist_check: F)
2724 -> PersistenceNotifierGuard<'a, impl FnMut() -> NotifyOption> {
2725 let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
2726 let force_notify = cm.get_cm().process_background_events();
2727
2728 PersistenceNotifierGuard {
2729 event_persist_notifier: &cm.get_cm().event_persist_notifier,
2730 needs_persist_flag: &cm.get_cm().needs_persist_flag,
2731 should_persist: move || {
2732 let notify = persist_check();
2735 match (notify, force_notify) {
2736 (NotifyOption::DoPersist, _) => NotifyOption::DoPersist,
2737 (_, NotifyOption::DoPersist) => NotifyOption::DoPersist,
2738 (NotifyOption::SkipPersistHandleEvents, _) => NotifyOption::SkipPersistHandleEvents,
2739 (_, NotifyOption::SkipPersistHandleEvents) => NotifyOption::SkipPersistHandleEvents,
2740 _ => NotifyOption::SkipPersistNoEvents,
2741 }
2742 },
2743 _read_guard: read_guard,
2744 }
2745 }
2746
2747 fn optionally_notify_skipping_background_events<F: Fn() -> NotifyOption, C: AChannelManager>
2751 (cm: &'a C, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
2752 let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
2753
2754 PersistenceNotifierGuard {
2755 event_persist_notifier: &cm.get_cm().event_persist_notifier,
2756 needs_persist_flag: &cm.get_cm().needs_persist_flag,
2757 should_persist: persist_check,
2758 _read_guard: read_guard,
2759 }
2760 }
2761}
2762
2763impl<'a, F: FnMut() -> NotifyOption> Drop for PersistenceNotifierGuard<'a, F> {
2764 fn drop(&mut self) {
2765 match (self.should_persist)() {
2766 NotifyOption::DoPersist => {
2767 self.needs_persist_flag.store(true, Ordering::Release);
2768 self.event_persist_notifier.notify()
2769 },
2770 NotifyOption::SkipPersistHandleEvents =>
2771 self.event_persist_notifier.notify(),
2772 NotifyOption::SkipPersistNoEvents => {},
2773 }
2774 }
2775}
2776
/// One day of blocks. NOTE(review): the original documentation is not visible here —
/// presumably the `to_self_delay` we require of our counterparty; confirm at use sites.
pub const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
/// Two weeks of blocks — presumably the maximum delay we will accept from a counterparty.
pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;

/// The minimum CLTV expiry delta (42 blocks, roughly 7 hours) — see the sanity checks
/// below relating it to the claim/fail-back buffers.
pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6*7;
/// Two weeks of blocks — a CLTV value considered "far, far away".
pub(super) const CLTV_FAR_FAR_AWAY: u32 = 14 * 24 * 6;

/// The minimum CLTV delta for the final hop: the fail-back buffer plus three blocks of slack.
pub const MIN_FINAL_CLTV_EXPIRY_DELTA: u16 = HTLC_FAIL_BACK_BUFFER as u16 + 3;

// Compile-time sanity check: this const-evaluates only if MIN_CLTV_EXPIRY_DELTA covers the
// grace periods, claim buffer and reorg delay (u32 underflow is a const-eval error).
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;

// Compile-time sanity check: MIN_CLTV_EXPIRY_DELTA must also cover a grace period plus two
// claim buffers.
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;

/// Timer ticks to wait for all parts of an MPP payment before failing the received parts.
pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;

/// Timer ticks of peer disconnection after which we gossip a channel as disabled.
pub(crate) const DISABLE_GOSSIP_TICKS: u8 = 10;

/// Timer ticks of peer connection after which we gossip a channel as re-enabled.
pub(crate) const ENABLE_GOSSIP_TICKS: u8 = 5;

/// Maximum number of unfunded channels we will allow any single peer to have open at once.
const MAX_UNFUNDED_CHANS_PER_PEER: usize = 4;

/// Maximum number of peers which may concurrently have unfunded channels with us.
const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50;

/// Maximum number of connected peers with which we have no funded channels.
const MAX_NO_CHANNEL_PEERS: usize = 250;

/// The maximum expiry (24 hours) for which an offer/invoice is considered short-lived.
/// NOTE(review): exact consumer of this bound is outside this chunk — confirm there.
pub const MAX_SHORT_LIVED_RELATIVE_EXPIRY: Duration = Duration::from_secs(60 * 60 * 24);
2865
/// Details of an outbound payment in (or recently in) flight, one variant per lifecycle
/// stage.
#[derive(Debug, PartialEq)]
pub enum RecentPaymentDetails {
	/// We sent a BOLT12 invoice request and are awaiting the invoice before paying.
	AwaitingInvoice {
		/// Our identifier for this payment.
		payment_id: PaymentId,
	},
	/// The payment is currently in flight.
	Pending {
		/// Our identifier for this payment.
		payment_id: PaymentId,
		/// The hash of the payment.
		payment_hash: PaymentHash,
		/// The total amount, in msat, of the payment.
		total_msat: u64,
	},
	/// The payment succeeded.
	Fulfilled {
		/// Our identifier for this payment.
		payment_id: PaymentId,
		/// The payment hash, if known. NOTE(review): the case in which it is `None`
		/// (presumably legacy serialized state) is not visible in this chunk.
		payment_hash: Option<PaymentHash>,
	},
	/// The payment failed and was abandoned.
	Abandoned {
		/// Our identifier for this payment.
		payment_id: PaymentId,
		/// The hash of the payment.
		payment_hash: PaymentHash,
	},
}
2919
/// Information needed to build invoice route hints that terminate at a phantom node.
#[derive(Clone)]
pub struct PhantomRouteHints {
	/// The channels to include in the route hints.
	pub channels: Vec<ChannelDetails>,
	/// A (fake) short channel id representing the hop to the phantom node.
	pub phantom_scid: u64,
	/// The public key of the real node standing in front of the phantom.
	pub real_node_pubkey: PublicKey,
}
2933
/// Unwraps a `Result<_, MsgHandleErrInternal>`: on `Err`, force-closes the channel if the
/// error demands it, queues any resulting channel-update broadcast and error message to the
/// peer, and re-returns the error. Must NOT be called with `pending_events` or
/// `per_peer_state` locks held (asserted below), since it takes both.
macro_rules! handle_error {
	($self: ident, $internal: expr, $counterparty_node_id: expr) => { {
		// These locks are (re)taken below; holding them here would deadlock.
		debug_assert_ne!($self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
		debug_assert_ne!($self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);

		match $internal {
			Ok(msg) => Ok(msg),
			Err(MsgHandleErrInternal { err, shutdown_finish, .. }) => {
				let mut msg_event = None;

				if let Some((shutdown_res, update_option)) = shutdown_finish {
					let counterparty_node_id = shutdown_res.counterparty_node_id;
					let channel_id = shutdown_res.channel_id;
					let logger = WithContext::from(
						&$self.logger, Some(counterparty_node_id), Some(channel_id), None
					);
					log_error!(logger, "Force-closing channel: {}", err.err);

					$self.finish_close_channel(shutdown_res);
					// Broadcast the (disabled) channel update, if one was produced.
					if let Some(update) = update_option {
						let mut pending_broadcast_messages = $self.pending_broadcast_messages.lock().unwrap();
						pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
							msg: update
						});
					}
				} else {
					log_error!($self.logger, "Got non-closing error: {}", err.err);
				}

				// Unless the error is to be ignored, queue it for delivery to the peer.
				if let msgs::ErrorAction::IgnoreError = err.action {
				} else {
					msg_event = Some(events::MessageSendEvent::HandleError {
						node_id: $counterparty_node_id,
						action: err.action.clone()
					});
				}

				if let Some(msg_event) = msg_event {
					let per_peer_state = $self.per_peer_state.read().unwrap();
					if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) {
						let mut peer_state = peer_state_mutex.lock().unwrap();
						peer_state.pending_msg_events.push(msg_event);
					}
				}

				Err(err)
			},
		}
	} };
}
2987
/// Performs the parts of channel closure which must run while the owning peer's state lock
/// is held: applies any shutdown monitor update, records the closed channel's latest
/// monitor update id where relevant, and removes the channel from the outpoint and SCID
/// lookup maps.
macro_rules! locked_close_channel {
	($self: ident, $peer_state: expr, $channel_context: expr, $shutdown_res_mut: expr) => {{
		if let Some((_, funding_txo, _, update)) = $shutdown_res_mut.monitor_update.take() {
			handle_new_monitor_update!($self, funding_txo, update, $peer_state,
				$channel_context, REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER);
		}
		let update_id = $channel_context.get_latest_monitor_update_id();
		// Only track the closed channel's update id if its monitor may still see updates:
		// the funding reached the chain, could with a zero-conf minimum depth, or updates
		// beyond the initial one were already issued.
		if $channel_context.get_funding_tx_confirmation_height().is_some() || $channel_context.minimum_depth() == Some(0) || update_id > 1 {
			let chan_id = $channel_context.channel_id();
			$peer_state.closed_channel_monitor_update_ids.insert(chan_id, update_id);
		}
		if let Some(outpoint) = $channel_context.get_funding_txo() {
			$self.outpoint_to_peer.lock().unwrap().remove(&outpoint);
		}
		let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
		if let Some(short_id) = $channel_context.get_short_channel_id() {
			short_to_chan_info.remove(&short_id);
		} else {
			// No real SCID was ever assigned, so the outbound alias can be freed for reuse.
			let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel_context.outbound_scid_alias());
			debug_assert!(alias_removed);
		}
		short_to_chan_info.remove(&$channel_context.outbound_scid_alias());
	}}
}
3029
/// Converts a `ChannelError` into `(close_channel: bool, MsgHandleErrInternal)`. For
/// `Close` errors, force-shuts-down the channel (running `locked_close_channel!`) and
/// attaches the provided channel update; `Warn`/`Ignore` never close. The `FUNDED_CHANNEL`
/// / `UNFUNDED_CHANNEL` arms select whether a broadcastable channel update is produced,
/// and the final arm dispatches on the `ChannelPhase`.
macro_rules! convert_chan_phase_err {
	($self: ident, $peer_state: expr, $err: expr, $channel: expr, $channel_id: expr, MANUAL_CHANNEL_UPDATE, $channel_update: expr) => {
		match $err {
			ChannelError::Warn(msg) => {
				(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), *$channel_id))
			},
			ChannelError::Ignore(msg) => {
				(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), *$channel_id))
			},
			ChannelError::Close((msg, reason)) => {
				let logger = WithChannelContext::from(&$self.logger, &$channel.context, None);
				log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
				let mut shutdown_res = $channel.context.force_shutdown(true, reason);
				locked_close_channel!($self, $peer_state, &$channel.context, &mut shutdown_res);
				let err =
					MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $channel_update);
				(true, err)
			},
		}
	};
	($self: ident, $peer_state: expr, $err: expr, $channel: expr, $channel_id: expr, FUNDED_CHANNEL) => {
		convert_chan_phase_err!($self, $peer_state, $err, $channel, $channel_id, MANUAL_CHANNEL_UPDATE, { $self.get_channel_update_for_broadcast($channel).ok() })
	};
	($self: ident, $peer_state: expr, $err: expr, $channel: expr, $channel_id: expr, UNFUNDED_CHANNEL) => {
		// Unfunded channels have nothing on-chain, so there is no channel update to broadcast.
		convert_chan_phase_err!($self, $peer_state, $err, $channel, $channel_id, MANUAL_CHANNEL_UPDATE, None)
	};
	($self: ident, $peer_state: expr, $err: expr, $channel_phase: expr, $channel_id: expr) => {
		match $channel_phase {
			ChannelPhase::Funded(channel) => {
				convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, FUNDED_CHANNEL)
			},
			ChannelPhase::UnfundedOutboundV1(channel) => {
				convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, UNFUNDED_CHANNEL)
			},
			ChannelPhase::UnfundedInboundV1(channel) => {
				convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, UNFUNDED_CHANNEL)
			},
			ChannelPhase::UnfundedOutboundV2(channel) => {
				convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, UNFUNDED_CHANNEL)
			},
			ChannelPhase::UnfundedInboundV2(channel) => {
				convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, UNFUNDED_CHANNEL)
			},
		}
	};
}
3077
/// Unwraps a `Result` in a loop body: on `Err`, converts it via `convert_chan_phase_err!`
/// (removing the channel's map entry if the error requires closing) and `break`s with the
/// converted error.
macro_rules! break_chan_phase_entry {
	($self: ident, $peer_state: expr, $res: expr, $entry: expr) => {
		match $res {
			Ok(res) => res,
			Err(e) => {
				let key = *$entry.key();
				let (drop, res) = convert_chan_phase_err!($self, $peer_state, e, $entry.get_mut(), &key);
				if drop {
					$entry.remove_entry();
				}
				break Err(res);
			}
		}
	}
}
3093
/// Like `break_chan_phase_entry!` but for use in function bodies: on `Err`, converts the
/// error (removing the channel's map entry if the error requires closing) and `return`s it.
macro_rules! try_chan_phase_entry {
	($self: ident, $peer_state: expr, $res: expr, $entry: expr) => {
		match $res {
			Ok(res) => res,
			Err(e) => {
				let key = *$entry.key();
				let (drop, res) = convert_chan_phase_err!($self, $peer_state, e, $entry.get_mut(), &key);
				if drop {
					$entry.remove_entry();
				}
				return Err(res);
			}
		}
	}
}
3109
/// Removes a channel's map entry and runs the in-lock closure bookkeeping
/// (`locked_close_channel!`), evaluating to the removed channel.
macro_rules! remove_channel_phase {
	($self: ident, $peer_state: expr, $entry: expr, $shutdown_res_mut: expr) => {
		{
			let channel = $entry.remove_entry().1;
			locked_close_channel!($self, $peer_state, &channel.context(), $shutdown_res_mut);
			channel
		}
	}
}
3119
/// Queues a `channel_ready` message to the counterparty and registers the channel's
/// outbound SCID alias (and real SCID, once known) in the `short_to_chan_info` map,
/// asserting no SCID collides with a different channel.
macro_rules! send_channel_ready {
	($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{
		$pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
			node_id: $channel.context.get_counterparty_node_id(),
			msg: $channel_ready_msg,
		});
		let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
		// Re-inserting an identical mapping is fine; mapping to a different channel is a bug.
		let outbound_alias_insert = short_to_chan_info.insert($channel.context.outbound_scid_alias(), ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()));
		assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()),
			"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
		if let Some(real_scid) = $channel.context.get_short_channel_id() {
			let scid_insert = short_to_chan_info.insert(real_scid, ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()));
			assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()),
				"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
		}
	}}
}
/// Pushes a `FundingTxBroadcastSafe` event at most once per channel, guarded by the
/// channel context's emitted flag.
macro_rules! emit_funding_tx_broadcast_safe_event {
	($locked_events: expr, $channel: expr, $funding_txo: expr) => {
		if !$channel.context.funding_tx_broadcast_safe_event_emitted() {
			$locked_events.push_back((events::Event::FundingTxBroadcastSafe {
				channel_id: $channel.context.channel_id(),
				user_channel_id: $channel.context.get_user_id(),
				funding_txo: $funding_txo,
				counterparty_node_id: $channel.context.get_counterparty_node_id(),
				former_temporary_channel_id: $channel.context.temporary_channel_id()
					.expect("Unreachable: FundingTxBroadcastSafe event feature added to channel establishment process in LDK v0.0.124 where this should never be None."),
			}, None));
			$channel.context.set_funding_tx_broadcast_safe_event_emitted();
		}
	}
}
3154
/// Pushes a `ChannelPending` event at most once per channel, guarded by the channel
/// context's emitted flag.
macro_rules! emit_channel_pending_event {
	($locked_events: expr, $channel: expr) => {
		if $channel.context.should_emit_channel_pending_event() {
			$locked_events.push_back((events::Event::ChannelPending {
				channel_id: $channel.context.channel_id(),
				former_temporary_channel_id: $channel.context.temporary_channel_id(),
				counterparty_node_id: $channel.context.get_counterparty_node_id(),
				user_channel_id: $channel.context.get_user_id(),
				funding_txo: $channel.context.get_funding_txo().unwrap().into_bitcoin_outpoint(),
				channel_type: Some($channel.context.get_channel_type().clone()),
			}, None));
			$channel.context.set_channel_pending_event_emitted();
		}
	}
}
3170
/// Pushes a `ChannelReady` event at most once per channel. `ChannelPending` must already
/// have been emitted for the channel (debug-asserted).
macro_rules! emit_channel_ready_event {
	($locked_events: expr, $channel: expr) => {
		if $channel.context.should_emit_channel_ready_event() {
			debug_assert!($channel.context.channel_pending_event_emitted());
			$locked_events.push_back((events::Event::ChannelReady {
				channel_id: $channel.context.channel_id(),
				user_channel_id: $channel.context.get_user_id(),
				counterparty_node_id: $channel.context.get_counterparty_node_id(),
				channel_type: $channel.context.get_channel_type().clone(),
			}, None));
			$channel.context.set_channel_ready_event_emitted();
		}
	}
}
3185
/// Runs everything required once a channel's pending monitor update completes: restores the
/// channel's suspended message flow, flushes blocked monitor-update actions, finishes batch
/// funding if this was the last outstanding channel in a batch, and forwards/fails HTLCs
/// released by the resumption. Takes (and, midway, drops) the peer-state and per-peer-state
/// locks the caller passes in — the lock-drop order here is load-bearing.
macro_rules! handle_monitor_update_completion {
	($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
		let mut updates = $chan.monitor_updating_restored(&&logger,
			&$self.node_signer, $self.chain_hash, &$self.default_configuration,
			$self.best_block.read().unwrap().height);
		let counterparty_node_id = $chan.context.get_counterparty_node_id();
		// If the channel just became ready and usable, send our counterparty a private
		// channel_update so it can route payments through the channel immediately.
		let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
			if let Ok(msg) = $self.get_channel_update_for_unicast($chan) {
				Some(events::MessageSendEvent::SendChannelUpdate {
					node_id: counterparty_node_id,
					msg,
				})
			} else { None }
		} else { None };

		let update_actions = $peer_state.monitor_update_blocked_actions
			.remove(&$chan.context.channel_id()).unwrap_or(Vec::new());

		let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption(
			&mut $peer_state.pending_msg_events, $chan, updates.raa,
			updates.commitment_update, updates.order, updates.accepted_htlcs, updates.pending_update_adds,
			updates.funding_broadcastable, updates.channel_ready,
			updates.announcement_sigs, updates.tx_signatures);
		if let Some(upd) = channel_update {
			$peer_state.pending_msg_events.push(upd);
		}

		let channel_id = $chan.context.channel_id();
		let unbroadcasted_batch_funding_txid = $chan.context.unbroadcasted_batch_funding_txid();
		// The remaining work takes other locks; drop the channel locks first to keep the
		// lock order consistent.
		core::mem::drop($peer_state_lock);
		core::mem::drop($per_peer_state_lock);

		// If this channel is part of a not-yet-broadcast batch funding, mark it complete
		// and, if it was the last one, broadcast the shared funding transaction.
		if let Some(txid) = unbroadcasted_batch_funding_txid {
			let mut funding_batch_states = $self.funding_batch_states.lock().unwrap();
			let mut batch_completed = false;
			if let Some(batch_state) = funding_batch_states.get_mut(&txid) {
				let channel_state = batch_state.iter_mut().find(|(chan_id, pubkey, _)| (
					*chan_id == channel_id &&
					*pubkey == counterparty_node_id
				));
				if let Some(channel_state) = channel_state {
					channel_state.2 = true;
				} else {
					debug_assert!(false, "Missing channel batch state for channel which completed initial monitor update");
				}
				batch_completed = batch_state.iter().all(|(_, _, completed)| *completed);
			} else {
				debug_assert!(false, "Missing batch state for channel which completed initial monitor update");
			}

			if batch_completed {
				let removed_batch_state = funding_batch_states.remove(&txid).into_iter().flatten();
				let per_peer_state = $self.per_peer_state.read().unwrap();
				let mut batch_funding_tx = None;
				for (channel_id, counterparty_node_id, _) in removed_batch_state {
					if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
						let mut peer_state = peer_state_mutex.lock().unwrap();
						if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
							batch_funding_tx = batch_funding_tx.or_else(|| chan.context.unbroadcasted_funding());
							chan.set_batch_ready();
							let mut pending_events = $self.pending_events.lock().unwrap();
							emit_channel_pending_event!(pending_events, chan);
						}
					}
				}
				if let Some(tx) = batch_funding_tx {
					log_info!($self.logger, "Broadcasting batch funding transaction with txid {}", tx.compute_txid());
					$self.tx_broadcaster.broadcast_transactions(&[&tx]);
				}
			}
		}

		$self.handle_monitor_update_completion_actions(update_actions);

		if let Some(forwards) = htlc_forwards {
			$self.forward_htlcs(&mut [forwards][..]);
		}
		if let Some(decode) = decode_update_add_htlcs {
			$self.push_decode_update_add_htlcs(decode);
		}
		$self.finalize_claims(updates.finalized_claimed_htlcs);
		for failure in updates.failed_htlcs.drain(..) {
			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
			$self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
		}
	} }
}
3283
/// Applies a new `ChannelMonitorUpdate` (or handles the result of one), evaluating to
/// `true` iff the update completed synchronously. Tracks the update in the peer's
/// `in_flight_monitor_updates` (deduplicating if it is already queued), defers it as a
/// background event if startup background processing hasn't finished, and on completion
/// runs the arm-specific `$completed` logic (e.g. resuming the channel or flushing blocked
/// actions). Panics on `UnrecoverableError` from the persister.
macro_rules! handle_new_monitor_update {
	// Innermost arm: interpret the persister's status for an already-dispatched update.
	($self: ident, $update_res: expr, $logger: expr, $channel_id: expr, _internal, $completed: expr) => { {
		debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire));
		match $update_res {
			ChannelMonitorUpdateStatus::UnrecoverableError => {
				let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
				log_error!($logger, "{}", err_str);
				panic!("{}", err_str);
			},
			ChannelMonitorUpdateStatus::InProgress => {
				log_debug!($logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
					$channel_id);
				false
			},
			ChannelMonitorUpdateStatus::Completed => {
				$completed;
				true
			},
		}
	} };
	// The channel's very first monitor persistence, via `chain_monitor.watch_channel`
	// upstream; on completion the channel is resumed in full.
	($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, INITIAL_MONITOR) => {
		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
		handle_new_monitor_update!($self, $update_res, logger, $chan.context.channel_id(), _internal,
			handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
	};
	// Shared plumbing: record the update as in-flight (reusing an identical queued entry if
	// present), then either dispatch it now or queue it as a startup background event.
	(
		$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $logger: expr,
		$chan_id: expr, $counterparty_node_id: expr, $in_flight_updates: ident, $update_idx: ident,
		_internal_outer, $completed: expr
	) => { {
		$in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
			.or_insert_with(Vec::new);
		$update_idx = $in_flight_updates.iter().position(|upd| upd == &$update)
			.unwrap_or_else(|| {
				$in_flight_updates.push($update);
				$in_flight_updates.len() - 1
			});
		if $self.background_events_processed_since_startup.load(Ordering::Acquire) {
			let update_res = $self.chain_monitor.update_channel($funding_txo, &$in_flight_updates[$update_idx]);
			handle_new_monitor_update!($self, update_res, $logger, $chan_id, _internal, $completed)
		} else {
			// Too early to touch the chain monitor directly; replay this update once
			// startup background-event processing runs.
			let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
				counterparty_node_id: $counterparty_node_id,
				funding_txo: $funding_txo,
				channel_id: $chan_id,
				update: $in_flight_updates[$update_idx].clone(),
			};
			$self.pending_background_events.lock().unwrap().push(event);
			false
		}
	} };
	// Caller keeps its locks; completion only clears the in-flight entry, with blocked
	// actions processed later (used from `locked_close_channel!`).
	(
		$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $chan_context: expr,
		REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER
	) => { {
		let logger = WithChannelContext::from(&$self.logger, &$chan_context, None);
		let chan_id = $chan_context.channel_id();
		let counterparty_node_id = $chan_context.get_counterparty_node_id();
		let in_flight_updates;
		let idx;
		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
			counterparty_node_id, in_flight_updates, idx, _internal_outer,
			{
				let _ = in_flight_updates.remove(idx);
			})
	} };
	// Update against an already-closed channel: once no updates remain in flight, drop the
	// locks and run any monitor-update-blocked actions.
	(
		$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
		$per_peer_state_lock: expr, $counterparty_node_id: expr, $channel_id: expr, POST_CHANNEL_CLOSE
	) => { {
		let logger = WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None);
		let in_flight_updates;
		let idx;
		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger,
			$channel_id, $counterparty_node_id, in_flight_updates, idx, _internal_outer,
			{
				let _ = in_flight_updates.remove(idx);
				if in_flight_updates.is_empty() {
					let update_actions = $peer_state.monitor_update_blocked_actions
						.remove(&$channel_id).unwrap_or(Vec::new());

					mem::drop($peer_state_lock);
					mem::drop($per_peer_state_lock);

					$self.handle_monitor_update_completion_actions(update_actions);
				}
			})
	} };
	// Standard case for an open channel: once nothing is in flight and no updates are
	// blocked, run full channel resumption.
	(
		$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
		$per_peer_state_lock: expr, $chan: expr
	) => { {
		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
		let chan_id = $chan.context.channel_id();
		let counterparty_node_id = $chan.context.get_counterparty_node_id();
		let in_flight_updates;
		let idx;
		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
			counterparty_node_id, in_flight_updates, idx, _internal_outer,
			{
				let _ = in_flight_updates.remove(idx);
				if in_flight_updates.is_empty() && $chan.blocked_monitor_updates_pending() == 0 {
					handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
				}
			})
	} };
}
3405
/// Shared body of the event-processing loops: `$event_to_handle` is assigned each pending
/// event in turn and `$handle_event` is the expression handling it, yielding a `Result`.
/// Loops until the pending-event queue is drained or the handler reports a failure.
macro_rules! process_events_body {
	($self: expr, $event_to_handle: expr, $handle_event: expr) => {
		let mut handling_failed = false;
		let mut processed_all_events = false;
		while !handling_failed && !processed_all_events {
			// Claim the single-processor flag; if another thread is already processing
			// events, let it do the work and return immediately.
			if $self.pending_events_processor.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_err() {
				return;
			}

			let mut result;

			{
				// Hold the read side of the consistency lock while processing background and
				// monitor events so they cannot race with a write-locked operation.
				let _read_guard = $self.total_consistency_lock.read().unwrap();

				result = $self.process_background_events();

				// Monitor events may change channel state, which requires persistence.
				if $self.process_pending_monitor_events() {
					result = NotifyOption::DoPersist;
				}
			}

			// Snapshot the queue so we don't hold the lock across user event-handling code;
			// the handler itself may push further events.
			let pending_events = $self.pending_events.lock().unwrap().clone();
			if !pending_events.is_empty() {
				result = NotifyOption::DoPersist;
			}

			let mut post_event_actions = Vec::new();

			// Count how many events were successfully handled so only those are drained below.
			let mut num_handled_events = 0;
			for (event, action_opt) in pending_events {
				log_trace!($self.logger, "Handling event {:?}...", event);
				$event_to_handle = event;
				let event_handling_result = $handle_event;
				log_trace!($self.logger, "Done handling event, result: {:?}", event_handling_result);
				match event_handling_result {
					Ok(()) => {
						if let Some(action) = action_opt {
							post_event_actions.push(action);
						}
						num_handled_events += 1;
					}
					Err(_e) => {
						// Stop on failure; unhandled events remain queued for a later retry.
						handling_failed = true;
						break;
					}
				}
			}

			{
				// Drop exactly the events we handled, then release the processor flag.
				let mut pending_events = $self.pending_events.lock().unwrap();
				pending_events.drain(..num_handled_events);
				processed_all_events = pending_events.is_empty();
				$self.pending_events_processor.store(false, Ordering::Release);
			}

			if !post_event_actions.is_empty() {
				$self.handle_post_event_actions(post_event_actions);
				// Post-event actions may have queued new events, so loop around again.
				processed_all_events = false;
			}

			match result {
				NotifyOption::DoPersist => {
					$self.needs_persist_flag.store(true, Ordering::Release);
					$self.event_persist_notifier.notify();
				},
				NotifyOption::SkipPersistHandleEvents =>
					$self.event_persist_notifier.notify(),
				NotifyOption::SkipPersistNoEvents => {},
			}
		}
	}
}
3489
3490impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
3491where
3492 M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
3493 T::Target: BroadcasterInterface,
3494 ES::Target: EntropySource,
3495 NS::Target: NodeSigner,
3496 SP::Target: SignerProvider,
3497 F::Target: FeeEstimator,
3498 R::Target: Router,
3499 MR::Target: MessageRouter,
3500 L::Target: Logger,
3501{
	/// Constructs a new `ChannelManager` from its dependencies and configuration.
	///
	/// `params` provides the network and the current best block; `current_timestamp` is the
	/// current UNIX timestamp in seconds, seeding `highest_seen_timestamp`.
	pub fn new(
		fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, message_router: MR, logger: L,
		entropy_source: ES, node_signer: NS, signer_provider: SP, config: UserConfig,
		params: ChainParameters, current_timestamp: u32,
	) -> Self {
		let mut secp_ctx = Secp256k1::new();
		// Randomize the secp256k1 context to harden against side-channel attacks.
		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
		let expanded_inbound_key = node_signer.get_inbound_payment_key();
		ChannelManager {
			default_configuration: config.clone(),
			chain_hash: ChainHash::using_genesis_block(params.network),
			fee_estimator: LowerBoundedFeeEstimator::new(fee_est),
			chain_monitor,
			tx_broadcaster,
			router,
			message_router,

			best_block: RwLock::new(params.best_block),

			outbound_scid_aliases: Mutex::new(new_hash_set()),
			pending_outbound_payments: OutboundPayments::new(new_hash_map()),
			forward_htlcs: Mutex::new(new_hash_map()),
			decode_update_add_htlcs: Mutex::new(new_hash_map()),
			claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: new_hash_map(), pending_claiming_payments: new_hash_map() }),
			pending_intercepted_htlcs: Mutex::new(new_hash_map()),
			outpoint_to_peer: Mutex::new(new_hash_map()),
			short_to_chan_info: FairRwLock::new(new_hash_map()),

			our_network_pubkey: node_signer.get_node_id(Recipient::Node).unwrap(),
			secp_ctx,

			inbound_payment_key: expanded_inbound_key,
			// Secrets below are generated fresh per instance from the entropy source.
			fake_scid_rand_bytes: entropy_source.get_secure_random_bytes(),

			probing_cookie_secret: entropy_source.get_secure_random_bytes(),
			inbound_payment_id_secret: entropy_source.get_secure_random_bytes(),

			highest_seen_timestamp: AtomicUsize::new(current_timestamp as usize),

			per_peer_state: FairRwLock::new(new_hash_map()),

			pending_events: Mutex::new(VecDeque::new()),
			pending_events_processor: AtomicBool::new(false),
			pending_background_events: Mutex::new(Vec::new()),
			total_consistency_lock: RwLock::new(()),
			background_events_processed_since_startup: AtomicBool::new(false),
			event_persist_notifier: Notifier::new(),
			needs_persist_flag: AtomicBool::new(false),
			funding_batch_states: Mutex::new(BTreeMap::new()),

			pending_offers_messages: Mutex::new(Vec::new()),
			pending_async_payments_messages: Mutex::new(Vec::new()),
			pending_broadcast_messages: Mutex::new(Vec::new()),

			last_days_feerates: Mutex::new(VecDeque::new()),

			entropy_source,
			node_signer,
			signer_provider,

			logger,

			#[cfg(feature = "dnssec")]
			hrn_resolver: OMNameResolver::new(current_timestamp, params.best_block.height),
			#[cfg(feature = "dnssec")]
			pending_dns_onion_messages: Mutex::new(Vec::new()),

			#[cfg(feature = "_test_utils")]
			testing_dnssec_proof_offer_resolution_override: Mutex::new(new_hash_map()),
		}
	}
3590
3591 pub fn get_current_default_configuration(&self) -> &UserConfig {
3593 &self.default_configuration
3594 }
3595
	/// Test-only wrapper around [`Self::create_and_insert_outbound_scid_alias`].
	#[cfg(test)]
	pub fn create_and_insert_outbound_scid_alias_for_test(&self) -> u64 {
		self.create_and_insert_outbound_scid_alias()
	}
3600
	/// Generates a fresh outbound SCID alias and records it in `outbound_scid_aliases`,
	/// retrying on collision. Panics after over 1,000,000 failed attempts.
	fn create_and_insert_outbound_scid_alias(&self) -> u64 {
		let height = self.best_block.read().unwrap().height;
		let mut outbound_scid_alias = 0;
		let mut i = 0;
		loop {
			// When fuzzing, use deterministic incrementing aliases instead of the RNG-based
			// fake-SCID generator so runs are reproducible.
			if cfg!(fuzzing) { outbound_scid_alias += 1;
			} else {
				outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
			}
			// Zero is not a valid alias; `insert` returning false means it's already in use.
			if outbound_scid_alias != 0 && self.outbound_scid_aliases.lock().unwrap().insert(outbound_scid_alias) {
				break;
			}
			i += 1;
			if i > 1_000_000 { panic!("Your RNG is busted or we ran out of possible outbound SCID aliases (which should never happen before we run out of memory to store channels"); }
		}
		outbound_scid_alias
	}
3619
	/// Creates a new outbound channel to the given remote node, queueing the `open_channel`
	/// message to be sent. Returns the temporary channel id identifying the channel until
	/// funding; fails if the value is below 1000 sats or the peer is not connected.
	pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u128, temporary_channel_id: Option<ChannelId>, override_config: Option<UserConfig>) -> Result<ChannelId, APIError> {
		if channel_value_satoshis < 1000 {
			return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
		}

		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		// The guard above must have taken the consistency lock; verify in debug builds.
		debug_assert!(&self.total_consistency_lock.try_write().is_err());

		let per_peer_state = self.per_peer_state.read().unwrap();

		let peer_state_mutex = per_peer_state.get(&their_network_key)
			.ok_or_else(|| APIError::APIMisuseError{ err: format!("Not connected to node: {}", their_network_key) })?;

		let mut peer_state = peer_state_mutex.lock().unwrap();

		// A caller-provided temporary channel id must not collide with an existing channel.
		if let Some(temporary_channel_id) = temporary_channel_id {
			if peer_state.channel_by_id.contains_key(&temporary_channel_id) {
				return Err(APIError::APIMisuseError{ err: format!("Channel with temporary channel ID {} already exists!", temporary_channel_id)});
			}
		}

		let mut channel = {
			let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
			let their_features = &peer_state.latest_features;
			let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
			match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key,
				their_features, channel_value_satoshis, push_msat, user_channel_id, config,
				self.best_block.read().unwrap().height, outbound_scid_alias, temporary_channel_id, &*self.logger)
			{
				Ok(res) => res,
				Err(e) => {
					// Creation failed, so release the alias we reserved above.
					self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
					return Err(e);
				},
			}
		};
		let logger = WithChannelContext::from(&self.logger, &channel.context, None);
		let res = channel.get_open_channel(self.chain_hash, &&logger);

		let temporary_channel_id = channel.context.channel_id();
		match peer_state.channel_by_id.entry(temporary_channel_id) {
			hash_map::Entry::Occupied(_) => {
				// A generated id colliding with an existing channel implies a broken RNG.
				if cfg!(fuzzing) {
					return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
				} else {
					panic!("RNG is bad???");
				}
			},
			hash_map::Entry::Vacant(entry) => { entry.insert(ChannelPhase::UnfundedOutboundV1(channel)); }
		}

		// Queue the open_channel message for delivery to the peer, if one was produced.
		if let Some(msg) = res {
			peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
				node_id: their_network_key,
				msg,
			});
		}
		Ok(temporary_channel_id)
	}
3712
3713 fn list_funded_channels_with_filter<Fn: FnMut(&(&ChannelId, &Channel<SP>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
3714 let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
3721 {
3722 let best_block_height = self.best_block.read().unwrap().height;
3723 let per_peer_state = self.per_peer_state.read().unwrap();
3724 for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
3725 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
3726 let peer_state = &mut *peer_state_lock;
3727 res.extend(peer_state.channel_by_id.iter()
3728 .filter_map(|(chan_id, phase)| match phase {
3729 ChannelPhase::Funded(chan) => Some((chan_id, chan)),
3731 _ => None,
3732 })
3733 .filter(f)
3734 .map(|(_channel_id, channel)| {
3735 ChannelDetails::from_channel_context(&channel.context, best_block_height,
3736 peer_state.latest_features.clone(), &self.fee_estimator)
3737 })
3738 );
3739 }
3740 }
3741 res
3742 }
3743
3744 pub fn list_channels(&self) -> Vec<ChannelDetails> {
3747 let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
3754 {
3755 let best_block_height = self.best_block.read().unwrap().height;
3756 let per_peer_state = self.per_peer_state.read().unwrap();
3757 for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
3758 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
3759 let peer_state = &mut *peer_state_lock;
3760 for context in peer_state.channel_by_id.iter().map(|(_, phase)| phase.context()) {
3761 let details = ChannelDetails::from_channel_context(context, best_block_height,
3762 peer_state.latest_features.clone(), &self.fee_estimator);
3763 res.push(details);
3764 }
3765 }
3766 }
3767 res
3768 }
3769
	/// Returns [`ChannelDetails`] for only those funded channels whose context reports as
	/// live (see `ChannelContext::is_live`), i.e. channels usable for sending payments.
	pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
		self.list_funded_channels_with_filter(|&(_, ref channel)| channel.context.is_live())
	}
3782
3783 pub fn list_channels_with_counterparty(&self, counterparty_node_id: &PublicKey) -> Vec<ChannelDetails> {
3785 let best_block_height = self.best_block.read().unwrap().height;
3786 let per_peer_state = self.per_peer_state.read().unwrap();
3787
3788 if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
3789 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
3790 let peer_state = &mut *peer_state_lock;
3791 let features = &peer_state.latest_features;
3792 let context_to_details = |context| {
3793 ChannelDetails::from_channel_context(context, best_block_height, features.clone(), &self.fee_estimator)
3794 };
3795 return peer_state.channel_by_id
3796 .iter()
3797 .map(|(_, phase)| phase.context())
3798 .map(context_to_details)
3799 .collect();
3800 }
3801 vec![]
3802 }
3803
	/// Returns a summary of outbound payments we are tracking: awaiting-invoice, pending
	/// (retryable), abandoned, and fulfilled. `Legacy` payments are not reported.
	pub fn list_recent_payments(&self) -> Vec<RecentPaymentDetails> {
		self.pending_outbound_payments.pending_outbound_payments.lock().unwrap().iter()
			.filter_map(|(payment_id, pending_outbound_payment)| match pending_outbound_payment {
				// Payments not yet sent out because we're waiting on an offer/invoice.
				PendingOutboundPayment::AwaitingInvoice { .. }
				| PendingOutboundPayment::AwaitingOffer { .. }
				| PendingOutboundPayment::InvoiceReceived { .. } =>
				{
					Some(RecentPaymentDetails::AwaitingInvoice { payment_id: *payment_id })
				},
				// Static-invoice (async payment) flows are also reported as awaiting-invoice.
				PendingOutboundPayment::StaticInvoiceReceived { .. } => {
					Some(RecentPaymentDetails::AwaitingInvoice { payment_id: *payment_id })
				},
				PendingOutboundPayment::Retryable { payment_hash, total_msat, .. } => {
					Some(RecentPaymentDetails::Pending {
						payment_id: *payment_id,
						payment_hash: *payment_hash,
						total_msat: *total_msat,
					})
				},
				PendingOutboundPayment::Abandoned { payment_hash, .. } => {
					Some(RecentPaymentDetails::Abandoned { payment_id: *payment_id, payment_hash: *payment_hash })
				},
				PendingOutboundPayment::Fulfilled { payment_hash, .. } => {
					Some(RecentPaymentDetails::Fulfilled { payment_id: *payment_id, payment_hash: *payment_hash })
				},
				PendingOutboundPayment::Legacy { .. } => None
			})
			.collect()
	}
3842
	/// Begins a cooperative close of the given channel: generates and queues the `shutdown`
	/// message, applies any resulting monitor update, and fails back HTLCs that cannot
	/// complete. Unfunded channels are instead immediately force-shutdown (nothing on chain).
	fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

		let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new();
		let mut shutdown_result = None;

		{
			let per_peer_state = self.per_peer_state.read().unwrap();

			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
				.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;

			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			let peer_state = &mut *peer_state_lock;

			match peer_state.channel_by_id.entry(channel_id.clone()) {
				hash_map::Entry::Occupied(mut chan_phase_entry) => {
					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
						let funding_txo_opt = chan.context.get_funding_txo();
						let their_features = &peer_state.latest_features;
						let (shutdown_msg, mut monitor_update_opt, htlcs) =
							chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
						// HTLCs which can no longer complete are failed back below, after locks drop.
						failed_htlcs = htlcs;

						peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
							node_id: *counterparty_node_id,
							msg: shutdown_msg,
						});

						debug_assert!(monitor_update_opt.is_none() || !chan.is_shutdown(),
							"We can't both complete shutdown and generate a monitor update");

						if let Some(monitor_update) = monitor_update_opt.take() {
							handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
								peer_state_lock, peer_state, per_peer_state, chan);
						}
					} else {
						// Unfunded channel: force-shutdown without broadcasting (nothing to broadcast)
						// and finish the closure once locks are released.
						let mut shutdown_res = chan_phase_entry.get_mut().context_mut()
							.force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) });
						remove_channel_phase!(self, peer_state, chan_phase_entry, shutdown_res);
						shutdown_result = Some(shutdown_res);
					}
				},
				hash_map::Entry::Vacant(_) => {
					return Err(APIError::ChannelUnavailable {
						err: format!(
							"Channel with id {} not found for the passed counterparty node_id {}",
							channel_id, counterparty_node_id,
						)
					});
				},
			}
		}

		// Fail HTLCs backwards with permanent_channel_failure (PERM|8) now that no locks are held.
		for htlc_source in failed_htlcs.drain(..) {
			let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
			let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id };
			self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
		}

		if let Some(shutdown_result) = shutdown_result {
			self.finish_close_channel(shutdown_result);
		}

		Ok(())
	}
3913
	/// Begins a cooperative close of the channel with the given counterparty, using default
	/// feerate and shutdown-script selection (no overrides).
	pub fn close_channel(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey) -> Result<(), APIError> {
		self.close_channel_internal(channel_id, counterparty_node_id, None, None)
	}
3940
	/// As [`Self::close_channel`] but allows overriding the target closing feerate and/or the
	/// shutdown script the channel funds should be sent to.
	pub fn close_channel_with_feerate_and_script(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
		self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
	}
3973
	/// Applies a [`ChannelMonitorUpdate`] for a channel which is (or is being) closed. If the
	/// channel is still present and funded we use the normal update path; otherwise the
	/// POST_CHANNEL_CLOSE variant, which also runs blocked completion actions when done.
	fn apply_post_close_monitor_update(
		&self, counterparty_node_id: PublicKey, channel_id: ChannelId, funding_txo: OutPoint,
		monitor_update: ChannelMonitorUpdate,
	) {
		let per_peer_state = self.per_peer_state.read().unwrap();
		let mut peer_state_lock = per_peer_state.get(&counterparty_node_id)
			.expect("We must always have a peer entry for a peer with which we have channels that have ChannelMonitors")
			.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		match peer_state.channel_by_id.entry(channel_id) {
			hash_map::Entry::Occupied(mut chan_phase) => {
				if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
					handle_new_monitor_update!(self, funding_txo,
						monitor_update, peer_state_lock, peer_state, per_peer_state, chan);
					return;
				} else {
					debug_assert!(false, "We shouldn't have an update for a non-funded channel");
				}
			},
			hash_map::Entry::Vacant(_) => {},
		}

		// Channel already removed from the map — apply via the post-close path.
		handle_new_monitor_update!(
			self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
			counterparty_node_id, channel_id, POST_CHANNEL_CLOSE
		);
	}
4004
	/// Completes the closure of a channel after it has been removed from the channel maps:
	/// fails back in-flight HTLCs, applies any leftover monitor update, runs blocked
	/// monitor-update actions, cleans up batch-funding state, and queues the
	/// `ChannelClosed`/`DiscardFunding` events.
	fn finish_close_channel(&self, mut shutdown_res: ShutdownResult) {
		// Callers must not hold peer-state locks: we take them below and may recurse into
		// ourselves for other channels of the same funding batch.
		debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
		#[cfg(debug_assertions)]
		for (_, peer) in self.per_peer_state.read().unwrap().iter() {
			debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
		}

		let logger = WithContext::from(
			&self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id), None
		);

		log_debug!(logger, "Finishing closure of channel due to {} with {} HTLCs to fail",
			shutdown_res.closure_reason, shutdown_res.dropped_outbound_htlcs.len());
		// Fail back all HTLCs which were in flight over the now-closed channel with
		// permanent_channel_failure (PERM|8).
		for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
			let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
			let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
			self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
		}
		if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
			// This shouldn't happen (the update should have been applied earlier), but apply
			// it anyway so we never drop a monitor update on the floor.
			debug_assert!(false, "This should have been handled in `locked_close_channel`");
			self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
		}
		// Only run blocked monitor-update completion actions once startup background events
		// have been processed.
		if self.background_events_processed_since_startup.load(Ordering::Acquire) {
			if let Some(funding_txo) = shutdown_res.channel_funding_txo {
				let per_peer_state = self.per_peer_state.read().unwrap();
				if let Some(peer_state_mtx) = per_peer_state.get(&shutdown_res.counterparty_node_id) {
					let mut peer_state = peer_state_mtx.lock().unwrap();
					// No in-flight monitor updates remain — safe to run the blocked actions.
					if peer_state.in_flight_monitor_updates.get(&funding_txo).map(|l| l.is_empty()).unwrap_or(true) {
						let update_actions = peer_state.monitor_update_blocked_actions
							.remove(&shutdown_res.channel_id).unwrap_or(Vec::new());

						// Release locks before handling actions, which may re-take them.
						mem::drop(peer_state);
						mem::drop(per_peer_state);

						self.handle_monitor_update_completion_actions(update_actions);
					}
				}
			}
		}
		let mut shutdown_results = Vec::new();
		if let Some(txid) = shutdown_res.unbroadcasted_batch_funding_txid {
			// This channel was part of a batch funding whose transaction was never broadcast;
			// all other channels in the batch must be closed as well.
			let mut funding_batch_states = self.funding_batch_states.lock().unwrap();
			let affected_channels = funding_batch_states.remove(&txid).into_iter().flatten();
			let per_peer_state = self.per_peer_state.read().unwrap();
			let mut has_uncompleted_channel = None;
			for (channel_id, counterparty_node_id, state) in affected_channels {
				if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
					let mut peer_state = peer_state_mutex.lock().unwrap();
					if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) {
						let mut close_res = chan.context_mut().force_shutdown(false, ClosureReason::FundingBatchClosure);
						locked_close_channel!(self, &mut *peer_state, chan.context(), close_res);
						shutdown_results.push(close_res);
					}
				}
				has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state));
			}
			debug_assert!(
				has_uncompleted_channel.unwrap_or(true),
				"Closing a batch where all channels have completed initial monitor update",
			);
		}

		{
			let mut pending_events = self.pending_events.lock().unwrap();
			pending_events.push_back((events::Event::ChannelClosed {
				channel_id: shutdown_res.channel_id,
				user_channel_id: shutdown_res.user_channel_id,
				reason: shutdown_res.closure_reason,
				counterparty_node_id: Some(shutdown_res.counterparty_node_id),
				channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis),
				channel_funding_txo: shutdown_res.channel_funding_txo,
				last_local_balance_msat: Some(shutdown_res.last_local_balance_msat),
			}, None));

			// If the funding transaction was never broadcast, tell the user to discard it.
			if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx {
				let funding_info = if shutdown_res.is_manual_broadcast {
					FundingInfo::OutPoint {
						outpoint: shutdown_res.channel_funding_txo
							.expect("We had an unbroadcasted funding tx, so should also have had a funding outpoint"),
					}
				} else {
					FundingInfo::Tx{ transaction }
				};
				pending_events.push_back((events::Event::DiscardFunding {
					channel_id: shutdown_res.channel_id, funding_info
				}, None));
			}
		}
		// Recursively finish closure of any batch-mates force-closed above.
		for shutdown_result in shutdown_results.drain(..) {
			self.finish_close_channel(shutdown_result);
		}
	}
4108
	/// Force-closes the given channel (or drops a pending inbound channel request), returning
	/// the counterparty's node id on success. If `peer_msg` is set, the closure is attributed
	/// to a counterparty error message; otherwise it is holder-initiated, with `broadcast`
	/// controlling whether we broadcast our latest local commitment transaction.
	fn force_close_channel_with_peer(&self, channel_id: &ChannelId, peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool)
	-> Result<PublicKey, APIError> {
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(peer_node_id)
			.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?;
		let (update_opt, counterparty_node_id) = {
			let mut peer_state = peer_state_mutex.lock().unwrap();
			let closure_reason = if let Some(peer_msg) = peer_msg {
				ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) }
			} else {
				ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(broadcast) }
			};
			let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id), None);
			if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
				log_error!(logger, "Force-closing channel {}", channel_id);
				let (mut shutdown_res, update_opt) = match chan_phase_entry.get_mut() {
					ChannelPhase::Funded(ref mut chan) => {
						(
							chan.context.force_shutdown(broadcast, closure_reason),
							// Funded channels may warrant a broadcastable channel_update.
							self.get_channel_update_for_broadcast(&chan).ok(),
						)
					},
					ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) |
					ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => {
						// Unfunded channels have nothing on-chain to broadcast.
						(chan_phase_entry.get_mut().context_mut().force_shutdown(false, closure_reason), None)
					},
				};
				let chan_phase = remove_channel_phase!(self, peer_state, chan_phase_entry, shutdown_res);
				// Drop all locks before `finish_close_channel`, which asserts none are held.
				mem::drop(peer_state);
				mem::drop(per_peer_state);
				self.finish_close_channel(shutdown_res);
				(update_opt, chan_phase.context().get_counterparty_node_id())
			} else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() {
				// Not a live channel, but a pending inbound request — simply drop it.
				log_error!(logger, "Force-closing channel {}", &channel_id);
				(None, *peer_node_id)
			} else {
				return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, peer_node_id) });
			}
		};
		if let Some(update) = update_opt {
			// Queue the channel_update for broadcast to the network.
			let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
			pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
				msg: update
			});
		}

		Ok(counterparty_node_id)
	}
4164
	/// Force-closes the given channel and, on success, queues a `HandleError` message carrying
	/// `error_message` to the counterparty. `broadcast` controls whether our latest local
	/// commitment transaction is broadcast.
	fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, broadcast: bool, error_message: String)
	-> Result<(), APIError> {
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		log_debug!(self.logger,
			"Force-closing channel, The error message sent to the peer : {}", error_message);
		match self.force_close_channel_with_peer(channel_id, &counterparty_node_id, None, broadcast) {
			Ok(counterparty_node_id) => {
				// Queue the error message for the peer (if they're still in our peer map).
				let per_peer_state = self.per_peer_state.read().unwrap();
				if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
					let mut peer_state = peer_state_mutex.lock().unwrap();
					peer_state.pending_msg_events.push(
						events::MessageSendEvent::HandleError {
							node_id: counterparty_node_id,
							action: msgs::ErrorAction::SendErrorMessage {
								msg: msgs::ErrorMessage { channel_id: *channel_id, data: error_message }
							},
						}
					);
				}
				Ok(())
			},
			Err(e) => Err(e)
		}
	}
4189
	/// Force-closes the channel, broadcasting our latest local commitment transaction and
	/// sending `error_message` to the counterparty.
	pub fn force_close_broadcasting_latest_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String)
	-> Result<(), APIError> {
		self.force_close_sending_error(channel_id, counterparty_node_id, true, error_message)
	}
4202
	/// Force-closes the channel WITHOUT broadcasting our latest local commitment transaction,
	/// sending `error_message` to the counterparty.
	pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String)
	-> Result<(), APIError> {
		self.force_close_sending_error(channel_id, counterparty_node_id, false, error_message)
	}
4217
4218 pub fn force_close_all_channels_broadcasting_latest_txn(&self, error_message: String) {
4224 for chan in self.list_channels() {
4225 let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id, error_message.clone());
4226 }
4227 }
4228
4229 pub fn force_close_all_channels_without_broadcasting_txn(&self, error_message: String) {
4235 for chan in self.list_channels() {
4236 let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id, error_message.clone());
4237 }
4238 }
4239
	/// Checks whether the HTLC described by `msg`/`next_packet` may be forwarded over the
	/// given outgoing channel, returning a `(message, BOLT 4 failure code)` pair on rejection.
	fn can_forward_htlc_to_outgoing_channel(
		&self, chan: &mut Channel<SP>, msg: &msgs::UpdateAddHTLC, next_packet: &NextPacketDetails
	) -> Result<(), (&'static str, u16)> {
		// Forwarding to unannounced (private) channels requires an explicit config opt-in.
		if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
			return Err(("Refusing to forward to a private channel based on our config.", 0x4000 | 10));
		}
		// With SCID-privacy negotiated, only the outbound SCID alias may be used for routing.
		if chan.context.get_channel_type().supports_scid_privacy() && next_packet.outgoing_scid != chan.context.outbound_scid_alias() {
			return Err(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10));
		}

		if !chan.context.is_live() {
			// Distinguish a disabled (peer-disconnected) channel from one otherwise not ready.
			if !chan.context.is_enabled() {
				return Err(("Forwarding channel has been disconnected for some time.", 0x1000 | 20));
			} else {
				return Err(("Forwarding channel is not in a ready state.", 0x1000 | 7));
			}
		}
		if next_packet.outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { return Err(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11));
		}
		// Finally validate the onion's fee and CLTV delta against the channel's config.
		if let Err((err, code)) = chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value) {
			return Err((err, code));
		}

		Ok(())
	}
4279
4280 fn do_funded_channel_callback<X, C: Fn(&mut Channel<SP>) -> X>(
4283 &self, scid: u64, callback: C,
4284 ) -> Option<X> {
4285 let (counterparty_node_id, channel_id) = match self.short_to_chan_info.read().unwrap().get(&scid).cloned() {
4286 None => return None,
4287 Some((cp_id, id)) => (cp_id, id),
4288 };
4289 let per_peer_state = self.per_peer_state.read().unwrap();
4290 let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
4291 if peer_state_mutex_opt.is_none() {
4292 return None;
4293 }
4294 let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
4295 let peer_state = &mut *peer_state_lock;
4296 match peer_state.channel_by_id.get_mut(&channel_id).and_then(
4297 |chan_phase| if let ChannelPhase::Funded(chan) = chan_phase { Some(chan) } else { None }
4298 ) {
4299 None => None,
4300 Some(chan) => Some(callback(chan)),
4301 }
4302 }
4303
	/// Checks whether an HTLC may be forwarded as described by `next_packet_details`: either
	/// the outgoing channel accepts it, or the SCID is a valid fake SCID we generated for
	/// HTLC interception (when enabled) or phantom-node receives. Also validates the CLTV
	/// values against the current chain height.
	fn can_forward_htlc(
		&self, msg: &msgs::UpdateAddHTLC, next_packet_details: &NextPacketDetails
	) -> Result<(), (&'static str, u16)> {
		match self.do_funded_channel_callback(next_packet_details.outgoing_scid, |chan: &mut Channel<SP>| {
			self.can_forward_htlc_to_outgoing_channel(chan, msg, next_packet_details)
		}) {
			Some(Ok(())) => {},
			Some(Err(e)) => return Err(e),
			None => {
				// No channel matches the SCID — still acceptable if it is one of our fake
				// SCIDs for interception or phantom payments; otherwise reject.
				if (self.default_configuration.accept_intercept_htlcs &&
					fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, next_packet_details.outgoing_scid, &self.chain_hash)) ||
					fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, next_packet_details.outgoing_scid, &self.chain_hash)
				{} else {
					return Err(("Don't have available channel for forwarding as requested.", 0x4000 | 10));
				}
			}
		}

		// Validate the incoming vs outgoing CLTV against the next block height.
		let cur_height = self.best_block.read().unwrap().height + 1;
		if let Err((err_msg, err_code)) = check_incoming_htlc_cltv(
			cur_height, next_packet_details.outgoing_cltv_value, msg.cltv_expiry
		) {
			return Err((err_msg, err_code));
		}

		Ok(())
	}
4333
	/// Builds the failure message to send back for an HTLC we are rejecting at accept/forward
	/// time, encoding any extra failure data required by the BOLT 4 failure code.
	fn htlc_failure_from_update_add_err(
		&self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, err_msg: &'static str,
		err_code: u16, is_intro_node_blinded_forward: bool,
		shared_secret: &[u8; 32]
	) -> HTLCFailureMsg {
		let mut res = VecWriter(Vec::with_capacity(8 + 2));
		// UPDATE-flagged (0x1000) failure codes carry code-specific data, followed by a zero
		// length for the channel_update we choose not to include.
		if err_code & 0x1000 == 0x1000 {
			if err_code == 0x1000 | 11 || err_code == 0x1000 | 12 {
				// amount_below_minimum / fee_insufficient carry the HTLC amount.
				msg.amount_msat.write(&mut res).expect("Writes cannot fail");
			}
			else if err_code == 0x1000 | 13 {
				// incorrect_cltv_expiry carries the HTLC's cltv_expiry.
				msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
			}
			else if err_code == 0x1000 | 20 {
				// channel_disabled carries a flags field, always zero here.
				0u16.write(&mut res).expect("Writes cannot fail");
			}
			(0u16).write(&mut res).expect("Writes cannot fail");
		}

		log_info!(
			WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash)),
			"Failed to accept/forward incoming HTLC: {}", err_msg
		);
		// HTLCs inside a blinded path (with a blinding point set) must only ever fail with
		// invalid_onion_blinding to avoid leaking information about the blinded path.
		if msg.blinding_point.is_some() {
			return HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
				channel_id: msg.channel_id,
				htlc_id: msg.htlc_id,
				sha256_of_onion: [0; 32],
				failure_code: INVALID_ONION_BLINDING,
			});
		}

		// As the intro node of a blinded forward, likewise mask the real failure.
		let (err_code, err_data) = if is_intro_node_blinded_forward {
			(INVALID_ONION_BLINDING, &[0; 32][..])
		} else {
			(err_code, &res.0[..])
		};
		HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
			channel_id: msg.channel_id,
			htlc_id: msg.htlc_id,
			reason: HTLCFailReason::reason(err_code, err_data.to_vec())
				.get_encrypted_failure_packet(shared_secret, &None),
		})
	}
4382
4383 fn decode_update_add_htlc_onion(
4384 &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey,
4385 ) -> Result<
4386 (onion_utils::Hop, [u8; 32], Option<Result<PublicKey, secp256k1::Error>>), HTLCFailureMsg
4387 > {
4388 let (next_hop, shared_secret, next_packet_details_opt) = decode_incoming_update_add_htlc_onion(
4389 msg, &*self.node_signer, &*self.logger, &self.secp_ctx
4390 )?;
4391
4392 let next_packet_details = match next_packet_details_opt {
4393 Some(next_packet_details) => next_packet_details,
4394 None => return Ok((next_hop, shared_secret, None)),
4396 };
4397
4398 self.can_forward_htlc(&msg, &next_packet_details).map_err(|e| {
4401 let (err_msg, err_code) = e;
4402 self.htlc_failure_from_update_add_err(
4403 msg, counterparty_node_id, err_msg, err_code,
4404 next_hop.is_intro_node_blinded_forward(), &shared_secret
4405 )
4406 })?;
4407
4408 Ok((next_hop, shared_secret, Some(next_packet_details.next_packet_pubkey)))
4409 }
4410
	/// Turns a decoded onion hop for an inbound `update_add_htlc` into a forward/receive
	/// decision, producing the appropriate failure message on any error.
	fn construct_pending_htlc_status<'a>(
		&self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, shared_secret: [u8; 32],
		decoded_hop: onion_utils::Hop, allow_underpay: bool,
		next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>,
	) -> PendingHTLCStatus {
		// Maps an error into PendingHTLCStatus::Fail, using update_fail_malformed_htlc with
		// INVALID_ONION_BLINDING when the HTLC was received blinded (blinding point present).
		macro_rules! return_err {
			($msg: expr, $err_code: expr, $data: expr) => {
				{
					let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash));
					log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
					if msg.blinding_point.is_some() {
						return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
							msgs::UpdateFailMalformedHTLC {
								channel_id: msg.channel_id,
								htlc_id: msg.htlc_id,
								sha256_of_onion: [0; 32],
								failure_code: INVALID_ONION_BLINDING,
							}
						))
					}
					return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
						channel_id: msg.channel_id,
						htlc_id: msg.htlc_id,
						reason: HTLCFailReason::reason($err_code, $data.to_vec())
							.get_encrypted_failure_packet(&shared_secret, &None),
					}));
				}
			}
		}
		match decoded_hop {
			onion_utils::Hop::Receive(next_hop_data) => {
				// We are the payment's final recipient.
				let current_height: u32 = self.best_block.read().unwrap().height;
				match create_recv_pending_htlc_info(next_hop_data, shared_secret, msg.payment_hash,
					msg.amount_msat, msg.cltv_expiry, None, allow_underpay, msg.skimmed_fee_msat,
					current_height)
				{
					Ok(info) => {
						PendingHTLCStatus::Forward(info)
					},
					Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
				}
			},
			onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
				// We are an intermediate hop: build the info needed to forward onward.
				match create_fwd_pending_htlc_info(msg, next_hop_data, next_hop_hmac,
					new_packet_bytes, shared_secret, next_packet_pubkey_opt) {
					Ok(info) => PendingHTLCStatus::Forward(info),
					Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
				}
			}
		}
	}
4467
4468 fn get_channel_update_for_broadcast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
4479 if !chan.context.should_announce() {
4480 return Err(LightningError {
4481 err: "Cannot broadcast a channel_update for a private channel".to_owned(),
4482 action: msgs::ErrorAction::IgnoreError
4483 });
4484 }
4485 if chan.context.get_short_channel_id().is_none() {
4486 return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError});
4487 }
4488 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
4489 log_trace!(logger, "Attempting to generate broadcast channel update for channel {}", &chan.context.channel_id());
4490 self.get_channel_update_for_unicast(chan)
4491 }
4492
4493 fn get_channel_update_for_unicast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
4505 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
4506 log_trace!(logger, "Attempting to generate channel update for channel {}", chan.context.channel_id());
4507 let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
4508 None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
4509 Some(id) => id,
4510 };
4511
4512 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
4513 log_trace!(logger, "Generating channel update for channel {}", chan.context.channel_id());
4514 let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
4515 let enabled = chan.context.is_enabled();
4516
4517 let unsigned = msgs::UnsignedChannelUpdate {
4518 chain_hash: self.chain_hash,
4519 short_channel_id,
4520 timestamp: chan.context.get_update_time_counter(),
4521 message_flags: 1, channel_flags: (!were_node_one) as u8 | ((!enabled as u8) << 1),
4523 cltv_expiry_delta: chan.context.get_cltv_expiry_delta(),
4524 htlc_minimum_msat: chan.context.get_counterparty_htlc_minimum_msat(),
4525 htlc_maximum_msat: chan.context.get_announced_htlc_max_msat(),
4526 fee_base_msat: chan.context.get_outbound_forwarding_fee_base_msat(),
4527 fee_proportional_millionths: chan.context.get_fee_proportional_millionths(),
4528 excess_data: Vec::new(),
4529 };
4530 let sig = self.node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelUpdate(&unsigned)).unwrap();
4535
4536 Ok(msgs::ChannelUpdate {
4537 signature: sig,
4538 contents: unsigned
4539 })
4540 }
4541
	/// Test-only wrapper exposing `send_payment_along_path` for a single path with explicit
	/// session key bytes.
	#[cfg(test)]
	pub(crate) fn test_send_payment_along_path(&self, path: &Path, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
		// send_payment_along_path debug-asserts the consistency lock is held for read.
		let _lck = self.total_consistency_lock.read().unwrap();
		self.send_payment_along_path(SendAlongPathArgs {
			path, payment_hash, recipient_onion: &recipient_onion, total_value,
			cur_height, payment_id, keysend_preimage, invoice_request: None, session_priv_bytes
		})
	}
4550
	/// Sends the HTLC for one path of a payment, committing it to the first-hop channel.
	///
	/// Caller must hold `total_consistency_lock` for read. Failures are surfaced as
	/// [`APIError::ChannelUnavailable`] or [`APIError::MonitorUpdateInProgress`].
	fn send_payment_along_path(&self, args: SendAlongPathArgs) -> Result<(), APIError> {
		let SendAlongPathArgs {
			path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage,
			invoice_request, session_priv_bytes
		} = args;
		// We expect our caller to already hold the consistency lock (read).
		debug_assert!(self.total_consistency_lock.try_write().is_err());
		let prng_seed = self.entropy_source.get_secure_random_bytes();
		let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");

		// Build the full onion packet for the path up front.
		let (onion_packet, htlc_msat, htlc_cltv) = onion_utils::create_payment_onion(
			&self.secp_ctx, &path, &session_priv, total_value, recipient_onion, cur_height,
			payment_hash, keysend_preimage, invoice_request, prng_seed
		).map_err(|e| {
			let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None, Some(*payment_hash));
			log_error!(logger, "Failed to build an onion for path for payment hash {}", payment_hash);
			e
		})?;

		// The `loop` exists only so `break_chan_phase_entry!` can break out with an error that
		// we then pass to `handle_error!` below; the success path returns Ok directly.
		let err: Result<(), _> = loop {
			let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.hops.first().unwrap().short_channel_id) {
				None => {
					let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None, Some(*payment_hash));
					log_error!(logger, "Failed to find first-hop for payment hash {}", payment_hash);
					return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()})
				},
				Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
			};

			let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(id), Some(*payment_hash));
			log_trace!(logger,
				"Attempting to send payment with payment hash {} along path with next hop {}",
				payment_hash, path.hops.first().unwrap().short_channel_id);

			let per_peer_state = self.per_peer_state.read().unwrap();
			let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
				.ok_or_else(|| APIError::ChannelUnavailable{err: "No peer matching the path's first hop found!".to_owned() })?;
			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			let peer_state = &mut *peer_state_lock;
			if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(id) {
				match chan_phase_entry.get_mut() {
					ChannelPhase::Funded(chan) => {
						if !chan.context.is_live() {
							return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
						}
						let funding_txo = chan.context.get_funding_txo().unwrap();
						let logger = WithChannelContext::from(&self.logger, &chan.context, Some(*payment_hash));
						let send_res = chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(),
							htlc_cltv, HTLCSource::OutboundRoute {
								path: path.clone(),
								session_priv: session_priv.clone(),
								first_hop_htlc_msat: htlc_msat,
								payment_id,
							}, onion_packet, None, &self.fee_estimator, &&logger);
						match break_chan_phase_entry!(self, peer_state, send_res, chan_phase_entry) {
							Some(monitor_update) => {
								match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
									false => {
										// The monitor update is applied asynchronously; the HTLC
										// goes out when it completes. Report this to the caller.
										return Err(APIError::MonitorUpdateInProgress);
									},
									true => {},
								}
							},
							None => {},
						}
					},
					_ => return Err(APIError::ChannelUnavailable{err: "Channel to first hop is unfunded".to_owned()}),
				};
			} else {
				return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()});
			}
			return Ok(());
		};
		match handle_error!(self, err, path.hops.first().unwrap().pubkey) {
			Ok(_) => unreachable!(),
			Err(e) => {
				Err(APIError::ChannelUnavailable { err: e.err })
			},
		}
	}
4641
4642 pub fn send_payment_with_route(
4647 &self, mut route: Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields,
4648 payment_id: PaymentId
4649 ) -> Result<(), RetryableSendFailure> {
4650 let best_block_height = self.best_block.read().unwrap().height;
4651 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4652 let route_params = route.route_params.clone().unwrap_or_else(|| {
4653 let (payee_node_id, cltv_delta) = route.paths.first()
4655 .and_then(|path| path.hops.last().map(|hop| (hop.pubkey, hop.cltv_expiry_delta as u32)))
4656 .unwrap_or_else(|| (PublicKey::from_slice(&[2; 32]).unwrap(), MIN_FINAL_CLTV_EXPIRY_DELTA as u32));
4657 let dummy_payment_params = PaymentParameters::from_node_id(payee_node_id, cltv_delta);
4658 RouteParameters::from_payment_params_and_value(dummy_payment_params, route.get_total_amount())
4659 });
4660 if route.route_params.is_none() { route.route_params = Some(route_params.clone()); }
4661 let router = FixedRouter::new(route);
4662 self.pending_outbound_payments
4663 .send_payment(payment_hash, recipient_onion, payment_id, Retry::Attempts(0),
4664 route_params, &&router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
4665 &self.entropy_source, &self.node_signer, best_block_height, &self.logger,
4666 &self.pending_events, |args| self.send_payment_along_path(args))
4667 }
4668
4669 pub fn send_payment(
4704 &self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId,
4705 route_params: RouteParameters, retry_strategy: Retry
4706 ) -> Result<(), RetryableSendFailure> {
4707 let best_block_height = self.best_block.read().unwrap().height;
4708 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4709 self.pending_outbound_payments
4710 .send_payment(payment_hash, recipient_onion, payment_id, retry_strategy, route_params,
4711 &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
4712 &self.entropy_source, &self.node_signer, best_block_height, &self.logger,
4713 &self.pending_events, |args| self.send_payment_along_path(args))
4714 }
4715
	/// Test-only: sends a payment over a fixed route using pre-generated per-path session keys.
	#[cfg(test)]
	pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
		let best_block_height = self.best_block.read().unwrap().height;
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion,
			keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer,
			best_block_height, |args| self.send_payment_along_path(args))
	}
4724
	/// Test-only: registers a pending outbound payment without sending it, returning the
	/// per-path session private key bytes that would be used.
	#[cfg(test)]
	pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route: &Route) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
		let best_block_height = self.best_block.read().unwrap().height;
		self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, recipient_onion, payment_id, route, None, &self.entropy_source, best_block_height)
	}
4730
	/// Test-only: overwrites the stored payment metadata for a pending outbound payment.
	#[cfg(test)]
	pub(crate) fn test_set_payment_metadata(&self, payment_id: PaymentId, new_payment_metadata: Option<Vec<u8>>) {
		self.pending_outbound_payments.test_set_payment_metadata(payment_id, new_payment_metadata);
	}
4735
4736 pub fn send_payment_for_bolt12_invoice(
4757 &self, invoice: &Bolt12Invoice, context: Option<&OffersContext>,
4758 ) -> Result<(), Bolt12PaymentError> {
4759 match self.verify_bolt12_invoice(invoice, context) {
4760 Ok(payment_id) => self.send_payment_for_verified_bolt12_invoice(invoice, payment_id),
4761 Err(()) => Err(Bolt12PaymentError::UnexpectedInvoice),
4762 }
4763 }
4764
4765 fn verify_bolt12_invoice(
4766 &self, invoice: &Bolt12Invoice, context: Option<&OffersContext>,
4767 ) -> Result<PaymentId, ()> {
4768 let secp_ctx = &self.secp_ctx;
4769 let expanded_key = &self.inbound_payment_key;
4770
4771 match context {
4772 None if invoice.is_for_refund_without_paths() => {
4773 invoice.verify_using_metadata(expanded_key, secp_ctx)
4774 },
4775 Some(&OffersContext::OutboundPayment { payment_id, nonce, .. }) => {
4776 invoice.verify_using_payer_data(payment_id, nonce, expanded_key, secp_ctx)
4777 },
4778 _ => Err(()),
4779 }
4780 }
4781
	/// Pays an already-verified BOLT 12 invoice, routing with our configured router and
	/// advertising our BOLT 12 invoice features.
	fn send_payment_for_verified_bolt12_invoice(&self, invoice: &Bolt12Invoice, payment_id: PaymentId) -> Result<(), Bolt12PaymentError> {
		let best_block_height = self.best_block.read().unwrap().height;
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		let features = self.bolt12_invoice_features();
		self.pending_outbound_payments
			.send_payment_for_bolt12_invoice(
				invoice, payment_id, &self.router, self.list_usable_channels(), features,
				|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, &self,
				&self.secp_ctx, best_block_height, &self.logger, &self.pending_events,
				|args| self.send_payment_along_path(args)
			)
	}
4794
	/// Handles receipt of a [`StaticInvoice`] for an async payment: records the invoice with
	/// the outbound payment tracker and queues `held_htlc_available` onion messages to the
	/// recipient's message paths.
	#[cfg(async_payments)]
	fn initiate_async_payment(
		&self, invoice: &StaticInvoice, payment_id: PaymentId
	) -> Result<(), Bolt12PaymentError> {
		// `res` is set from within the closure; optionally_notify decides persistence.
		let mut res = Ok(());
		PersistenceNotifierGuard::optionally_notify(self, || {
			let best_block_height = self.best_block.read().unwrap().height;
			let features = self.bolt12_invoice_features();
			let outbound_pmts_res = self.pending_outbound_payments.static_invoice_received(
				invoice, payment_id, features, best_block_height, &*self.entropy_source,
				&self.pending_events
			);
			match outbound_pmts_res {
				Ok(()) => {},
				Err(Bolt12PaymentError::UnexpectedInvoice) | Err(Bolt12PaymentError::DuplicateInvoice) => {
					// Nothing in our state changed; skip persisting.
					res = outbound_pmts_res.map(|_| ());
					return NotifyOption::SkipPersistNoEvents
				},
				Err(e) => {
					res = Err(e);
					return NotifyOption::DoPersist
				}
			};

			// Embed an HMAC'd context in our reply paths so we can authenticate the recipient's
			// eventual release_held_htlc response.
			let nonce = Nonce::from_entropy_source(&*self.entropy_source);
			let hmac = payment_id.hmac_for_async_payment(nonce, &self.inbound_payment_key);
			let reply_paths = match self.create_blinded_paths(
				MessageContext::AsyncPayments(
					AsyncPaymentsContext::OutboundPayment { payment_id, nonce, hmac }
				)
			) {
				Ok(paths) => paths,
				Err(()) => {
					self.abandon_payment_with_reason(payment_id, PaymentFailureReason::BlindedPathCreationFailed);
					res = Err(Bolt12PaymentError::BlindedPathCreationFailed);
					return NotifyOption::DoPersist
				}
			};

			let mut pending_async_payments_messages = self.pending_async_payments_messages.lock().unwrap();
			// Bound the number of queued messages across the (invoice path, reply path) product.
			const HTLC_AVAILABLE_LIMIT: usize = 10;
			reply_paths
				.iter()
				.flat_map(|reply_path| invoice.message_paths().iter().map(move |invoice_path| (invoice_path, reply_path)))
				.take(HTLC_AVAILABLE_LIMIT)
				.for_each(|(invoice_path, reply_path)| {
					let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
						destination: Destination::BlindedPath(invoice_path.clone()),
						reply_path: reply_path.clone(),
					};
					let message = AsyncPaymentsMessage::HeldHtlcAvailable(HeldHtlcAvailable {});
					pending_async_payments_messages.push((message, instructions));
				});

			NotifyOption::DoPersist
		});

		res
	}
4854
	/// Actually sends the HTLCs for a previously-received static invoice, once the recipient
	/// has signaled it is online and able to receive.
	#[cfg(async_payments)]
	fn send_payment_for_static_invoice(
		&self, payment_id: PaymentId
	) -> Result<(), Bolt12PaymentError> {
		let best_block_height = self.best_block.read().unwrap().height;
		// `res` is set from within the closure; optionally_notify decides persistence.
		let mut res = Ok(());
		PersistenceNotifierGuard::optionally_notify(self, || {
			let outbound_pmts_res = self.pending_outbound_payments.send_payment_for_static_invoice(
				payment_id, &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
				&self.entropy_source, &self.node_signer, &self, &self.secp_ctx, best_block_height,
				&self.logger, &self.pending_events, |args| self.send_payment_along_path(args)
			);
			match outbound_pmts_res {
				Err(Bolt12PaymentError::UnexpectedInvoice) | Err(Bolt12PaymentError::DuplicateInvoice) => {
					// Payment state did not change; no need to persist.
					res = outbound_pmts_res.map(|_| ());
					NotifyOption::SkipPersistNoEvents
				},
				other_res => {
					res = other_res;
					NotifyOption::DoPersist
				}
			}
		});
		res
	}
4880
	/// Abandons a pending outbound payment, recording the failure as user-initiated.
	pub fn abandon_payment(&self, payment_id: PaymentId) {
		self.abandon_payment_with_reason(payment_id, PaymentFailureReason::UserAbandoned)
	}
4909
	/// Abandons a pending outbound payment with the given failure reason, queueing the
	/// corresponding events for the user.
	fn abandon_payment_with_reason(&self, payment_id: PaymentId, reason: PaymentFailureReason) {
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		self.pending_outbound_payments.abandon_payment(payment_id, reason, &self.pending_events);
	}
4914
	/// Sends a spontaneous (keysend) payment to the destination in `route_params`, returning
	/// the payment hash of the resulting payment.
	pub fn send_spontaneous_payment(
		&self, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields,
		payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry
	) -> Result<PaymentHash, RetryableSendFailure> {
		let best_block_height = self.best_block.read().unwrap().height;
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, recipient_onion,
			payment_id, retry_strategy, route_params, &self.router, self.list_usable_channels(),
			|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
			&self.logger, &self.pending_events, |args| self.send_payment_along_path(args))
	}
4943
	/// Sends a payment probe over the given path, returning the probe's payment hash and id.
	pub fn send_probe(&self, path: Path) -> Result<(PaymentHash, PaymentId), ProbeSendFailure> {
		let best_block_height = self.best_block.read().unwrap().height;
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		// probing_cookie_secret lets us later recognize probe payment hashes as our own.
		self.pending_outbound_payments.send_probe(path, self.probing_cookie_secret,
			&self.entropy_source, &self.node_signer, best_block_height,
			|args| self.send_payment_along_path(args))
	}
4954
	/// Test-only: returns whether the given payment hash/id pair was generated by our prober.
	#[cfg(test)]
	pub(crate) fn payment_is_probe(&self, payment_hash: &PaymentHash, payment_id: &PaymentId) -> bool {
		outbound_payment::payment_is_probe(payment_hash, payment_id, self.probing_cookie_secret)
	}
4961
4962 pub fn send_spontaneous_preflight_probes(
4967 &self, node_id: PublicKey, amount_msat: u64, final_cltv_expiry_delta: u32,
4968 liquidity_limit_multiplier: Option<u64>,
4969 ) -> Result<Vec<(PaymentHash, PaymentId)>, ProbeSendFailure> {
4970 let payment_params =
4971 PaymentParameters::from_node_id(node_id, final_cltv_expiry_delta);
4972
4973 let route_params = RouteParameters::from_payment_params_and_value(payment_params, amount_msat);
4974
4975 self.send_preflight_probes(route_params, liquidity_limit_multiplier)
4976 }
4977
	/// Sends pre-flight probes along routes found for `route_params`, trimming likely-unannounced
	/// final hops and skipping paths that would push a first-hop channel past
	/// `liquidity_limit_multiplier` (default 3) times its outbound HTLC limit.
	pub fn send_preflight_probes(
		&self, route_params: RouteParameters, liquidity_limit_multiplier: Option<u64>,
	) -> Result<Vec<(PaymentHash, PaymentId)>, ProbeSendFailure> {
		let liquidity_limit_multiplier = liquidity_limit_multiplier.unwrap_or(3);

		let payer = self.get_our_node_id();
		let usable_channels = self.list_usable_channels();
		let first_hops = usable_channels.iter().collect::<Vec<_>>();
		let inflight_htlcs = self.compute_inflight_htlcs();

		let route = self
			.router
			.find_route(&payer, &route_params, Some(&first_hops), inflight_htlcs)
			.map_err(|e| {
				log_error!(self.logger, "Failed to find path for payment probe: {:?}", e);
				ProbeSendFailure::RouteNotFound
			})?;

		// Tracks the liquidity consumed per first-hop SCID across all probes we send here.
		let mut used_liquidity_map = hash_map_with_capacity(first_hops.len());

		let mut res = Vec::new();

		for mut path in route.paths {
			// Drop trailing hops that are likely unannounced, folding their value into the new
			// last hop's fee so the amount probed through the announced prefix stays the same.
			while let Some(last_path_hop) = path.hops.last() {
				if last_path_hop.maybe_announced_channel {
					break;
				} else {
					log_debug!(
						self.logger,
						"Avoided sending payment probe all the way to last hop {} as it is likely unannounced.",
						last_path_hop.short_channel_id
					);
					let final_value_msat = path.final_value_msat();
					path.hops.pop();
					if let Some(new_last) = path.hops.last_mut() {
						new_last.fee_msat += final_value_msat;
					}
				}
			}

			// A probe needs at least two hops to be meaningful.
			if path.hops.len() < 2 {
				log_debug!(
					self.logger,
					"Skipped sending payment probe over path with less than two hops."
				);
				continue;
			}

			if let Some(first_path_hop) = path.hops.first() {
				if let Some(first_hop) = first_hops.iter().find(|h| {
					h.get_outbound_payment_scid() == Some(first_path_hop.short_channel_id)
				}) {
					let path_value = path.final_value_msat() + path.fee_msat();
					let used_liquidity =
						used_liquidity_map.entry(first_path_hop.short_channel_id).or_insert(0);

					// Skip rather than saturate the first-hop channel's outbound capacity.
					if first_hop.next_outbound_htlc_limit_msat
						< (*used_liquidity + path_value) * liquidity_limit_multiplier
					{
						log_debug!(self.logger, "Skipped sending payment probe to avoid putting channel {} under the liquidity limit.", first_path_hop.short_channel_id);
						continue;
					} else {
						*used_liquidity += path_value;
					}
				}
			}

			res.push(self.send_probe(path).map_err(|e| {
				log_error!(self.logger, "Failed to send pre-flight probe: {:?}", e);
				e
			})?);
		}

		Ok(res)
	}
5071
	/// Completes funding for an unfunded, outbound V1 channel: locates the funding output via
	/// `find_funding_output`, generates (and queues) the `funding_created` message, and
	/// re-inserts the channel under its funding-derived channel id.
	///
	/// On any failure the channel is force-closed and an [`APIError`] is returned.
	fn funding_transaction_generated_intern<FundingOutput: FnMut(&OutboundV1Channel<SP>) -> Result<OutPoint, &'static str>>(
		&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding_transaction: Transaction, is_batch_funding: bool,
		mut find_funding_output: FundingOutput, is_manual_broadcast: bool,
	) -> Result<(), APIError> {
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
			.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;

		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		let funding_txo;
		let (mut chan, msg_opt) = match peer_state.channel_by_id.remove(&temporary_channel_id) {
			Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
				// Force-closes the channel and maps the error into an APIError. Both locks must
				// be dropped before handle_error! runs, hence the explicit mem::drop calls.
				macro_rules! close_chan { ($err: expr, $api_err: expr, $chan: expr) => { {
					let counterparty;
					let err = if let ChannelError::Close((msg, reason)) = $err {
						let channel_id = $chan.context.channel_id();
						counterparty = chan.context.get_counterparty_node_id();
						let shutdown_res = $chan.context.force_shutdown(false, reason);
						MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None)
					} else { unreachable!(); };

					mem::drop(peer_state_lock);
					mem::drop(per_peer_state);
					let _: Result<(), _> = handle_error!(self, Err(err), counterparty);
					Err($api_err)
				} } }
				match find_funding_output(&chan) {
					Ok(found_funding_txo) => funding_txo = found_funding_txo,
					Err(err) => {
						let chan_err = ChannelError::close(err.to_owned());
						let api_err = APIError::APIMisuseError { err: err.to_owned() };
						return close_chan!(chan_err, api_err, chan);
					},
				}

				let logger = WithChannelContext::from(&self.logger, &chan.context, None);
				let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger);
				match funding_res {
					Ok(funding_msg) => (chan, funding_msg),
					Err((mut chan, chan_err)) => {
						let api_err = APIError::ChannelUnavailable { err: "Signer refused to sign the initial commitment transaction".to_owned() };
						return close_chan!(chan_err, api_err, chan);
					}
				}
			},
			Some(phase) => {
				// Wrong channel phase: reinsert it untouched before erroring.
				peer_state.channel_by_id.insert(temporary_channel_id, phase);
				return Err(APIError::APIMisuseError {
					err: format!(
						"Channel with id {} for the passed counterparty node_id {} is not an unfunded, outbound V1 channel",
						temporary_channel_id, counterparty_node_id),
				})
			},
			None => return Err(APIError::ChannelUnavailable {err: format!(
				"Channel with id {} not found for the passed counterparty node_id {}",
				temporary_channel_id, counterparty_node_id),
			}),
		};

		if let Some(msg) = msg_opt {
			peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
				node_id: chan.context.get_counterparty_node_id(),
				msg,
			});
		}
		if is_manual_broadcast {
			chan.context.set_manual_broadcast();
		}
		// Re-key the channel under its new (funding-derived) channel id.
		match peer_state.channel_by_id.entry(chan.context.channel_id()) {
			hash_map::Entry::Occupied(_) => {
				panic!("Generated duplicate funding txid?");
			},
			hash_map::Entry::Vacant(e) => {
				let mut outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
				match outpoint_to_peer.entry(funding_txo) {
					hash_map::Entry::Vacant(e) => { e.insert(chan.context.get_counterparty_node_id()); },
					hash_map::Entry::Occupied(o) => {
						// Another channel already claims this outpoint — abort and close ours.
						let err = format!(
							"An existing channel using outpoint {} is open with peer {}",
							funding_txo, o.get()
						);
						mem::drop(outpoint_to_peer);
						mem::drop(peer_state_lock);
						mem::drop(per_peer_state);
						let reason = ClosureReason::ProcessingError { err: err.clone() };
						self.finish_close_channel(chan.context.force_shutdown(true, reason));
						return Err(APIError::ChannelUnavailable { err });
					}
				}
				e.insert(ChannelPhase::UnfundedOutboundV1(chan));
			}
		}
		Ok(())
	}
5169
	/// Test-only: funds a channel using the given transaction and a caller-chosen output index,
	/// skipping the output-matching checks of the public API.
	#[cfg(test)]
	pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> {
		let txid = funding_transaction.compute_txid();
		self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, false, |_| {
			Ok(OutPoint { txid, index: output_index })
		}, false)
	}
5177
	/// Funds a single channel with the given transaction; thin wrapper around the batch API.
	pub fn funding_transaction_generated(&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding_transaction: Transaction) -> Result<(), APIError> {
		self.batch_funding_transaction_generated(&[(&temporary_channel_id, &counterparty_node_id)], funding_transaction)
	}
5211
5212
5213 pub fn unsafe_manual_funding_transaction_generated(&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding: OutPoint) -> Result<(), APIError> {
5241 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5242
5243 let temporary_channels = &[(&temporary_channel_id, &counterparty_node_id)];
5244 return self.batch_funding_transaction_generated_intern(temporary_channels, FundingType::Unchecked(funding));
5245
5246 }
5247
	/// Funds a batch of channels from a single funding transaction, with the transaction's
	/// sanity checks performed by the internal batch handler.
	pub fn batch_funding_transaction_generated(&self, temporary_channels: &[(&ChannelId, &PublicKey)], funding_transaction: Transaction) -> Result<(), APIError> {
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		self.batch_funding_transaction_generated_intern(temporary_channels, FundingType::Checked(funding_transaction))
	}
5262
/// Shared implementation behind the (batch) funding-transaction-generated APIs.
///
/// For [`FundingType::Checked`] funding, first sanity-checks the transaction (segwit-signed
/// inputs, output count, locktime finality), then drives each temporary channel through
/// `funding_transaction_generated_intern`, locating that channel's funding output in the
/// transaction. Errors are accumulated in `result` (keeping the first error) rather than
/// returned immediately, so that every check runs; on any error, all channels involved in
/// the attempt are force-closed and cleaned up before returning.
fn batch_funding_transaction_generated_intern(&self, temporary_channels: &[(&ChannelId, &PublicKey)], funding: FundingType) -> Result<(), APIError> {
	let mut result = Ok(());
	// Transaction-level validation only applies when we were handed the full transaction.
	if let FundingType::Checked(funding_transaction) = &funding {
		if !funding_transaction.is_coinbase() {
			for inp in funding_transaction.input.iter() {
				// An empty witness means the input is either unsigned or non-segwit.
				if inp.witness.is_empty() {
					result = result.and(Err(APIError::APIMisuseError {
						err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned()
					}));
				}
			}
		}

		// The funding output index is stored as a u16, so more outputs cannot be referenced.
		if funding_transaction.output.len() > u16::max_value() as usize {
			result = result.and(Err(APIError::APIMisuseError {
				err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
			}));
		}
		let height = self.best_block.read().unwrap().height;
		// If any input's sequence enables locktime enforcement and the locktime is a block
		// height beyond the next block, the transaction cannot confirm promptly.
		if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) &&
			funding_transaction.lock_time.is_block_height() &&
			funding_transaction.lock_time.to_consensus_u32() > height + 1
		{
			result = result.and(Err(APIError::APIMisuseError {
				err: "Funding transaction absolute timelock is non-final".to_owned()
			}));
		}
	}

	let txid = funding.txid();
	let is_batch_funding = temporary_channels.len() > 1;
	// Only track batch state (per-channel completion flags keyed by txid) for true batches;
	// the lock guard is held for the rest of the function when present.
	let mut funding_batch_states = if is_batch_funding {
		Some(self.funding_batch_states.lock().unwrap())
	} else {
		None
	};
	let mut funding_batch_state = funding_batch_states.as_mut().and_then(|states| {
		match states.entry(txid) {
			// A duplicate batch txid is a misuse; record the error and track no state.
			btree_map::Entry::Occupied(_) => {
				result = result.clone().and(Err(APIError::APIMisuseError {
					err: "Batch funding transaction with the same txid already exists".to_owned()
				}));
				None
			},
			btree_map::Entry::Vacant(vacant) => Some(vacant.insert(Vec::new())),
		}
	});
	let is_manual_broadcast = funding.is_manual_broadcast();
	for &(temporary_channel_id, counterparty_node_id) in temporary_channels {
		// `and_then` short-circuits: once any error is recorded, later channels are skipped
		// (they are still cleaned up in the error path below).
		result = result.and_then(|_| self.funding_transaction_generated_intern(
			*temporary_channel_id,
			*counterparty_node_id,
			funding.transaction_or_dummy(),
			is_batch_funding,
			// Closure resolving this channel's funding outpoint within the transaction.
			|chan| {
				let mut output_index = None;
				let expected_spk = chan.context.get_funding_redeemscript().to_p2wsh();
				let outpoint = match &funding {
					FundingType::Checked(tx) => {
						// Find exactly one output matching the expected script and value.
						for (idx, outp) in tx.output.iter().enumerate() {
							if outp.script_pubkey == expected_spk && outp.value.to_sat() == chan.context.get_value_satoshis() {
								if output_index.is_some() {
									return Err("Multiple outputs matched the expected script and value");
								}
								output_index = Some(idx as u16);
							}
						}
						if output_index.is_none() {
							return Err("No output matched the script_pubkey and value in the FundingGenerationReady event");
						}
						OutPoint { txid, index: output_index.unwrap() }
					},
					// Unchecked funding already carries the outpoint; trust it as-is.
					FundingType::Unchecked(outpoint) => outpoint.clone(),
				};
				// Record this channel in the batch state (completion flag starts false).
				if let Some(funding_batch_state) = funding_batch_state.as_mut() {
					funding_batch_state.push((ChannelId::v1_from_funding_outpoint(outpoint), *counterparty_node_id, false));
				}
				Ok(outpoint)
			},
			is_manual_broadcast)
		);
	}
	// Error path: unwind every channel touched by this attempt.
	if let Err(ref e) = result {
		let e = format!("Error in transaction funding: {:?}", e);
		let mut channels_to_remove = Vec::new();
		// Channels already registered under the batch txid (now identified by their real
		// channel id derived from the funding outpoint)...
		channels_to_remove.extend(funding_batch_states.as_mut()
			.and_then(|states| states.remove(&txid))
			.into_iter().flatten()
			.map(|(chan_id, node_id, _state)| (chan_id, node_id))
		);
		// ...plus all the temporary channel ids we were asked to fund (some may no longer
		// exist in `channel_by_id`; the lookup below simply skips those).
		channels_to_remove.extend(temporary_channels.iter()
			.map(|(&chan_id, &node_id)| (chan_id, node_id))
		);
		let mut shutdown_results = Vec::new();
		{
			let per_peer_state = self.per_peer_state.read().unwrap();
			for (channel_id, counterparty_node_id) in channels_to_remove {
				per_peer_state.get(&counterparty_node_id)
					.map(|peer_state_mutex| peer_state_mutex.lock().unwrap())
					.and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id).map(|chan| (chan, peer_state)))
					.map(|(mut chan, mut peer_state)| {
						let closure_reason = ClosureReason::ProcessingError { err: e.clone() };
						// Force-shutdown without broadcasting (the funding tx never made it).
						let mut close_res = chan.context_mut().force_shutdown(false, closure_reason);
						locked_close_channel!(self, peer_state, chan.context(), close_res);
						shutdown_results.push(close_res);
						// Tell the counterparty why the channel is gone.
						peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
							node_id: counterparty_node_id,
							action: msgs::ErrorAction::SendErrorMessage {
								msg: msgs::ErrorMessage {
									channel_id,
									data: "Failed to fund channel".to_owned(),
								}
							},
						});
					});
			}
		}
		// Release the funding_batch_states lock before finish_close_channel, which may
		// take other locks of its own.
		mem::drop(funding_batch_states);
		for shutdown_result in shutdown_results.drain(..) {
			self.finish_close_channel(shutdown_result);
		}
	}
	result
}
5395
/// Applies a partial config update to a set of channels shared with one counterparty.
///
/// Validates `config_update` (currently: `cltv_expiry_delta` must be at least
/// [`MIN_CLTV_EXPIRY_DELTA`]), then verifies that *every* channel id exists for the peer
/// before mutating *any* of them, so the update is all-or-nothing with respect to missing
/// channels. For each channel whose effective config actually changes, a fresh
/// `channel_update` is queued — broadcast if the channel is announced, otherwise unicast
/// to the counterparty.
pub fn update_partial_channel_config(
	&self, counterparty_node_id: &PublicKey, channel_ids: &[ChannelId], config_update: &ChannelConfigUpdate,
) -> Result<(), APIError> {
	// Reject obviously-unsafe CLTV deltas up front, before taking any locks.
	if config_update.cltv_expiry_delta.map(|delta| delta < MIN_CLTV_EXPIRY_DELTA).unwrap_or(false) {
		return Err(APIError::APIMisuseError {
			err: format!("The chosen CLTV expiry delta is below the minimum of {}", MIN_CLTV_EXPIRY_DELTA),
		});
	}

	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
	let per_peer_state = self.per_peer_state.read().unwrap();
	let peer_state_mutex = per_peer_state.get(counterparty_node_id)
		.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
	let mut peer_state_lock = peer_state_mutex.lock().unwrap();
	let peer_state = &mut *peer_state_lock;

	// First pass: check existence only. The peer-state lock is held across both passes,
	// so a channel present here cannot vanish before the second pass.
	for channel_id in channel_ids {
		if !peer_state.has_channel(channel_id) {
			return Err(APIError::ChannelUnavailable {
				err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, counterparty_node_id),
			});
		};
	}
	// Second pass: apply the update to each channel.
	for channel_id in channel_ids {
		if let Some(channel_phase) = peer_state.channel_by_id.get_mut(channel_id) {
			// Merge the partial update into a copy of the current config, then install it.
			let mut config = channel_phase.context().config();
			config.apply(config_update);
			// update_config returns false when nothing effectively changed — skip the
			// gossip update in that case.
			if !channel_phase.context_mut().update_config(&config) {
				continue;
			}
			// Only funded channels have a channel_update to (re)announce.
			if let ChannelPhase::Funded(channel) = channel_phase {
				if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
					let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
					pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
				} else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
					// Unannounced channel: send the update only to our counterparty.
					peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
						node_id: channel.context.get_counterparty_node_id(),
						msg,
					});
				}
			}
			continue;
		} else {
			// The first pass confirmed existence while we held the lock, so this branch
			// should be unreachable.
			debug_assert!(false);
			return Err(APIError::ChannelUnavailable {
				err: format!(
					"Channel with ID {} for passed counterparty_node_id {} disappeared after we confirmed its existence - this should not be reachable!",
					channel_id, counterparty_node_id),
			});
		};
	}
	Ok(())
}
5472
5473 pub fn update_channel_config(
5496 &self, counterparty_node_id: &PublicKey, channel_ids: &[ChannelId], config: &ChannelConfig,
5497 ) -> Result<(), APIError> {
5498 return self.update_partial_channel_config(counterparty_node_id, channel_ids, &(*config).into());
5499 }
5500
/// Forwards a previously-intercepted HTLC over the channel identified by
/// `next_hop_channel_id` with `next_node_id`, paying `amt_to_forward_msat` onward.
///
/// Resolves the channel to a short channel id (falling back to its outbound SCID alias),
/// removes the pending intercept entry for `intercept_id`, rewrites the forward's routing
/// to target that SCID, records any amount withheld (`outgoing_amt_msat -
/// amt_to_forward_msat`) as a skimmed fee, and queues the forward.
///
/// Errors if the peer or channel cannot be found, the channel is not yet usable, or no
/// pending intercepted payment matches `intercept_id`.
pub fn forward_intercepted_htlc(&self, intercept_id: InterceptId, next_hop_channel_id: &ChannelId, next_node_id: PublicKey, amt_to_forward_msat: u64) -> Result<(), APIError> {
	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

	// Look up the outgoing channel and capture its SCID; the per-peer locks are released
	// at the end of this block, before we touch pending_intercepted_htlcs.
	let next_hop_scid = {
		let peer_state_lock = self.per_peer_state.read().unwrap();
		let peer_state_mutex = peer_state_lock.get(&next_node_id)
			.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", next_node_id) })?;
		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		match peer_state.channel_by_id.get(next_hop_channel_id) {
			Some(ChannelPhase::Funded(chan)) => {
				if !chan.context.is_usable() {
					return Err(APIError::ChannelUnavailable {
						err: format!("Channel with id {} not fully established", next_hop_channel_id)
					})
				}
				// Prefer the real SCID; unannounced channels only have an outbound alias.
				chan.context.get_short_channel_id().unwrap_or(chan.context.outbound_scid_alias())
			},
			// Channel exists but is still in a pre-funded phase.
			Some(_) => return Err(APIError::ChannelUnavailable {
				err: format!("Channel with id {} for the passed counterparty node_id {} is still opening.",
					next_hop_channel_id, next_node_id)
			}),
			None => {
				let error = format!("Channel with id {} not found for the passed counterparty node_id {}",
					next_hop_channel_id, next_node_id);
				let logger = WithContext::from(&self.logger, Some(next_node_id), Some(*next_hop_channel_id), None);
				log_error!(logger, "{} when attempting to forward intercepted HTLC", error);
				return Err(APIError::ChannelUnavailable {
					err: error
				})
			}
		}
	};

	// Take ownership of the pending intercept; after this point the entry is consumed
	// even if nothing below can fail.
	let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id)
		.ok_or_else(|| APIError::APIMisuseError {
			err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0))
		})?;

	// Rebuild the routing with the caller-selected outgoing SCID. Intercepted HTLCs are
	// always forwards, hence the unreachable!() on any other variant.
	let routing = match payment.forward_info.routing {
		PendingHTLCRouting::Forward { onion_packet, blinded, incoming_cltv_expiry, .. } => {
			PendingHTLCRouting::Forward {
				onion_packet, blinded, incoming_cltv_expiry, short_channel_id: next_hop_scid,
			}
		},
		_ => unreachable!() };
	// Anything we don't forward onward is fee we skim (saturating at zero).
	let skimmed_fee_msat =
		payment.forward_info.outgoing_amt_msat.saturating_sub(amt_to_forward_msat);
	let pending_htlc_info = PendingHTLCInfo {
		skimmed_fee_msat: if skimmed_fee_msat == 0 { None } else { Some(skimmed_fee_msat) },
		outgoing_amt_msat: amt_to_forward_msat, routing, ..payment.forward_info
	};

	// Re-queue as a single-element forward batch keyed on the previous hop's details.
	let mut per_source_pending_forward = [(
		payment.prev_short_channel_id,
		payment.prev_counterparty_node_id,
		payment.prev_funding_outpoint,
		payment.prev_channel_id,
		payment.prev_user_channel_id,
		vec![(pending_htlc_info, payment.prev_htlc_id)]
	)];
	self.forward_htlcs(&mut per_source_pending_forward);
	Ok(())
}
5591
5592 pub fn fail_intercepted_htlc(&self, intercept_id: InterceptId) -> Result<(), APIError> {
5600 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5601
5602 let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id)
5603 .ok_or_else(|| APIError::APIMisuseError {
5604 err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0))
5605 })?;
5606
5607 if let PendingHTLCRouting::Forward { short_channel_id, incoming_cltv_expiry, .. } = payment.forward_info.routing {
5608 let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
5609 short_channel_id: payment.prev_short_channel_id,
5610 user_channel_id: Some(payment.prev_user_channel_id),
5611 outpoint: payment.prev_funding_outpoint,
5612 channel_id: payment.prev_channel_id,
5613 counterparty_node_id: payment.prev_counterparty_node_id,
5614 htlc_id: payment.prev_htlc_id,
5615 incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret,
5616 phantom_shared_secret: None,
5617 blinded_failure: payment.forward_info.routing.blinded_failure(),
5618 cltv_expiry: incoming_cltv_expiry,
5619 });
5620
5621 let failure_reason = HTLCFailReason::from_failure_code(0x4000 | 10);
5622 let destination = HTLCDestination::UnknownNextHop { requested_forward_scid: short_channel_id };
5623 self.fail_htlc_backwards_internal(&htlc_source, &payment.forward_info.payment_hash, &failure_reason, destination);
5624 } else { unreachable!() } Ok(())
5627 }
5628
/// Decodes and dispatches all queued inbound `update_add_htlc`s.
///
/// Drains `self.decode_update_add_htlcs` (keyed by incoming SCID), decodes each HTLC's
/// onion, checks the incoming channel can accept it and that the next hop is forwardable,
/// then either queues the HTLC as a forward (without generating a forward event) or
/// records a failure — pushing the failure back onto `self.forward_htlcs` and emitting an
/// [`Event::HTLCHandlingFailed`] for it.
fn process_pending_update_add_htlcs(&self) {
	// Swap the queue out under its lock so we can iterate without holding the mutex.
	let mut decode_update_add_htlcs = new_hash_map();
	mem::swap(&mut decode_update_add_htlcs, &mut self.decode_update_add_htlcs.lock().unwrap());

	// Maps a failed HTLC to its destination for event reporting: the resolved next-hop
	// channel if known, an unknown-next-hop otherwise, or a failed receive when there is
	// no outgoing SCID at all.
	let get_failed_htlc_destination = |outgoing_scid_opt: Option<u64>, payment_hash: PaymentHash| {
		if let Some(outgoing_scid) = outgoing_scid_opt {
			match self.short_to_chan_info.read().unwrap().get(&outgoing_scid) {
				Some((outgoing_counterparty_node_id, outgoing_channel_id)) =>
					HTLCDestination::NextHopChannel {
						node_id: Some(*outgoing_counterparty_node_id),
						channel_id: *outgoing_channel_id,
					},
				None => HTLCDestination::UnknownNextHop {
					requested_forward_scid: outgoing_scid,
				},
			}
		} else {
			HTLCDestination::FailedPayment { payment_hash }
		}
	};

	'outer_loop: for (incoming_scid, update_add_htlcs) in decode_update_add_htlcs {
		// Snapshot the incoming channel's identifying details up front.
		let incoming_channel_details_opt = self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
			let counterparty_node_id = chan.context.get_counterparty_node_id();
			let channel_id = chan.context.channel_id();
			let funding_txo = chan.context.get_funding_txo().unwrap();
			let user_channel_id = chan.context.get_user_id();
			let accept_underpaying_htlcs = chan.context.config().accept_underpaying_htlcs;
			(counterparty_node_id, channel_id, funding_txo, user_channel_id, accept_underpaying_htlcs)
		});
		let (
			incoming_counterparty_node_id, incoming_channel_id, incoming_funding_txo,
			incoming_user_channel_id, incoming_accept_underpaying_htlcs
		) = if let Some(incoming_channel_details) = incoming_channel_details_opt {
			incoming_channel_details
		} else {
			// Incoming channel is gone (or not funded); drop this SCID's whole batch.
			continue;
		};

		let mut htlc_forwards = Vec::new();
		let mut htlc_fails = Vec::new();
		for update_add_htlc in &update_add_htlcs {
			// Peel our layer of the onion; an undecodable onion is failed as InvalidOnion.
			let (next_hop, shared_secret, next_packet_details_opt) = match decode_incoming_update_add_htlc_onion(
				&update_add_htlc, &*self.node_signer, &*self.logger, &self.secp_ctx
			) {
				Ok(decoded_onion) => decoded_onion,
				Err(htlc_fail) => {
					htlc_fails.push((htlc_fail, HTLCDestination::InvalidOnion));
					continue;
				},
			};

			let is_intro_node_blinded_forward = next_hop.is_intro_node_blinded_forward();
			let outgoing_scid_opt = next_packet_details_opt.as_ref().map(|d| d.outgoing_scid);

			// Ask the incoming channel whether it can accept this HTLC at all.
			match self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
				let logger = WithChannelContext::from(&self.logger, &chan.context, Some(update_add_htlc.payment_hash));
				chan.can_accept_incoming_htlc(
					update_add_htlc, &self.fee_estimator, &logger,
				)
			}) {
				Some(Ok(_)) => {},
				Some(Err((err, code))) => {
					let htlc_fail = self.htlc_failure_from_update_add_err(
						&update_add_htlc, &incoming_counterparty_node_id, err, code,
						is_intro_node_blinded_forward, &shared_secret,
					);
					let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
					htlc_fails.push((htlc_fail, htlc_destination));
					continue;
				},
				// The incoming channel vanished mid-iteration; abandon this SCID entirely.
				None => continue 'outer_loop,
			}

			// For forwards, additionally check the outgoing hop (fees, CLTV, etc.).
			if let Some(next_packet_details) = next_packet_details_opt.as_ref() {
				if let Err((err, code)) = self.can_forward_htlc(
					&update_add_htlc, next_packet_details
				) {
					let htlc_fail = self.htlc_failure_from_update_add_err(
						&update_add_htlc, &incoming_counterparty_node_id, err, code,
						is_intro_node_blinded_forward, &shared_secret,
					);
					let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
					htlc_fails.push((htlc_fail, htlc_destination));
					continue;
				}
			}

			// All checks passed: build the final pending status (forward or fail).
			match self.construct_pending_htlc_status(
				&update_add_htlc, &incoming_counterparty_node_id, shared_secret, next_hop,
				incoming_accept_underpaying_htlcs, next_packet_details_opt.map(|d| d.next_packet_pubkey),
			) {
				PendingHTLCStatus::Forward(htlc_forward) => {
					htlc_forwards.push((htlc_forward, update_add_htlc.htlc_id));
				},
				PendingHTLCStatus::Fail(htlc_fail) => {
					let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
					htlc_fails.push((htlc_fail, htlc_destination));
				},
			}
		}

		// Queue the successful decodes for forwarding, without generating a
		// PendingHTLCsForwardable event (callers drive forwarding separately).
		let pending_forwards = (
			incoming_scid, Some(incoming_counterparty_node_id), incoming_funding_txo,
			incoming_channel_id, incoming_user_channel_id, htlc_forwards.drain(..).collect()
		);
		self.forward_htlcs_without_forward_event(&mut [pending_forwards]);
		// Queue the failures back onto the incoming SCID and surface an event for each.
		for (htlc_fail, htlc_destination) in htlc_fails.drain(..) {
			let failure = match htlc_fail {
				HTLCFailureMsg::Relay(fail_htlc) => HTLCForwardInfo::FailHTLC {
					htlc_id: fail_htlc.htlc_id,
					err_packet: fail_htlc.reason,
				},
				HTLCFailureMsg::Malformed(fail_malformed_htlc) => HTLCForwardInfo::FailMalformedHTLC {
					htlc_id: fail_malformed_htlc.htlc_id,
					sha256_of_onion: fail_malformed_htlc.sha256_of_onion,
					failure_code: fail_malformed_htlc.failure_code,
				},
			};
			self.forward_htlcs.lock().unwrap().entry(incoming_scid).or_default().push(failure);
			self.pending_events.lock().unwrap().push_back((events::Event::HTLCHandlingFailed {
				prev_channel_id: incoming_channel_id,
				failed_next_destination: htlc_destination,
			}, None));
		}
	}
}
5762
5763 pub fn process_pending_htlc_forwards(&self) {
5768 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5769
5770 self.process_pending_update_add_htlcs();
5771
5772 let mut new_events = VecDeque::new();
5773 let mut failed_forwards = Vec::new();
5774 let mut phantom_receives: Vec<(u64, Option<PublicKey>, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
5775 {
5776 let mut forward_htlcs = new_hash_map();
5777 mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
5778
5779 for (short_chan_id, mut pending_forwards) in forward_htlcs {
5780 if short_chan_id != 0 {
5781 let mut forwarding_counterparty = None;
5782 macro_rules! forwarding_channel_not_found {
5783 ($forward_infos: expr) => {
5784 for forward_info in $forward_infos {
5785 match forward_info {
5786 HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
5787 prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
5788 prev_user_channel_id, prev_counterparty_node_id, forward_info: PendingHTLCInfo {
5789 routing, incoming_shared_secret, payment_hash, outgoing_amt_msat,
5790 outgoing_cltv_value, ..
5791 }
5792 }) => {
5793 let cltv_expiry = routing.incoming_cltv_expiry();
5794 macro_rules! failure_handler {
5795 ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
5796 let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id), Some(payment_hash));
5797 log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
5798
5799 let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
5800 short_channel_id: prev_short_channel_id,
5801 user_channel_id: Some(prev_user_channel_id),
5802 channel_id: prev_channel_id,
5803 outpoint: prev_funding_outpoint,
5804 counterparty_node_id: prev_counterparty_node_id,
5805 htlc_id: prev_htlc_id,
5806 incoming_packet_shared_secret: incoming_shared_secret,
5807 phantom_shared_secret: $phantom_ss,
5808 blinded_failure: routing.blinded_failure(),
5809 cltv_expiry,
5810 });
5811
5812 let reason = if $next_hop_unknown {
5813 HTLCDestination::UnknownNextHop { requested_forward_scid: short_chan_id }
5814 } else {
5815 HTLCDestination::FailedPayment{ payment_hash }
5816 };
5817
5818 failed_forwards.push((htlc_source, payment_hash,
5819 HTLCFailReason::reason($err_code, $err_data),
5820 reason
5821 ));
5822 continue;
5823 }
5824 }
5825 macro_rules! fail_forward {
5826 ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
5827 {
5828 failure_handler!($msg, $err_code, $err_data, $phantom_ss, true);
5829 }
5830 }
5831 }
5832 macro_rules! failed_payment {
5833 ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
5834 {
5835 failure_handler!($msg, $err_code, $err_data, $phantom_ss, false);
5836 }
5837 }
5838 }
5839 if let PendingHTLCRouting::Forward { ref onion_packet, .. } = routing {
5840 let phantom_pubkey_res = self.node_signer.get_node_id(Recipient::PhantomNode);
5841 if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.chain_hash) {
5842 let phantom_shared_secret = self.node_signer.ecdh(Recipient::PhantomNode, &onion_packet.public_key.unwrap(), None).unwrap().secret_bytes();
5843 let next_hop = match onion_utils::decode_next_payment_hop(
5844 phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac,
5845 payment_hash, None, &*self.node_signer
5846 ) {
5847 Ok(res) => res,
5848 Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
5849 let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).to_byte_array();
5850 failed_payment!(err_msg, err_code, sha256_of_onion.to_vec(), None);
5855 },
5856 Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
5857 failed_payment!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
5858 },
5859 };
5860 match next_hop {
5861 onion_utils::Hop::Receive(hop_data) => {
5862 let current_height: u32 = self.best_block.read().unwrap().height;
5863 match create_recv_pending_htlc_info(hop_data,
5864 incoming_shared_secret, payment_hash, outgoing_amt_msat,
5865 outgoing_cltv_value, Some(phantom_shared_secret), false, None,
5866 current_height)
5867 {
5868 Ok(info) => phantom_receives.push((
5869 prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint,
5870 prev_channel_id, prev_user_channel_id, vec![(info, prev_htlc_id)]
5871 )),
5872 Err(InboundHTLCErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
5873 }
5874 },
5875 _ => panic!(),
5876 }
5877 } else {
5878 fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None);
5879 }
5880 } else {
5881 fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None);
5882 }
5883 },
5884 HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. } => {
5885 }
5890 }
5891 }
5892 }
5893 }
5894 let chan_info_opt = self.short_to_chan_info.read().unwrap().get(&short_chan_id).cloned();
5895 let (counterparty_node_id, forward_chan_id) = match chan_info_opt {
5896 Some((cp_id, chan_id)) => (cp_id, chan_id),
5897 None => {
5898 forwarding_channel_not_found!(pending_forwards.drain(..));
5899 continue;
5900 }
5901 };
5902 forwarding_counterparty = Some(counterparty_node_id);
5903 let per_peer_state = self.per_peer_state.read().unwrap();
5904 let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
5905 if peer_state_mutex_opt.is_none() {
5906 forwarding_channel_not_found!(pending_forwards.drain(..));
5907 continue;
5908 }
5909 let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
5910 let peer_state = &mut *peer_state_lock;
5911 let mut draining_pending_forwards = pending_forwards.drain(..);
5912 while let Some(forward_info) = draining_pending_forwards.next() {
5913 let queue_fail_htlc_res = match forward_info {
5914 HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
5915 prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
5916 prev_user_channel_id, prev_counterparty_node_id, forward_info: PendingHTLCInfo {
5917 incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
5918 routing: PendingHTLCRouting::Forward {
5919 ref onion_packet, blinded, incoming_cltv_expiry, ..
5920 }, skimmed_fee_msat, ..
5921 },
5922 }) => {
5923 let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
5924 short_channel_id: prev_short_channel_id,
5925 user_channel_id: Some(prev_user_channel_id),
5926 counterparty_node_id: prev_counterparty_node_id,
5927 channel_id: prev_channel_id,
5928 outpoint: prev_funding_outpoint,
5929 htlc_id: prev_htlc_id,
5930 incoming_packet_shared_secret: incoming_shared_secret,
5931 phantom_shared_secret: None,
5933 blinded_failure: blinded.map(|b| b.failure),
5934 cltv_expiry: incoming_cltv_expiry,
5935 });
5936 let next_blinding_point = blinded.and_then(|b| {
5937 b.next_blinding_override.or_else(|| {
5938 let encrypted_tlvs_ss = self.node_signer.ecdh(
5939 Recipient::Node, &b.inbound_blinding_point, None
5940 ).unwrap().secret_bytes();
5941 onion_utils::next_hop_pubkey(
5942 &self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss
5943 ).ok()
5944 })
5945 });
5946
5947 let maybe_optimal_channel = peer_state.channel_by_id.values_mut().filter_map(|phase| match phase {
5952 ChannelPhase::Funded(chan) => {
5953 let balances = chan.context.get_available_balances(&self.fee_estimator);
5954 if outgoing_amt_msat <= balances.next_outbound_htlc_limit_msat &&
5955 outgoing_amt_msat >= balances.next_outbound_htlc_minimum_msat &&
5956 chan.context.is_usable() {
5957 Some((chan, balances))
5958 } else {
5959 None
5960 }
5961 },
5962 _ => None,
5963 }).min_by_key(|(_, balances)| balances.next_outbound_htlc_limit_msat).map(|(c, _)| c);
5964 let optimal_channel = match maybe_optimal_channel {
5965 Some(chan) => chan,
5966 None => {
5967 if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
5969 chan
5970 } else {
5971 forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
5972 break;
5973 }
5974 }
5975 };
5976
5977 let logger = WithChannelContext::from(&self.logger, &optimal_channel.context, Some(payment_hash));
5978 let channel_description = if optimal_channel.context.get_short_channel_id() == Some(short_chan_id) {
5979 "specified"
5980 } else {
5981 "alternate"
5982 };
5983 log_trace!(logger, "Forwarding HTLC from SCID {} with payment_hash {} and next hop SCID {} over {} channel {} with corresponding peer {}",
5984 prev_short_channel_id, &payment_hash, short_chan_id, channel_description, optimal_channel.context.channel_id(), &counterparty_node_id);
5985 if let Err(e) = optimal_channel.queue_add_htlc(outgoing_amt_msat,
5986 payment_hash, outgoing_cltv_value, htlc_source.clone(),
5987 onion_packet.clone(), skimmed_fee_msat, next_blinding_point, &self.fee_estimator,
5988 &&logger)
5989 {
5990 if let ChannelError::Ignore(msg) = e {
5991 log_trace!(logger, "Failed to forward HTLC with payment_hash {} to peer {}: {}", &payment_hash, &counterparty_node_id, msg);
5992 } else {
5993 panic!("Stated return value requirements in send_htlc() were not met");
5994 }
5995
5996 if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
5997 let failure_code = 0x1000|7;
5998 let data = self.get_htlc_inbound_temp_fail_data(failure_code);
5999 failed_forwards.push((htlc_source, payment_hash,
6000 HTLCFailReason::reason(failure_code, data),
6001 HTLCDestination::NextHopChannel { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id }
6002 ));
6003 } else {
6004 forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
6005 break;
6006 }
6007 }
6008 None
6009 },
6010 HTLCForwardInfo::AddHTLC { .. } => {
6011 panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
6012 },
6013 HTLCForwardInfo::FailHTLC { htlc_id, ref err_packet } => {
6014 if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
6015 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
6016 log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
6017 Some((chan.queue_fail_htlc(htlc_id, err_packet.clone(), &&logger), htlc_id))
6018 } else {
6019 forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
6020 break;
6021 }
6022 },
6023 HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
6024 if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
6025 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
6026 log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
6027 let res = chan.queue_fail_malformed_htlc(
6028 htlc_id, failure_code, sha256_of_onion, &&logger
6029 );
6030 Some((res, htlc_id))
6031 } else {
6032 forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
6033 break;
6034 }
6035 },
6036 };
6037 if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
6038 if let Err(e) = queue_fail_htlc_res {
6039 if let ChannelError::Ignore(msg) = e {
6040 if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
6041 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
6042 log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
6043 }
6044 } else {
6045 panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
6046 }
6047 }
6051 }
6052 }
6053 } else {
6054 'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
6055 match forward_info {
6056 HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
6057 prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
6058 prev_user_channel_id, prev_counterparty_node_id, forward_info: PendingHTLCInfo {
6059 routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat,
6060 skimmed_fee_msat, ..
6061 }
6062 }) => {
6063 let blinded_failure = routing.blinded_failure();
6064 let (
6065 cltv_expiry, onion_payload, payment_data, payment_context, phantom_shared_secret,
6066 mut onion_fields, has_recipient_created_payment_secret
6067 ) = match routing {
6068 PendingHTLCRouting::Receive {
6069 payment_data, payment_metadata, payment_context,
6070 incoming_cltv_expiry, phantom_shared_secret, custom_tlvs,
6071 requires_blinded_error: _
6072 } => {
6073 let _legacy_hop_data = Some(payment_data.clone());
6074 let onion_fields = RecipientOnionFields { payment_secret: Some(payment_data.payment_secret),
6075 payment_metadata, custom_tlvs };
6076 (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data },
6077 Some(payment_data), payment_context, phantom_shared_secret, onion_fields,
6078 true)
6079 },
6080 PendingHTLCRouting::ReceiveKeysend {
6081 payment_data, payment_preimage, payment_metadata,
6082 incoming_cltv_expiry, custom_tlvs, requires_blinded_error: _,
6083 has_recipient_created_payment_secret,
6084 } => {
6085 let onion_fields = RecipientOnionFields {
6086 payment_secret: payment_data.as_ref().map(|data| data.payment_secret),
6087 payment_metadata,
6088 custom_tlvs,
6089 };
6090 (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage),
6091 payment_data, None, None, onion_fields, has_recipient_created_payment_secret)
6092 },
6093 _ => {
6094 panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
6095 }
6096 };
6097 let claimable_htlc = ClaimableHTLC {
6098 prev_hop: HTLCPreviousHopData {
6099 short_channel_id: prev_short_channel_id,
6100 user_channel_id: Some(prev_user_channel_id),
6101 counterparty_node_id: prev_counterparty_node_id,
6102 channel_id: prev_channel_id,
6103 outpoint: prev_funding_outpoint,
6104 htlc_id: prev_htlc_id,
6105 incoming_packet_shared_secret: incoming_shared_secret,
6106 phantom_shared_secret,
6107 blinded_failure,
6108 cltv_expiry: Some(cltv_expiry),
6109 },
6110 value: incoming_amt_msat.unwrap_or(outgoing_amt_msat),
6114 sender_intended_value: outgoing_amt_msat,
6115 timer_ticks: 0,
6116 total_value_received: None,
6117 total_msat: if let Some(data) = &payment_data { data.total_msat } else { outgoing_amt_msat },
6118 cltv_expiry,
6119 onion_payload,
6120 counterparty_skimmed_fee_msat: skimmed_fee_msat,
6121 };
6122
6123 let mut committed_to_claimable = false;
6124
6125 macro_rules! fail_htlc {
6126 ($htlc: expr, $payment_hash: expr) => {
6127 debug_assert!(!committed_to_claimable);
6128 let mut htlc_msat_height_data = $htlc.value.to_be_bytes().to_vec();
6129 htlc_msat_height_data.extend_from_slice(
6130 &self.best_block.read().unwrap().height.to_be_bytes(),
6131 );
6132 failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
6133 short_channel_id: $htlc.prev_hop.short_channel_id,
6134 user_channel_id: $htlc.prev_hop.user_channel_id,
6135 counterparty_node_id: $htlc.prev_hop.counterparty_node_id,
6136 channel_id: prev_channel_id,
6137 outpoint: prev_funding_outpoint,
6138 htlc_id: $htlc.prev_hop.htlc_id,
6139 incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
6140 phantom_shared_secret,
6141 blinded_failure,
6142 cltv_expiry: Some(cltv_expiry),
6143 }), payment_hash,
6144 HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
6145 HTLCDestination::FailedPayment { payment_hash: $payment_hash },
6146 ));
6147 continue 'next_forwardable_htlc;
6148 }
6149 }
6150 let phantom_shared_secret = claimable_htlc.prev_hop.phantom_shared_secret;
6151 let mut receiver_node_id = self.our_network_pubkey;
6152 if phantom_shared_secret.is_some() {
6153 receiver_node_id = self.node_signer.get_node_id(Recipient::PhantomNode)
6154 .expect("Failed to get node_id for phantom node recipient");
6155 }
6156
6157 macro_rules! check_total_value {
6158 ($purpose: expr) => {{
6159 let mut payment_claimable_generated = false;
6160 let is_keysend = $purpose.is_keysend();
6161 let mut claimable_payments = self.claimable_payments.lock().unwrap();
6162 if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
6163 fail_htlc!(claimable_htlc, payment_hash);
6164 }
6165 let ref mut claimable_payment = claimable_payments.claimable_payments
6166 .entry(payment_hash)
6167 .or_insert_with(|| {
6169 committed_to_claimable = true;
6170 ClaimablePayment {
6171 purpose: $purpose.clone(), htlcs: Vec::new(), onion_fields: None,
6172 }
6173 });
6174 if $purpose != claimable_payment.purpose {
6175 let log_keysend = |keysend| if keysend { "keysend" } else { "non-keysend" };
6176 log_trace!(self.logger, "Failing new {} HTLC with payment_hash {} as we already had an existing {} HTLC with the same payment hash", log_keysend(is_keysend), &payment_hash, log_keysend(!is_keysend));
6177 fail_htlc!(claimable_htlc, payment_hash);
6178 }
6179 if let Some(earlier_fields) = &mut claimable_payment.onion_fields {
6180 if earlier_fields.check_merge(&mut onion_fields).is_err() {
6181 fail_htlc!(claimable_htlc, payment_hash);
6182 }
6183 } else {
6184 claimable_payment.onion_fields = Some(onion_fields);
6185 }
6186 let mut total_value = claimable_htlc.sender_intended_value;
6187 let mut earliest_expiry = claimable_htlc.cltv_expiry;
6188 for htlc in claimable_payment.htlcs.iter() {
6189 total_value += htlc.sender_intended_value;
6190 earliest_expiry = cmp::min(earliest_expiry, htlc.cltv_expiry);
6191 if htlc.total_msat != claimable_htlc.total_msat {
6192 log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
6193 &payment_hash, claimable_htlc.total_msat, htlc.total_msat);
6194 total_value = msgs::MAX_VALUE_MSAT;
6195 }
6196 if total_value >= msgs::MAX_VALUE_MSAT { break; }
6197 }
6198 if total_value >= msgs::MAX_VALUE_MSAT {
6201 fail_htlc!(claimable_htlc, payment_hash);
6202 } else if total_value - claimable_htlc.sender_intended_value >= claimable_htlc.total_msat {
6203 log_trace!(self.logger, "Failing HTLC with payment_hash {} as payment is already claimable",
6204 &payment_hash);
6205 fail_htlc!(claimable_htlc, payment_hash);
6206 } else if total_value >= claimable_htlc.total_msat {
6207 #[allow(unused_assignments)] {
6208 committed_to_claimable = true;
6209 }
6210 claimable_payment.htlcs.push(claimable_htlc);
6211 let amount_msat =
6212 claimable_payment.htlcs.iter().map(|htlc| htlc.value).sum();
6213 claimable_payment.htlcs.iter_mut()
6214 .for_each(|htlc| htlc.total_value_received = Some(amount_msat));
6215 let counterparty_skimmed_fee_msat = claimable_payment.htlcs.iter()
6216 .map(|htlc| htlc.counterparty_skimmed_fee_msat.unwrap_or(0)).sum();
6217 debug_assert!(total_value.saturating_sub(amount_msat) <=
6218 counterparty_skimmed_fee_msat);
6219 claimable_payment.htlcs.sort();
6220 let payment_id =
6221 claimable_payment.inbound_payment_id(&self.inbound_payment_id_secret);
6222 new_events.push_back((events::Event::PaymentClaimable {
6223 receiver_node_id: Some(receiver_node_id),
6224 payment_hash,
6225 purpose: $purpose,
6226 amount_msat,
6227 counterparty_skimmed_fee_msat,
6228 via_channel_id: Some(prev_channel_id),
6229 via_user_channel_id: Some(prev_user_channel_id),
6230 claim_deadline: Some(earliest_expiry - HTLC_FAIL_BACK_BUFFER),
6231 onion_fields: claimable_payment.onion_fields.clone(),
6232 payment_id: Some(payment_id),
6233 }, None));
6234 payment_claimable_generated = true;
6235 } else {
6236 claimable_payment.htlcs.push(claimable_htlc);
6240 #[allow(unused_assignments)] {
6241 committed_to_claimable = true;
6242 }
6243 }
6244 payment_claimable_generated
6245 }}
6246 }
6247
6248 let payment_preimage = if has_recipient_created_payment_secret {
6255 if let Some(ref payment_data) = payment_data {
6256 let (payment_preimage, min_final_cltv_expiry_delta) = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
6257 Ok(result) => result,
6258 Err(()) => {
6259 log_trace!(self.logger, "Failing new HTLC with payment_hash {} as payment verification failed", &payment_hash);
6260 fail_htlc!(claimable_htlc, payment_hash);
6261 }
6262 };
6263 if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta {
6264 let expected_min_expiry_height = (self.current_best_block().height + min_final_cltv_expiry_delta as u32) as u64;
6265 if (cltv_expiry as u64) < expected_min_expiry_height {
6266 log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})",
6267 &payment_hash, cltv_expiry, expected_min_expiry_height);
6268 fail_htlc!(claimable_htlc, payment_hash);
6269 }
6270 }
6271 payment_preimage
6272 } else { fail_htlc!(claimable_htlc, payment_hash); }
6273 } else { None };
6274 match claimable_htlc.onion_payload {
6275 OnionPayload::Invoice { .. } => {
6276 let payment_data = payment_data.unwrap();
6277 let purpose = events::PaymentPurpose::from_parts(
6278 payment_preimage,
6279 payment_data.payment_secret,
6280 payment_context,
6281 );
6282 check_total_value!(purpose);
6283 },
6284 OnionPayload::Spontaneous(preimage) => {
6285 let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
6286 check_total_value!(purpose);
6287 }
6288 }
6289 },
6290 HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. } => {
6291 panic!("Got pending fail of our own HTLC");
6292 }
6293 }
6294 }
6295 }
6296 }
6297 }
6298
6299 let best_block_height = self.best_block.read().unwrap().height;
6300 self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(),
6301 || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
6302 &self.pending_events, &self.logger, |args| self.send_payment_along_path(args));
6303
6304 for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
6305 self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
6306 }
6307 self.forward_htlcs(&mut phantom_receives);
6308
6309 self.check_free_holding_cells();
6314
6315 if new_events.is_empty() { return }
6316 let mut events = self.pending_events.lock().unwrap();
6317 events.append(&mut new_events);
6318 }
6319
/// Drains and handles `pending_background_events` — primarily
/// [`ChannelMonitorUpdate`]s regenerated on startup and their completion
/// notifications — returning whether the `ChannelManager` needs persisting.
///
/// Returns [`NotifyOption::SkipPersistNoEvents`] when the queue was empty,
/// otherwise [`NotifyOption::DoPersist`].
fn process_background_events(&self) -> NotifyOption {
	// Callers must hold the total_consistency_lock (read or write) already.
	debug_assert_ne!(self.total_consistency_lock.held_by_thread(), LockHeldState::NotHeldByThread);

	self.background_events_processed_since_startup.store(true, Ordering::Release);

	// Swap the queue out under its lock rather than holding the lock while
	// processing, as handling each event takes other (peer/monitor) locks.
	let mut background_events = Vec::new();
	mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
	if background_events.is_empty() {
		return NotifyOption::SkipPersistNoEvents;
	}

	for event in background_events.drain(..) {
		match event {
			BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, _channel_id, update)) => {
				// The channel is already closed; the update result is
				// intentionally ignored here.
				let _ = self.chain_monitor.update_channel(funding_txo, &update);
			},
			BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
				self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
			},
			BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
				let per_peer_state = self.per_peer_state.read().unwrap();
				if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
					let mut peer_state_lock = peer_state_mutex.lock().unwrap();
					let peer_state = &mut *peer_state_lock;
					if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
						handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
					} else {
						// Channel no longer exists in a funded state; run any
						// blocked completion actions directly, dropping the
						// peer locks first as the actions may re-take them.
						let update_actions = peer_state.monitor_update_blocked_actions
							.remove(&channel_id).unwrap_or(Vec::new());
						mem::drop(peer_state_lock);
						mem::drop(per_peer_state);
						self.handle_monitor_update_completion_actions(update_actions);
					}
				}
			},
		}
	}
	NotifyOption::DoPersist
}
6364
/// Test-only wrapper around [`Self::process_background_events`] which takes
/// the consistency lock, as `process_background_events` requires.
#[cfg(any(test, feature = "_test_utils"))]
pub fn test_process_background_events(&self) {
	// process_background_events debug_asserts that this lock is held.
	let _consistency_read_guard = self.total_consistency_lock.read().unwrap();
	let _ = self.process_background_events();
}
6371
6372 fn update_channel_fee(&self, chan_id: &ChannelId, chan: &mut Channel<SP>, new_feerate: u32) -> NotifyOption {
6373 if !chan.context.is_outbound() { return NotifyOption::SkipPersistNoEvents; }
6374
6375 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
6376
6377 if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
6379 return NotifyOption::SkipPersistNoEvents;
6380 }
6381 if !chan.context.is_live() {
6382 log_trace!(logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
6383 chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
6384 return NotifyOption::SkipPersistNoEvents;
6385 }
6386 log_trace!(logger, "Channel {} qualifies for a feerate change from {} to {}.",
6387 &chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
6388
6389 chan.queue_update_fee(new_feerate, &self.fee_estimator, &&logger);
6390 NotifyOption::DoPersist
6391 }
6392
/// Fuzzing-only entry point that re-checks every funded channel's feerate
/// against the current estimator targets, queueing `update_fee` where needed.
#[cfg(fuzzing)]
pub fn maybe_update_chan_fees(&self) {
	PersistenceNotifierGuard::optionally_notify(self, || {
		let mut should_persist = NotifyOption::SkipPersistNoEvents;

		// Fetch both target feerates up front; which one applies depends on
		// each channel's type below.
		let non_anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
		let anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee);

		let per_peer_state = self.per_peer_state.read().unwrap();
		for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			for (chan_id, phase) in peer_state_lock.channel_by_id.iter_mut() {
				// Unfunded channels have no feerate to update.
				let chan = match phase {
					ChannelPhase::Funded(chan) => chan,
					_ => continue,
				};
				let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
					anchor_feerate
				} else {
					non_anchor_feerate
				};
				if self.update_channel_fee(chan_id, chan, new_feerate) == NotifyOption::DoPersist {
					should_persist = NotifyOption::DoPersist;
				}
			}
		}

		should_persist
	});
}
6425
/// Performs actions which should happen roughly once a minute: feerate
/// re-checks, closing-negotiation progress checks, gossip enable/disable
/// staging, expiry of unfunded channels and unaccepted inbound requests,
/// MPP receive timeouts, stale-peer and stale-payment cleanup.
pub fn timer_tick_occurred(&self) {
	PersistenceNotifierGuard::optionally_notify(self, || {
		let mut should_persist = NotifyOption::SkipPersistNoEvents;

		let non_anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
		let anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee);

		// Work collected under the per-peer locks and processed after they are
		// released, to keep lock scopes small and avoid re-entrancy.
		let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
		let mut timed_out_mpp_htlcs = Vec::new();
		let mut pending_peers_awaiting_removal = Vec::new();
		let mut shutdown_channels = Vec::new();

		// Shared tick handling for all unfunded channel phases: expire stale
		// negotiated configs and force-close channels that never completed the
		// establishment handshake. Evaluates to the `retain` keep/drop bool.
		macro_rules! process_unfunded_channel_tick {
			($peer_state: expr, $chan: expr, $pending_msg_events: expr) => { {
				let context = &mut $chan.context;
				context.maybe_expire_prev_config();
				if $chan.unfunded_context.should_expire_unfunded_channel() {
					let logger = WithChannelContext::from(&self.logger, context, None);
					log_error!(logger,
						"Force-closing pending channel with ID {} for not establishing in a timely manner",
						context.channel_id());
					let mut close_res = context.force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) });
					locked_close_channel!(self, $peer_state, context, close_res);
					shutdown_channels.push(close_res);
					$pending_msg_events.push(MessageSendEvent::HandleError {
						node_id: context.get_counterparty_node_id(),
						action: msgs::ErrorAction::SendErrorMessage {
							msg: msgs::ErrorMessage {
								channel_id: context.channel_id(),
								data: "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(),
							},
						},
					});
					false
				} else {
					true
				}
			} }
		}

		{
			let per_peer_state = self.per_peer_state.read().unwrap();
			for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
				let peer_state = &mut *peer_state_lock;
				let pending_msg_events = &mut peer_state.pending_msg_events;
				let counterparty_node_id = *counterparty_node_id;
				// Returning false from this closure drops the channel.
				peer_state.channel_by_id.retain(|chan_id, phase| {
					match phase {
						ChannelPhase::Funded(chan) => {
							let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
								anchor_feerate
							} else {
								non_anchor_feerate
							};
							let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
							if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }

							if let Err(e) = chan.timer_check_closing_negotiation_progress() {
								let (needs_close, err) = convert_chan_phase_err!(self, peer_state, e, chan, chan_id, FUNDED_CHANNEL);
								handle_errors.push((Err(err), counterparty_node_id));
								if needs_close { return false; }
							}

							// Stage gossip enable/disable transitions: a channel
							// must stay (not-)live for DISABLE_GOSSIP_TICKS /
							// ENABLE_GOSSIP_TICKS ticks before we broadcast the
							// corresponding channel_update.
							match chan.channel_update_status() {
								ChannelUpdateStatus::Enabled if !chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)),
								ChannelUpdateStatus::Disabled if chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)),
								ChannelUpdateStatus::DisabledStaged(_) if chan.context.is_live()
									=> chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
								ChannelUpdateStatus::EnabledStaged(_) if !chan.context.is_live()
									=> chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
								ChannelUpdateStatus::DisabledStaged(mut n) if !chan.context.is_live() => {
									n += 1;
									if n >= DISABLE_GOSSIP_TICKS {
										chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
										if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
											let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
											pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
												msg: update
											});
										}
										should_persist = NotifyOption::DoPersist;
									} else {
										chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(n));
									}
								},
								ChannelUpdateStatus::EnabledStaged(mut n) if chan.context.is_live() => {
									n += 1;
									if n >= ENABLE_GOSSIP_TICKS {
										chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
										if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
											let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
											pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
												msg: update
											});
										}
										should_persist = NotifyOption::DoPersist;
									} else {
										chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(n));
									}
								},
								_ => {},
							}

							chan.context.maybe_expire_prev_config();

							if chan.should_disconnect_peer_awaiting_response() {
								let logger = WithChannelContext::from(&self.logger, &chan.context, None);
								log_debug!(logger, "Disconnecting peer {} due to not making any progress on channel {}",
										counterparty_node_id, chan_id);
								pending_msg_events.push(MessageSendEvent::HandleError {
									node_id: counterparty_node_id,
									action: msgs::ErrorAction::DisconnectPeerWithWarning {
										msg: msgs::WarningMessage {
											channel_id: *chan_id,
											data: "Disconnecting due to timeout awaiting response".to_owned(),
										},
									},
								});
							}

							true
						},
						ChannelPhase::UnfundedInboundV1(chan) => {
							process_unfunded_channel_tick!(peer_state, chan, pending_msg_events)
						},
						ChannelPhase::UnfundedOutboundV1(chan) => {
							process_unfunded_channel_tick!(peer_state, chan, pending_msg_events)
						},
						ChannelPhase::UnfundedInboundV2(chan) => {
							process_unfunded_channel_tick!(peer_state, chan, pending_msg_events)
						},
						ChannelPhase::UnfundedOutboundV2(chan) => {
							process_unfunded_channel_tick!(peer_state, chan, pending_msg_events)
						},
					}
				});

				// Count down and error out inbound channel requests the user
				// never accepted; the retain below actually removes them.
				for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() {
					if { req.ticks_remaining -= 1 ; req.ticks_remaining } <= 0 {
						let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*chan_id), None);
						log_error!(logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id);
						peer_state.pending_msg_events.push(
							events::MessageSendEvent::HandleError {
								node_id: counterparty_node_id,
								action: msgs::ErrorAction::SendErrorMessage {
									msg: msgs::ErrorMessage { channel_id: chan_id.clone(), data: "Channel force-closed".to_owned() }
								},
							}
						);
					}
				}
				peer_state.inbound_channel_request_by_id.retain(|_, req| req.ticks_remaining > 0);

				if peer_state.ok_to_remove(true) {
					pending_peers_awaiting_removal.push(counterparty_node_id);
				}
			}
		}

		if pending_peers_awaiting_removal.len() > 0 {
			// Re-take the map write lock and re-check ok_to_remove under it,
			// since the peer's state may have changed since the read pass.
			let mut per_peer_state = self.per_peer_state.write().unwrap();
			for counterparty_node_id in pending_peers_awaiting_removal {
				match per_peer_state.entry(counterparty_node_id) {
					hash_map::Entry::Occupied(entry) => {
						let remove_entry = {
							let peer_state = entry.get().lock().unwrap();
							peer_state.ok_to_remove(true)
						};
						if remove_entry {
							entry.remove_entry();
						}
					},
					hash_map::Entry::Vacant(_) => { }
				}
			}
		}

		// Time out incomplete MPP receives: if the HTLCs received so far don't
		// cover total_msat and any part has waited MPP_TIMEOUT_TICKS, fail all
		// parts of that payment back.
		self.claimable_payments.lock().unwrap().claimable_payments.retain(|payment_hash, payment| {
			if payment.htlcs.is_empty() {
				// An entry should never exist with no HTLCs.
				debug_assert!(false);
				return false;
			}
			if let OnionPayload::Invoice { .. } = payment.htlcs[0].onion_payload {
				if payment.htlcs[0].total_msat <= payment.htlcs.iter()
					.fold(0, |total, htlc| total + htlc.sender_intended_value)
				{
					return true;
				} else if payment.htlcs.iter_mut().any(|htlc| {
					htlc.timer_ticks += 1;
					return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
				}) {
					timed_out_mpp_htlcs.extend(payment.htlcs.drain(..)
						.map(|htlc: ClaimableHTLC| (htlc.prev_hop, *payment_hash)));
					return false;
				}
			}
			true
		});

		for htlc_source in timed_out_mpp_htlcs.drain(..) {
			let source = HTLCSource::PreviousHopData(htlc_source.0.clone());
			// 23 is mpp_timeout per BOLT 4 - NOTE(review): confirm against msgs constants.
			let reason = HTLCFailReason::from_failure_code(23);
			let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
			self.fail_htlc_backwards_internal(&source, &htlc_source.1, &reason, receiver);
		}

		for (err, counterparty_node_id) in handle_errors.drain(..) {
			let _ = handle_error!(self, err, counterparty_node_id);
		}

		for shutdown_res in shutdown_channels {
			self.finish_close_channel(shutdown_res);
		}

		// Without std we approximate "now" from the highest timestamp seen in
		// gossip, backed off by two hours.
		#[cfg(feature = "std")]
		let duration_since_epoch = std::time::SystemTime::now()
			.duration_since(std::time::SystemTime::UNIX_EPOCH)
			.expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
		#[cfg(not(feature = "std"))]
		let duration_since_epoch = Duration::from_secs(
			self.highest_seen_timestamp.load(Ordering::Acquire).saturating_sub(7200) as u64
		);

		self.pending_outbound_payments.remove_stale_payments(
			duration_since_epoch, &self.pending_events
		);

		if self.check_free_holding_cells() {
			should_persist = NotifyOption::DoPersist;
		}

		should_persist
	});
}
6699
6700 pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
6714 self.fail_htlc_backwards_with_reason(payment_hash, FailureCode::IncorrectOrUnknownPaymentDetails);
6715 }
6716
6717 pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: FailureCode) {
6722 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
6723
6724 let removed_source = self.claimable_payments.lock().unwrap().claimable_payments.remove(payment_hash);
6725 if let Some(payment) = removed_source {
6726 for htlc in payment.htlcs {
6727 let reason = self.get_htlc_fail_reason_from_failure_code(failure_code, &htlc);
6728 let source = HTLCSource::PreviousHopData(htlc.prev_hop);
6729 let receiver = HTLCDestination::FailedPayment { payment_hash: *payment_hash };
6730 self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
6731 }
6732 }
6733 }
6734
6735 fn get_htlc_fail_reason_from_failure_code(&self, failure_code: FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason {
6737 match failure_code {
6738 FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(failure_code.into()),
6739 FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(failure_code.into()),
6740 FailureCode::IncorrectOrUnknownPaymentDetails => {
6741 let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
6742 htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes());
6743 HTLCFailReason::reason(failure_code.into(), htlc_msat_height_data)
6744 },
6745 FailureCode::InvalidOnionPayload(data) => {
6746 let fail_data = match data {
6747 Some((typ, offset)) => [BigSize(typ).encode(), offset.encode()].concat(),
6748 None => Vec::new(),
6749 };
6750 HTLCFailReason::reason(failure_code.into(), fail_data)
6751 }
6752 }
6753 }
6754
6755 fn get_htlc_inbound_temp_fail_data(&self, err_code: u16) -> Vec<u8> {
6761 debug_assert_eq!(err_code & 0x1000, 0x1000);
6762 debug_assert_ne!(err_code, 0x1000|11);
6763 debug_assert_ne!(err_code, 0x1000|12);
6764 debug_assert_ne!(err_code, 0x1000|13);
6765 let mut enc = VecWriter(Vec::with_capacity(4));
6767 if err_code == 0x1000 | 20 {
6768 0u16.write(&mut enc).expect("Writes cannot fail");
6771 }
6772 (0u16).write(&mut enc).expect("Writes cannot fail");
6774 enc.0
6775 }
6776
6777 fn fail_holding_cell_htlcs(
6781 &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: ChannelId,
6782 counterparty_node_id: &PublicKey
6783 ) {
6784 let (failure_code, onion_failure_data) = {
6785 let per_peer_state = self.per_peer_state.read().unwrap();
6786 if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
6787 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
6788 let peer_state = &mut *peer_state_lock;
6789 match peer_state.channel_by_id.entry(channel_id) {
6790 hash_map::Entry::Occupied(chan_phase_entry) => {
6791 if let ChannelPhase::Funded(_chan) = chan_phase_entry.get() {
6792 let failure_code = 0x1000|7;
6793 let data = self.get_htlc_inbound_temp_fail_data(failure_code);
6794 (failure_code, data)
6795 } else {
6796 debug_assert!(false);
6798 (0x4000|10, Vec::new())
6799 }
6800 },
6801 hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
6802 }
6803 } else { (0x4000|10, Vec::new()) }
6804 };
6805
6806 for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
6807 let reason = HTLCFailReason::reason(failure_code, onion_failure_data.clone());
6808 let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id };
6809 self.fail_htlc_backwards_internal(&htlc_src, &payment_hash, &reason, receiver);
6810 }
6811 }
6812
6813 fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
6814 let push_forward_event = self.fail_htlc_backwards_internal_without_forward_event(source, payment_hash, onion_error, destination);
6815 if push_forward_event { self.push_pending_forwards_ev(); }
6816 }
6817
/// Fails an HTLC backwards without generating the pending-forwards event,
/// returning whether the caller should generate that event itself.
fn fail_htlc_backwards_internal_without_forward_event(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) -> bool {
	// We must not already hold any peer-state lock on this thread, as we take
	// per_peer_state (and other locks) below.
	#[cfg(debug_assertions)]
	for (_, peer) in self.per_peer_state.read().unwrap().iter() {
		debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
	}

	let mut push_forward_event;
	match source {
		HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => {
			// We originated this HTLC: hand failure handling (retries,
			// payment-failed events) to the outbound payments tracker.
			push_forward_event = self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path,
				session_priv, payment_id, self.probing_cookie_secret, &self.secp_ctx,
				&self.pending_events, &self.logger);
		},
		HTLCSource::PreviousHopData(HTLCPreviousHopData {
			ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret,
			ref phantom_shared_secret, outpoint: _, ref blinded_failure, ref channel_id, ..
		}) => {
			log_trace!(
				WithContext::from(&self.logger, None, Some(*channel_id), Some(*payment_hash)),
				"Failing {}HTLC with payment_hash {} backwards from us: {:?}",
				if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error
			);
			// For HTLCs received over a blinded path, the real error is
			// replaced with a generic invalid_onion_blinding failure.
			let failure = match blinded_failure {
				Some(BlindedFailure::FromIntroductionNode) => {
					// As the introduction node, send an encrypted
					// invalid_onion_blinding error packet.
					let blinded_onion_error = HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32]);
					let err_packet = blinded_onion_error.get_encrypted_failure_packet(
						incoming_packet_shared_secret, phantom_shared_secret
					);
					HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
				},
				Some(BlindedFailure::FromBlindedNode) => {
					// Within the blinded path, fail via
					// update_fail_malformed_htlc with a zero onion hash.
					HTLCForwardInfo::FailMalformedHTLC {
						htlc_id: *htlc_id,
						failure_code: INVALID_ONION_BLINDING,
						sha256_of_onion: [0; 32]
					}
				},
				None => {
					let err_packet = onion_error.get_encrypted_failure_packet(
						incoming_packet_shared_secret, phantom_shared_secret
					);
					HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
				}
			};

			// Only request a forwards event when both queues were empty before
			// this insert (NOTE(review): presumably an event is already
			// pending otherwise - confirm against push_pending_forwards_ev).
			push_forward_event = self.decode_update_add_htlcs.lock().unwrap().is_empty();
			let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
			push_forward_event &= forward_htlcs.is_empty();
			match forward_htlcs.entry(*short_channel_id) {
				hash_map::Entry::Occupied(mut entry) => {
					entry.get_mut().push(failure);
				},
				hash_map::Entry::Vacant(entry) => {
					entry.insert(vec!(failure));
				}
			}
			// Drop the forward_htlcs lock before taking pending_events.
			mem::drop(forward_htlcs);
			let mut pending_events = self.pending_events.lock().unwrap();
			pending_events.push_back((events::Event::HTLCHandlingFailed {
				prev_channel_id: *channel_id,
				failed_next_destination: destination,
			}, None));
		},
	}
	push_forward_event
}
6898
6899 pub fn claim_funds(&self, payment_preimage: PaymentPreimage) {
6924 self.claim_payment_internal(payment_preimage, false);
6925 }
6926
6927 pub fn claim_funds_with_known_custom_tlvs(&self, payment_preimage: PaymentPreimage) {
6937 self.claim_payment_internal(payment_preimage, true);
6938 }
6939
/// Shared implementation of [`Self::claim_funds`] and
/// [`Self::claim_funds_with_known_custom_tlvs`]: validates that the claimable
/// HTLCs form a complete, consistent (possibly multi-part) payment and claims
/// each part, or fails everything backwards otherwise.
fn claim_payment_internal(&self, payment_preimage: PaymentPreimage, custom_tlvs_known: bool) {
	let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).to_byte_array());

	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

	// Move the payment from claimable to pending-claiming. On Err (e.g. the
	// payment had custom TLVs the caller didn't accept) fail each HTLC back.
	let (sources, claiming_payment) = {
		let res = self.claimable_payments.lock().unwrap().begin_claiming_payment(
			payment_hash, &self.node_signer, &self.logger, &self.inbound_payment_id_secret,
			custom_tlvs_known,
		);

		match res {
			Ok((htlcs, payment_info)) => (htlcs, payment_info),
			Err(htlcs) => {
				for htlc in htlcs {
					let reason = self.get_htlc_fail_reason_from_failure_code(FailureCode::InvalidOnionPayload(None), &htlc);
					let source = HTLCSource::PreviousHopData(htlc.prev_hop);
					let receiver = HTLCDestination::FailedPayment { payment_hash };
					self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
				}
				return;
			}
		}
	};
	debug_assert!(!sources.is_empty());

	// Cross-check all parts of the (possibly MPP) payment: they must agree on
	// total_msat and total_value_received, and their values must sum to the
	// expected total, before we claim anything.
	let mut claimable_amt_msat = 0;
	let mut prev_total_msat = None;
	let mut expected_amt_msat = None;
	let mut valid_mpp = true;
	let mut errs = Vec::new();
	let per_peer_state = self.per_peer_state.read().unwrap();
	for htlc in sources.iter() {
		if prev_total_msat.is_some() && prev_total_msat != Some(htlc.total_msat) {
			log_error!(self.logger, "Somehow ended up with an MPP payment with different expected total amounts - this should not be reachable!");
			debug_assert!(false);
			valid_mpp = false;
			break;
		}
		prev_total_msat = Some(htlc.total_msat);

		if expected_amt_msat.is_some() && expected_amt_msat != htlc.total_value_received {
			log_error!(self.logger, "Somehow ended up with an MPP payment with different received total amounts - this should not be reachable!");
			debug_assert!(false);
			valid_mpp = false;
			break;
		}
		expected_amt_msat = htlc.total_value_received;
		claimable_amt_msat += htlc.value;
	}
	mem::drop(per_peer_state);
	if sources.is_empty() || expected_amt_msat.is_none() {
		self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
		log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!");
		return;
	}
	if claimable_amt_msat != expected_amt_msat.unwrap() {
		self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
		log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.",
			expected_amt_msat.unwrap(), claimable_amt_msat);
		return;
	}
	if valid_mpp {
		// Per-part claim sources; parts lacking a counterparty node id cannot
		// be tracked here and are filtered out.
		let mpp_parts: Vec<_> = sources.iter().filter_map(|htlc| {
			if let Some(cp_id) = htlc.prev_hop.counterparty_node_id {
				Some(MPPClaimHTLCSource {
					counterparty_node_id: cp_id,
					funding_txo: htlc.prev_hop.outpoint,
					channel_id: htlc.prev_hop.channel_id,
					htlc_id: htlc.prev_hop.htlc_id,
				})
			} else {
				None
			}
		}).collect();
		// For multi-part payments, track which parts still need their
		// preimage in a shared structure referenced by each claim below.
		let pending_mpp_claim_ptr_opt = if sources.len() > 1 {
			Some(Arc::new(Mutex::new(PendingMPPClaim {
				channels_without_preimage: mpp_parts.clone(),
				channels_with_preimage: Vec::new(),
			})))
		} else {
			None
		};
		let payment_info = Some(PaymentClaimDetails { mpp_parts, claiming_payment });
		for htlc in sources {
			let this_mpp_claim = pending_mpp_claim_ptr_opt.as_ref().and_then(|pending_mpp_claim|
				if let Some(cp_id) = htlc.prev_hop.counterparty_node_id {
					let claim_ptr = PendingMPPClaimPointer(Arc::clone(pending_mpp_claim));
					Some((cp_id, htlc.prev_hop.channel_id, htlc.prev_hop.htlc_id, claim_ptr))
				} else {
					None
				}
			);
			// Block RAA-driven monitor updates until the MPP claim completes.
			let raa_blocker = pending_mpp_claim_ptr_opt.as_ref().map(|pending_claim| {
				RAAMonitorUpdateBlockingAction::ClaimedMPPPayment {
					pending_claim: PendingMPPClaimPointer(Arc::clone(pending_claim)),
				}
			});
			self.claim_funds_from_hop(
				htlc.prev_hop, payment_preimage, payment_info.clone(),
				|_, definitely_duplicate| {
					debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment");
					(Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim: this_mpp_claim }), raa_blocker)
				}
			);
		}
	} else {
		// Inconsistent MPP parts: fail everything back with
		// incorrect_or_unknown_payment_details (0x4000|15) and the usual
		// amount+height failure data.
		for htlc in sources {
			let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
			htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes());
			let source = HTLCSource::PreviousHopData(htlc.prev_hop);
			let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data);
			let receiver = HTLCDestination::FailedPayment { payment_hash };
			self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
		}
		self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
	}

	for (counterparty_node_id, err) in errs.drain(..) {
		let res: Result<(), _> = Err(err);
		let _ = handle_error!(self, res, counterparty_node_id);
	}
}
7068
7069 fn claim_funds_from_hop<
7070 ComplFunc: FnOnce(Option<u64>, bool) -> (Option<MonitorUpdateCompletionAction>, Option<RAAMonitorUpdateBlockingAction>)
7071 >(
7072 &self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage,
7073 payment_info: Option<PaymentClaimDetails>, completion_action: ComplFunc,
7074 ) {
7075 let counterparty_node_id = prev_hop.counterparty_node_id.or_else(|| {
7076 let short_to_chan_info = self.short_to_chan_info.read().unwrap();
7077 short_to_chan_info.get(&prev_hop.short_channel_id).map(|(cp_id, _)| *cp_id)
7078 });
7079
7080 let htlc_source = HTLCClaimSource {
7081 counterparty_node_id,
7082 funding_txo: prev_hop.outpoint,
7083 channel_id: prev_hop.channel_id,
7084 htlc_id: prev_hop.htlc_id,
7085 };
7086 self.claim_mpp_part(htlc_source, payment_preimage, payment_info, completion_action)
7087 }
7088
	/// Claims a single (MPP part of a) payment's HTLC against the channel identified by
	/// `prev_hop`, updating the channel state and queueing the required `ChannelMonitorUpdate`.
	///
	/// If the channel is still live and funded, the claim goes through the `Channel` itself; if
	/// the channel is gone, a preimage-only monitor update is pushed directly against the closed
	/// channel's monitor. `completion_action` produces the completion action and RAA blocker to
	/// track once the monitor update completes.
	fn claim_mpp_part<
		ComplFunc: FnOnce(Option<u64>, bool) -> (Option<MonitorUpdateCompletionAction>, Option<RAAMonitorUpdateBlockingAction>)
	>(
		&self, prev_hop: HTLCClaimSource, payment_preimage: PaymentPreimage,
		payment_info: Option<PaymentClaimDetails>, completion_action: ComplFunc,
	) {
		// True until startup background events have been processed, i.e. we're replaying claims.
		let during_init = !self.background_events_processed_since_startup.load(Ordering::Acquire);

		// We must not already hold these locks: we take peer-state locks below and may call back
		// into completion-action handling, which takes them itself.
		debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
		debug_assert_ne!(self.claimable_payments.held_by_thread(), LockHeldState::HeldByThread);

		let per_peer_state = self.per_peer_state.read().unwrap();
		let chan_id = prev_hop.channel_id;

		const MISSING_MON_ERROR: &'static str =
			"If we're going to claim an HTLC against a channel, we should always have *some* state for the channel, even if just the latest ChannelMonitor update_id. This failure indicates we need to claim an HTLC from a channel for which we did not have a ChannelMonitor at startup and didn't create one while running.";

		// If we know the counterparty, its peer-state entry must exist (see MISSING_MON_ERROR).
		let mut peer_state_opt = prev_hop.counterparty_node_id.as_ref().map(
			|counterparty_node_id| per_peer_state.get(counterparty_node_id)
				.map(|peer_mutex| peer_mutex.lock().unwrap())
				.expect(MISSING_MON_ERROR)
		);

		if let Some(peer_state_lock) = peer_state_opt.as_mut() {
			let peer_state = &mut **peer_state_lock;
			if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(chan_id) {
				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
					let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, payment_info, &&logger);

					match fulfill_res {
						UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } => {
							// A fresh claim: record the completion action and RAA blocker before
							// handing the monitor update off for persistence.
							let (action_opt, raa_blocker_opt) = completion_action(Some(htlc_value_msat), false);
							if let Some(action) = action_opt {
								log_trace!(logger, "Tracking monitor update completion action for channel {}: {:?}",
									chan_id, action);
								peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
							}
							if let Some(raa_blocker) = raa_blocker_opt {
								peer_state.actions_blocking_raa_monitor_updates.entry(chan_id).or_insert_with(Vec::new).push(raa_blocker);
							}
							handle_new_monitor_update!(self, prev_hop.funding_txo, monitor_update, peer_state_opt,
								peer_state, per_peer_state, chan);
						}
						UpdateFulfillCommitFetch::DuplicateClaim {} => {
							// The channel already knows this preimage. Any RAA blocker should
							// already be tracked (unless we're replaying during init).
							let (action_opt, raa_blocker_opt) = completion_action(None, true);
							if let Some(raa_blocker) = raa_blocker_opt {
								debug_assert!(during_init ||
									peer_state.actions_blocking_raa_monitor_updates.get(&chan_id).unwrap().contains(&raa_blocker));
							}
							let action = if let Some(action) = action_opt {
								action
							} else {
								return;
							};

							// Drop this peer's lock before touching another peer's state below.
							mem::drop(peer_state_opt);

							log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}",
								chan_id, action);
							if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
								downstream_counterparty_node_id: node_id,
								downstream_funding_outpoint: _,
								blocking_action: blocker, downstream_channel_id: channel_id,
							} = action {
								// Remove exactly one matching blocker from the downstream
								// channel's RAA-blocker list (the first occurrence).
								if let Some(peer_state_mtx) = per_peer_state.get(&node_id) {
									let mut peer_state = peer_state_mtx.lock().unwrap();
									if let Some(blockers) = peer_state
										.actions_blocking_raa_monitor_updates
										.get_mut(&channel_id)
									{
										let mut found_blocker = false;
										blockers.retain(|iter| {
											// Keep everything except the first element equal to
											// `blocker`.
											let first_blocker = !found_blocker;
											if *iter == blocker { found_blocker = true; }
											*iter != blocker || !first_blocker
										});
										debug_assert!(found_blocker);
									}
								} else {
									debug_assert!(false);
								}
							} else if matches!(action, MonitorUpdateCompletionAction::PaymentClaimed { .. }) {
								debug_assert!(during_init,
									"Duplicate claims should always either be for forwarded payments(freeing another channel immediately) or during init (for claim replay)");
								mem::drop(per_peer_state);
								self.handle_monitor_update_completion_actions([action]);
							} else {
								debug_assert!(false,
									"Duplicate claims should always either be for forwarded payments(freeing another channel immediately) or during init (for claim replay)");
								return;
							};
						}
					}
				}
				// The channel entry existed (funded or not) - nothing further to do here.
				return;
			}
		}

		// Past this point the channel no longer exists in `channel_by_id`; we must write the
		// preimage directly into the (closed) channel's monitor. That requires knowing the
		// counterparty, which pre-0.1 HTLC data may lack - hence the upgrade panic below.
		if prev_hop.counterparty_node_id.is_none() {
			let payment_hash: PaymentHash = payment_preimage.into();
			panic!(
				"Prior to upgrading to LDK 0.1, all pending HTLCs forwarded by LDK 0.0.123 or before must be resolved. It appears at least the HTLC with payment_hash {} (preimage {}) was not resolved. Please downgrade to LDK 0.0.125 and resolve the HTLC prior to upgrading.",
				payment_hash,
				payment_preimage,
			);
		}
		let counterparty_node_id = prev_hop.counterparty_node_id.expect("Checked immediately above");
		let mut peer_state = peer_state_opt.expect("peer_state_opt is always Some when the counterparty_node_id is Some");

		// Closed channels track their latest monitor update id separately; bump it for this
		// update. Its absence indicates an internal invariant violation.
		let update_id = if let Some(latest_update_id) = peer_state.closed_channel_monitor_update_ids.get_mut(&chan_id) {
			*latest_update_id = latest_update_id.saturating_add(1);
			*latest_update_id
		} else {
			let err = "We need the latest ChannelMonitorUpdate ID to build a new update.
This should have been checked for availability on startup but somehow it is no longer available.
This indicates a bug inside LDK. Please report this error at https://github.com/lightningdevkit/rust-lightning/issues/new";
			log_error!(self.logger, "{}", err);
			panic!("{}", err);
		};

		let preimage_update = ChannelMonitorUpdate {
			update_id,
			counterparty_node_id: Some(counterparty_node_id),
			updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
				payment_preimage,
				payment_info,
			}],
			channel_id: Some(prev_hop.channel_id),
		};

		// No HTLC value is available for closed-channel claims, and this is not a duplicate.
		let (action_opt, raa_blocker_opt) = completion_action(None, false);

		if let Some(raa_blocker) = raa_blocker_opt {
			peer_state.actions_blocking_raa_monitor_updates
				.entry(prev_hop.channel_id)
				.or_default()
				.push(raa_blocker);
		}

		let payment_hash = payment_preimage.into();
		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(chan_id), Some(payment_hash));

		if let Some(action) = action_opt {
			log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
				chan_id, action);
			peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
		}

		handle_new_monitor_update!(
			self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state,
			counterparty_node_id, chan_id, POST_CHANNEL_CLOSE
		);
	}
7270
	/// Marks the given outbound HTLC sources as fully resolved, letting
	/// `pending_outbound_payments` queue any resulting events on `pending_events`.
	fn finalize_claims(&self, sources: Vec<HTLCSource>) {
		self.pending_outbound_payments.finalize_claims(sources, &self.pending_events);
	}
7274
	/// Resolves an HTLC for which we learned the preimage from downstream (either from an
	/// `update_fulfill_htlc` message or, if `from_onchain`, an on-chain claim).
	///
	/// For HTLCs we originated (`OutboundRoute`), the claim is handed to
	/// `pending_outbound_payments`. For HTLCs we forwarded (`PreviousHopData`), the HTLC is
	/// claimed backwards on the channel it arrived on and a `PaymentForwarded` event is queued
	/// once the resulting monitor update completes.
	fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
		forwarded_htlc_value_msat: Option<u64>, skimmed_fee_msat: Option<u64>, from_onchain: bool,
		startup_replay: bool, next_channel_counterparty_node_id: Option<PublicKey>,
		next_channel_outpoint: OutPoint, next_channel_id: ChannelId, next_user_channel_id: Option<u128>,
	) {
		match source {
			HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
				debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire),
					"We don't support claim_htlc claims during startup - monitors may not be available yet");
				if let Some(pubkey) = next_channel_counterparty_node_id {
					debug_assert_eq!(pubkey, path.hops[0].pubkey);
				}
				// Block the downstream channel's RAA monitor update until the user has handled
				// the event generated for this claim.
				let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
					channel_funding_outpoint: next_channel_outpoint, channel_id: next_channel_id,
					counterparty_node_id: path.hops[0].pubkey,
				};
				self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage,
					session_priv, path, from_onchain, ev_completion_action, &self.pending_events,
					&self.logger);
			},
			HTLCSource::PreviousHopData(hop_data) => {
				let prev_channel_id = hop_data.channel_id;
				let prev_user_channel_id = hop_data.user_channel_id;
				let prev_node_id = hop_data.counterparty_node_id;
				let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
				self.claim_funds_from_hop(hop_data, payment_preimage, None,
					|htlc_claim_value_msat, definitely_duplicate| {
						// The downstream channel to unblock once the upstream claim completes,
						// when its counterparty is known.
						let chan_to_release =
							if let Some(node_id) = next_channel_counterparty_node_id {
								Some(EventUnblockedChannel {
									counterparty_node_id: node_id,
									funding_txo: next_channel_outpoint,
									channel_id: next_channel_id,
									blocking_action: completed_blocker
								})
							} else {
								None
							};

						if definitely_duplicate && startup_replay {
							// Replaying an already-applied claim at startup: nothing to track.
							(None, None)
						} else if definitely_duplicate {
							// Duplicate claim while running: just free the downstream channel.
							if let Some(other_chan) = chan_to_release {
								(Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
									downstream_counterparty_node_id: other_chan.counterparty_node_id,
									downstream_funding_outpoint: other_chan.funding_txo,
									downstream_channel_id: other_chan.channel_id,
									blocking_action: other_chan.blocking_action,
								}), None)
							} else { (None, None) }
						} else {
							// Fee earned = inbound HTLC value - outbound HTLC value, when both
							// are known (includes any skimmed fee, per the assertion below).
							let total_fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
								if let Some(claimed_htlc_value) = htlc_claim_value_msat {
									Some(claimed_htlc_value - forwarded_htlc_value)
								} else { None }
							} else { None };
							debug_assert!(skimmed_fee_msat <= total_fee_earned_msat,
								"skimmed_fee_msat must always be included in total_fee_earned_msat");
							(Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
								event: events::Event::PaymentForwarded {
									prev_channel_id: Some(prev_channel_id),
									next_channel_id: Some(next_channel_id),
									prev_user_channel_id,
									next_user_channel_id,
									prev_node_id,
									next_node_id: next_channel_counterparty_node_id,
									total_fee_earned_msat,
									skimmed_fee_msat,
									claim_from_onchain_tx: from_onchain,
									outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
								},
								downstream_counterparty_and_funding_outpoint: chan_to_release,
							}), None)
						}
					});
			},
		}
	}
7362
	/// Returns this node's public key (the cached `our_network_pubkey`).
	pub fn get_our_node_id(&self) -> PublicKey {
		self.our_network_pubkey
	}
7367
	/// Processes actions that were deferred until a `ChannelMonitorUpdate` completed: emitting
	/// `PaymentClaimed`/forwarding events and releasing RAA monitor-update blockers on other
	/// channels.
	fn handle_monitor_update_completion_actions<I: IntoIterator<Item=MonitorUpdateCompletionAction>>(&self, actions: I) {
		// We take `pending_events`, `claimable_payments` and `per_peer_state` locks below, so
		// none of them may already be held by this thread.
		debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
		debug_assert_ne!(self.claimable_payments.held_by_thread(), LockHeldState::HeldByThread);
		debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);

		// Channels whose RAA blockers became releasable while we held peer-state locks; their
		// release is deferred to the end, after all locks are dropped.
		let mut freed_channels = Vec::new();

		for action in actions.into_iter() {
			match action {
				MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim } => {
					if let Some((counterparty_node_id, chan_id, htlc_id, claim_ptr)) = pending_mpp_claim {
						let per_peer_state = self.per_peer_state.read().unwrap();
						per_peer_state.get(&counterparty_node_id).map(|peer_state_mutex| {
							let mut peer_state = peer_state_mutex.lock().unwrap();
							let blockers_entry = peer_state.actions_blocking_raa_monitor_updates.entry(chan_id);
							if let btree_map::Entry::Occupied(mut blockers) = blockers_entry {
								blockers.get_mut().retain(|blocker|
									if let &RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { pending_claim } = &blocker {
										if *pending_claim == claim_ptr {
											let mut pending_claim_state_lock = pending_claim.0.lock().unwrap();
											let pending_claim_state = &mut *pending_claim_state_lock;
											// Move this HTLC from without-preimage to
											// with-preimage within the shared MPP claim state.
											pending_claim_state.channels_without_preimage.retain(|htlc_info| {
												let this_claim =
													htlc_info.counterparty_node_id == counterparty_node_id
													&& htlc_info.channel_id == chan_id
													&& htlc_info.htlc_id == htlc_id;
												if this_claim {
													pending_claim_state.channels_with_preimage.push(htlc_info.clone());
													false
												} else { true }
											});
											// Once every MPP part has its preimage, queue all
											// involved channels for blocker release.
											if pending_claim_state.channels_without_preimage.is_empty() {
												for htlc_info in pending_claim_state.channels_with_preimage.iter() {
													let freed_chan = (
														htlc_info.counterparty_node_id,
														htlc_info.funding_txo,
														htlc_info.channel_id,
														blocker.clone()
													);
													freed_channels.push(freed_chan);
												}
											}
											// Keep the blocker only while parts are outstanding.
											!pending_claim_state.channels_without_preimage.is_empty()
										} else { true }
									} else { true }
								);
								if blockers.get().is_empty() {
									blockers.remove();
								}
							}
						});
					}

					let payment = self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
					if let Some(ClaimingPayment {
						amount_msat,
						payment_purpose: purpose,
						receiver_node_id,
						htlcs,
						sender_intended_value: sender_intended_total_msat,
						onion_fields,
						payment_id,
					}) = payment {
						let event = events::Event::PaymentClaimed {
							payment_hash,
							purpose,
							amount_msat,
							receiver_node_id: Some(receiver_node_id),
							htlcs,
							sender_intended_total_msat,
							onion_fields,
							payment_id,
						};
						let event_action = (event, None);
						let mut pending_events = self.pending_events.lock().unwrap();
						// Avoid queueing the same PaymentClaimed event twice.
						if !pending_events.contains(&event_action) {
							pending_events.push_back(event_action);
						}
					}
				},
				MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
					event, downstream_counterparty_and_funding_outpoint
				} => {
					self.pending_events.lock().unwrap().push_back((event, None));
					if let Some(unblocked) = downstream_counterparty_and_funding_outpoint {
						self.handle_monitor_update_release(
							unblocked.counterparty_node_id, unblocked.funding_txo,
							unblocked.channel_id, Some(unblocked.blocking_action),
						);
					}
				},
				MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
					downstream_counterparty_node_id, downstream_funding_outpoint, downstream_channel_id, blocking_action,
				} => {
					self.handle_monitor_update_release(
						downstream_counterparty_node_id,
						downstream_funding_outpoint,
						downstream_channel_id,
						Some(blocking_action),
					);
				},
			}
		}

		for (node_id, funding_outpoint, channel_id, blocker) in freed_channels {
			self.handle_monitor_update_release(node_id, funding_outpoint, channel_id, Some(blocker));
		}
	}
7480
	/// Resumes a channel after a reestablish or monitor-update completion: queues the provided
	/// RAA/commitment-update/`channel_ready`/`announcement_signatures`/`tx_signatures` messages
	/// (respecting `order` for RAA vs commitment), broadcasts the funding transaction if given,
	/// and emits channel pending/ready events.
	///
	/// Returns the HTLCs to forward and the `update_add_htlc`s still needing onion decode, for
	/// the caller to process once locks are released.
	fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
		channel: &mut Channel<SP>, raa: Option<msgs::RevokeAndACK>,
		commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
		pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_update_adds: Vec<msgs::UpdateAddHTLC>,
		funding_broadcastable: Option<Transaction>,
		channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>,
		tx_signatures: Option<msgs::TxSignatures>
	) -> (Option<(u64, Option<PublicKey>, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec<msgs::UpdateAddHTLC>)>) {
		let logger = WithChannelContext::from(&self.logger, &channel.context, None);
		log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures",
			&channel.context.channel_id(),
			if raa.is_some() { "an" } else { "no" },
			if commitment_update.is_some() { "a" } else { "no" },
			pending_forwards.len(), pending_update_adds.len(),
			if funding_broadcastable.is_some() { "" } else { "not " },
			if channel_ready.is_some() { "sending" } else { "without" },
			if announcement_sigs.is_some() { "sending" } else { "without" },
			if tx_signatures.is_some() { "sending" } else { "without" },
		);

		let counterparty_node_id = channel.context.get_counterparty_node_id();
		// Prefer the real SCID; fall back to our outbound alias pre-confirmation.
		let short_channel_id = channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias());

		let mut htlc_forwards = None;
		if !pending_forwards.is_empty() {
			htlc_forwards = Some((
				short_channel_id, Some(channel.context.get_counterparty_node_id()),
				channel.context.get_funding_txo().unwrap(), channel.context.channel_id(),
				channel.context.get_user_id(), pending_forwards
			));
		}
		let mut decode_update_add_htlcs = None;
		if !pending_update_adds.is_empty() {
			decode_update_add_htlcs = Some((short_channel_id, pending_update_adds));
		}

		if let Some(msg) = channel_ready {
			send_channel_ready!(self, pending_msg_events, channel, msg);
		}
		if let Some(msg) = announcement_sigs {
			pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
				node_id: counterparty_node_id,
				msg,
			});
		}
		if let Some(msg) = tx_signatures {
			pending_msg_events.push(events::MessageSendEvent::SendTxSignatures {
				node_id: counterparty_node_id,
				msg,
			});
		}

		// RAA and commitment update must be queued in the order the counterparty expects them;
		// `order` encodes that, so both send paths are macros invoked in either order.
		macro_rules! handle_cs { () => {
			if let Some(update) = commitment_update {
				pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
					node_id: counterparty_node_id,
					updates: update,
				});
			}
		} }
		macro_rules! handle_raa { () => {
			if let Some(revoke_and_ack) = raa {
				pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
					node_id: counterparty_node_id,
					msg: revoke_and_ack,
				});
			}
		} }
		match order {
			RAACommitmentOrder::CommitmentFirst => {
				handle_cs!();
				handle_raa!();
			},
			RAACommitmentOrder::RevokeAndACKFirst => {
				handle_raa!();
				handle_cs!();
			},
		}

		if let Some(tx) = funding_broadcastable {
			if channel.context.is_manual_broadcast() {
				// The user opted to broadcast the funding tx themselves; just tell them it is
				// now safe to do so.
				log_info!(logger, "Not broadcasting funding transaction with txid {} as it is manually managed", tx.compute_txid());
				let mut pending_events = self.pending_events.lock().unwrap();
				match channel.context.get_funding_txo() {
					Some(funding_txo) => {
						emit_funding_tx_broadcast_safe_event!(pending_events, channel, funding_txo.into_bitcoin_outpoint())
					},
					None => {
						debug_assert!(false, "Channel resumed without a funding txo, this should never happen!");
						return (htlc_forwards, decode_update_add_htlcs);
					}
				};
			} else {
				log_info!(logger, "Broadcasting funding transaction with txid {}", tx.compute_txid());
				self.tx_broadcaster.broadcast_transactions(&[&tx]);
			}
		}

		{
			let mut pending_events = self.pending_events.lock().unwrap();
			emit_channel_pending_event!(pending_events, channel);
			emit_channel_ready_event!(pending_events, channel);
		}

		(htlc_forwards, decode_update_add_htlcs)
	}
7589
	/// Called when persistence of a `ChannelMonitorUpdate` (up to `highest_applied_update_id`)
	/// has completed. Once no updates remain in flight for the channel, either resumes the
	/// still-open channel or, for a closed channel, runs its queued post-update actions.
	fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
		debug_assert!(self.total_consistency_lock.try_write().is_err()); let counterparty_node_id = match counterparty_node_id {
			Some(cp_id) => cp_id.clone(),
			None => {
				// Older monitor events may not carry the counterparty; resolve via funding txo.
				let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
				match outpoint_to_peer.get(funding_txo) {
					Some(cp_id) => cp_id.clone(),
					None => return,
				}
			}
		};
		let per_peer_state = self.per_peer_state.read().unwrap();
		let mut peer_state_lock;
		let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
		if peer_state_mutex_opt.is_none() { return }
		peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
		let peer_state = &mut *peer_state_lock;

		// Drop all in-flight updates now covered by `highest_applied_update_id`.
		let remaining_in_flight =
			if let Some(pending) = peer_state.in_flight_monitor_updates.get_mut(funding_txo) {
				pending.retain(|upd| upd.update_id > highest_applied_update_id);
				pending.len()
			} else { 0 };

		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*channel_id), None);
		log_trace!(logger, "ChannelMonitor updated to {}. {} pending in-flight updates.",
			highest_applied_update_id, remaining_in_flight);

		if remaining_in_flight != 0 {
			return;
		}

		if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) {
			if chan.is_awaiting_monitor_update() {
				log_trace!(logger, "Channel is open and awaiting update, resuming it");
				handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
			} else {
				log_trace!(logger, "Channel is open but not awaiting update");
			}
		} else {
			let update_actions = peer_state.monitor_update_blocked_actions
				.remove(channel_id).unwrap_or(Vec::new());
			log_trace!(logger, "Channel is closed, applying {} post-update actions", update_actions.len());
			// Locks must be dropped before running completion actions, which re-take them.
			mem::drop(peer_state_lock);
			mem::drop(per_peer_state);
			self.handle_monitor_update_completion_actions(update_actions);
		}
	}
7642
	/// Accepts a pending inbound channel request, requiring on-chain confirmation of the funding
	/// transaction (i.e. `accept_0conf` is `false`).
	///
	/// `user_channel_id` is an opaque value echoed back in events for this channel.
	pub fn accept_inbound_channel(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> {
		self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, false, user_channel_id, vec![], Weight::from_wu(0))
	}
7666
	/// Accepts a pending inbound channel request as a zero-confirmation channel
	/// (`accept_0conf` is `true`) - only appropriate for a trusted counterparty, as the channel
	/// becomes usable before the funding transaction confirms.
	///
	/// `user_channel_id` is an opaque value echoed back in events for this channel.
	pub fn accept_inbound_channel_from_trusted_peer_0conf(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> {
		self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, true, user_channel_id, vec![], Weight::from_wu(0))
	}
7688
	/// Shared implementation for accepting an inbound channel request: removes the pending
	/// request, constructs the inbound channel object (V1 or, under `cfg(dual_funding)`, V2),
	/// enforces zero-conf and unfunded-peer-limit policy, assigns an outbound SCID alias and
	/// registers the channel, queueing the accept message to the peer.
	fn do_accept_inbound_channel(
		&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, accept_0conf: bool,
		user_channel_id: u128, _funding_inputs: Vec<(TxIn, TransactionU16LenLimited)>,
		_total_witness_weight: Weight,
	) -> Result<(), APIError> {
		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id), None);
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

		// Count peers with only unfunded channels *before* taking per_peer_state below; used for
		// the MAX_UNFUNDED_CHANNEL_PEERS check further down.
		let peers_without_funded_channels =
			self.peers_without_funded_channels(|peer| { peer.total_channel_count() > 0 });
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
			.ok_or_else(|| {
				let err_str = format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id);
				log_error!(logger, "{}", err_str);

				APIError::ChannelUnavailable { err: err_str }
			})?;
		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		let is_only_peer_channel = peer_state.total_channel_count() == 1;

		// The request is removed up front; on any failure below it is simply dropped.
		let res = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) {
			Some(unaccepted_channel) => {
				let best_block_height = self.best_block.read().unwrap().height;
				match unaccepted_channel.open_channel_msg {
					OpenChannelMessage::V1(open_channel_msg) => {
						InboundV1Channel::new(
							&self.fee_estimator, &self.entropy_source, &self.signer_provider, *counterparty_node_id,
							&self.channel_type_features(), &peer_state.latest_features, &open_channel_msg,
							user_channel_id, &self.default_configuration, best_block_height, &self.logger, accept_0conf
						).map_err(|err| MsgHandleErrInternal::from_chan_no_close(err, *temporary_channel_id)
						).map(|mut channel| {
							let logger = WithChannelContext::from(&self.logger, &channel.context, None);
							let message_send_event = channel.accept_inbound_channel(&&logger).map(|msg| {
								events::MessageSendEvent::SendAcceptChannel {
									node_id: *counterparty_node_id,
									msg,
								}
							});
							(*temporary_channel_id, ChannelPhase::UnfundedInboundV1(channel), message_send_event)
						})
					},
					#[cfg(dual_funding)]
					OpenChannelMessage::V2(open_channel_msg) => {
						InboundV2Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
							self.get_our_node_id(), *counterparty_node_id, &self.channel_type_features(), &peer_state.latest_features,
							&open_channel_msg, _funding_inputs, _total_witness_weight, user_channel_id,
							&self.default_configuration, best_block_height, &self.logger
						).map_err(|_| MsgHandleErrInternal::from_chan_no_close(
							ChannelError::Close(
								(
									"V2 channel rejected due to sender error".into(),
									ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) },
								)
							), *temporary_channel_id)
						).map(|channel| {
							let message_send_event = events::MessageSendEvent::SendAcceptChannelV2 {
								node_id: channel.context.get_counterparty_node_id(),
								msg: channel.accept_inbound_dual_funded_channel()
							};
							(channel.context.channel_id(), ChannelPhase::UnfundedInboundV2(channel), Some(message_send_event))
						})
					},
				}
			},
			None => {
				let err_str = "No such channel awaiting to be accepted.".to_owned();
				log_error!(logger, "{}", err_str);

				return Err(APIError::APIMisuseError { err: err_str });
			}
		};

		let (channel_id, mut channel_phase, message_send_event) = match res {
			Ok(res) => res,
			Err(err) => {
				// handle_error! re-takes the peer lock, so drop ours first.
				mem::drop(peer_state_lock);
				mem::drop(per_peer_state);
				match handle_error!(self, Result::<(), MsgHandleErrInternal>::Err(err), *counterparty_node_id) {
					Ok(_) => unreachable!("`handle_error` only returns Err as we've passed in an Err"),
					Err(e) => {
						return Err(APIError::ChannelUnavailable { err: e.err });
					},
				}
			}
		};

		if accept_0conf {
			debug_assert!(channel_phase.context().minimum_depth().unwrap() == 0);
		} else if channel_phase.context().get_channel_type().requires_zero_conf() {
			// Peer demanded 0conf but the caller used the confirmation-requiring accept path.
			let send_msg_err_event = events::MessageSendEvent::HandleError {
				node_id: channel_phase.context().get_counterparty_node_id(),
				action: msgs::ErrorAction::SendErrorMessage{
					msg: msgs::ErrorMessage { channel_id: *temporary_channel_id, data: "No zero confirmation channels accepted".to_owned(), }
				}
			};
			peer_state.pending_msg_events.push(send_msg_err_event);
			let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
			log_error!(logger, "{}", err_str);

			return Err(APIError::APIMisuseError { err: err_str });
		} else {
			// Refuse a channel from a new unfunded peer if we already track too many such peers.
			if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
				let send_msg_err_event = events::MessageSendEvent::HandleError {
					node_id: channel_phase.context().get_counterparty_node_id(),
					action: msgs::ErrorAction::SendErrorMessage{
						msg: msgs::ErrorMessage { channel_id: *temporary_channel_id, data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
					}
				};
				peer_state.pending_msg_events.push(send_msg_err_event);
				let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
				log_error!(logger, "{}", err_str);

				return Err(APIError::APIMisuseError { err: err_str });
			}
		}

		let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
		channel_phase.context_mut().set_outbound_scid_alias(outbound_scid_alias);

		if let Some(message_send_event) = message_send_event {
			peer_state.pending_msg_events.push(message_send_event);
		}
		peer_state.channel_by_id.insert(channel_id, channel_phase);

		Ok(())
	}
7829
7830 fn peers_without_funded_channels<Filter>(&self, maybe_count_peer: Filter) -> usize
7836 where Filter: Fn(&PeerState<SP>) -> bool {
7837 let mut peers_without_funded_channels = 0;
7838 let best_block_height = self.best_block.read().unwrap().height;
7839 {
7840 let peer_state_lock = self.per_peer_state.read().unwrap();
7841 for (_, peer_mtx) in peer_state_lock.iter() {
7842 let peer = peer_mtx.lock().unwrap();
7843 if !maybe_count_peer(&*peer) { continue; }
7844 let num_unfunded_channels = Self::unfunded_channel_count(&peer, best_block_height);
7845 if num_unfunded_channels == peer.total_channel_count() {
7846 peers_without_funded_channels += 1;
7847 }
7848 }
7849 }
7850 return peers_without_funded_channels;
7851 }
7852
7853 fn unfunded_channel_count(
7854 peer: &PeerState<SP>, best_block_height: u32
7855 ) -> usize {
7856 let mut num_unfunded_channels = 0;
7857 for (_, phase) in peer.channel_by_id.iter() {
7858 match phase {
7859 ChannelPhase::Funded(chan) => {
7860 if !chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 &&
7863 chan.context.get_funding_tx_confirmations(best_block_height) == 0
7864 {
7865 num_unfunded_channels += 1;
7866 }
7867 },
7868 ChannelPhase::UnfundedInboundV1(chan) => {
7869 if chan.context.minimum_depth().unwrap_or(1) != 0 {
7870 num_unfunded_channels += 1;
7871 }
7872 },
7873 ChannelPhase::UnfundedInboundV2(chan) => {
7874 if chan.context.minimum_depth().unwrap_or(1) != 0 &&
7877 chan.dual_funding_context.our_funding_satoshis == 0 {
7878 num_unfunded_channels += 1;
7879 }
7880 },
7881 ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedOutboundV2(_) => {
7882 continue;
7884 },
7885 }
7886 }
7887 num_unfunded_channels + peer.inbound_channel_request_by_id.len()
7888 }
7889
	/// Handles an incoming `open_channel` (V1) or `open_channel2` (V2) message: performs
	/// chain/config/DoS checks, then either queues the request for manual acceptance (emitting
	/// [`Event::OpenChannelRequest`]) or creates the inbound channel and queues an
	/// `accept_channel`/`accept_channel2` response.
	fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: OpenChannelMessageRef<'_>) -> Result<(), MsgHandleErrInternal> {
		// Fields shared between the V1 and V2 open-channel messages.
		let common_fields = match msg {
			OpenChannelMessageRef::V1(msg) => &msg.common_fields,
			#[cfg(dual_funding)]
			OpenChannelMessageRef::V2(msg) => &msg.common_fields,
		};

		if common_fields.chain_hash != self.chain_hash {
			return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(),
				common_fields.temporary_channel_id));
		}

		if !self.default_configuration.accept_inbound_channels {
			return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(),
				common_fields.temporary_channel_id));
		}

		// Count peers which have channels but none funded yet, for the DoS limit check below.
		// This is computed before taking this peer's lock, as it iterates all peers itself.
		let channeled_peers_without_funding =
			self.peers_without_funded_channels(|node| node.total_channel_count() > 0);

		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
			.ok_or_else(|| {
				debug_assert!(false);
				MsgHandleErrInternal::send_err_msg_no_close(
					format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
					common_fields.temporary_channel_id)
			})?;
		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;

		// Reject a brand-new-channel peer if we already have too many peers with only unfunded
		// channels, unless the user is manually vetting inbound channels anyway.
		if peer_state.total_channel_count() == 0 &&
			channeled_peers_without_funding >= MAX_UNFUNDED_CHANNEL_PEERS &&
			!self.default_configuration.manually_accept_inbound_channels
		{
			return Err(MsgHandleErrInternal::send_err_msg_no_close(
				"Have too many peers with unfunded channels, not accepting new ones".to_owned(),
				common_fields.temporary_channel_id));
		}

		// Per-peer cap on concurrently-unfunded channels.
		let best_block_height = self.best_block.read().unwrap().height;
		if Self::unfunded_channel_count(peer_state, best_block_height) >= MAX_UNFUNDED_CHANS_PER_PEER {
			return Err(MsgHandleErrInternal::send_err_msg_no_close(
				format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER),
				common_fields.temporary_channel_id));
		}

		let channel_id = common_fields.temporary_channel_id;
		let channel_exists = peer_state.has_channel(&channel_id);
		if channel_exists {
			return Err(MsgHandleErrInternal::send_err_msg_no_close(
				"temporary_channel_id collision for the same peer!".to_owned(),
				common_fields.temporary_channel_id));
		}

		// Negotiate the channel type from the message and our supported feature set.
		let channel_type = channel::channel_type_from_open_channel(
			common_fields, &peer_state.latest_features, &self.channel_type_features()
		).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, common_fields.temporary_channel_id))?;

		// Manual-acceptance path: surface an event and park the raw message until the user
		// accepts or rejects it (or it ages out after UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS).
		// Note the zero-conf/anchors rejections below do not apply on this path; the user
		// decides instead.
		if self.default_configuration.manually_accept_inbound_channels {
			let mut pending_events = self.pending_events.lock().unwrap();
			let is_announced = (common_fields.channel_flags & 1) == 1;
			pending_events.push_back((events::Event::OpenChannelRequest {
				temporary_channel_id: common_fields.temporary_channel_id,
				counterparty_node_id: *counterparty_node_id,
				funding_satoshis: common_fields.funding_satoshis,
				channel_negotiation_type: match msg {
					OpenChannelMessageRef::V1(msg) => InboundChannelFunds::PushMsat(msg.push_msat),
					#[cfg(dual_funding)]
					OpenChannelMessageRef::V2(_) => InboundChannelFunds::DualFunded,
				},
				channel_type,
				is_announced,
				params: common_fields.channel_parameters(),
			}, None));
			peer_state.inbound_channel_request_by_id.insert(channel_id, InboundChannelRequest {
				open_channel_msg: match msg {
					OpenChannelMessageRef::V1(msg) => OpenChannelMessage::V1(msg.clone()),
					#[cfg(dual_funding)]
					OpenChannelMessageRef::V2(msg) => OpenChannelMessage::V2(msg.clone()),
				},
				ticks_remaining: UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS,
			});
			return Ok(());
		}

		// Auto-acceptance path: pick a random user_channel_id since the user never chose one.
		let mut random_bytes = [0u8; 16];
		random_bytes.copy_from_slice(&self.entropy_source.get_secure_random_bytes()[..16]);
		let user_channel_id = u128::from_be_bytes(random_bytes);

		// Auto-accepting zero-conf or anchor channels would expose us to risk, so require
		// manual acceptance for those.
		if channel_type.requires_zero_conf() {
			return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), common_fields.temporary_channel_id));
		}
		if channel_type.requires_anchors_zero_fee_htlc_tx() {
			return Err(MsgHandleErrInternal::send_err_msg_no_close("No channels with anchor outputs accepted".to_owned(), common_fields.temporary_channel_id));
		}

		// Build the inbound channel object and the accept message to send back.
		let (mut channel_phase, message_send_event) = match msg {
			OpenChannelMessageRef::V1(msg) => {
				let mut channel = InboundV1Channel::new(
					&self.fee_estimator, &self.entropy_source, &self.signer_provider, *counterparty_node_id,
					&self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id,
					&self.default_configuration, best_block_height, &self.logger, false
				).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id))?;
				let logger = WithChannelContext::from(&self.logger, &channel.context, None);
				let message_send_event = channel.accept_inbound_channel(&&logger).map(|msg| {
					events::MessageSendEvent::SendAcceptChannel {
						node_id: *counterparty_node_id,
						msg,
					}
				});
				(ChannelPhase::UnfundedInboundV1(channel), message_send_event)
			},
			#[cfg(dual_funding)]
			OpenChannelMessageRef::V2(msg) => {
				let channel = InboundV2Channel::new(&self.fee_estimator, &self.entropy_source,
					&self.signer_provider, self.get_our_node_id(), *counterparty_node_id,
					&self.channel_type_features(), &peer_state.latest_features, msg, vec![], Weight::from_wu(0),
					user_channel_id, &self.default_configuration, best_block_height, &self.logger
				).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id))?;
				let message_send_event = events::MessageSendEvent::SendAcceptChannelV2 {
					node_id: *counterparty_node_id,
					msg: channel.accept_inbound_dual_funded_channel(),
				};
				(ChannelPhase::UnfundedInboundV2(channel), Some(message_send_event))
			},
		};

		let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
		channel_phase.context_mut().set_outbound_scid_alias(outbound_scid_alias);

		if let Some(message_send_event) = message_send_event {
			peer_state.pending_msg_events.push(message_send_event);
		}
		peer_state.channel_by_id.insert(channel_phase.context().channel_id(), channel_phase);

		Ok(())
	}
8042
	/// Handles an incoming `accept_channel` message for an outbound V1 channel we initiated,
	/// then emits [`Event::FundingGenerationReady`] so the user can build the funding
	/// transaction.
	fn internal_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
		// Extract what the event needs inside the lock scope, then drop the peer lock before
		// touching the pending-events queue.
		let (value, output_script, user_id) = {
			let per_peer_state = self.per_peer_state.read().unwrap();
			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
				.ok_or_else(|| {
					debug_assert!(false);
					MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id)
				})?;
			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			let peer_state = &mut *peer_state_lock;
			match peer_state.channel_by_id.entry(msg.common_fields.temporary_channel_id) {
				hash_map::Entry::Occupied(mut phase) => {
					match phase.get_mut() {
						// Only an outbound V1 channel awaiting acceptance may receive this
						// message; the macro closes the channel on validation failure.
						ChannelPhase::UnfundedOutboundV1(chan) => {
							try_chan_phase_entry!(self, peer_state, chan.accept_channel(msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), phase);
							(chan.context.get_value_satoshis(), chan.context.get_funding_redeemscript().to_p2wsh(), chan.context.get_user_id())
						},
						_ => {
							return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id));
						}
					}
				},
				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id))
			}
		};
		// Ask the user to construct the funding transaction for the now-accepted channel.
		let mut pending_events = self.pending_events.lock().unwrap();
		pending_events.push_back((events::Event::FundingGenerationReady {
			temporary_channel_id: msg.common_fields.temporary_channel_id,
			counterparty_node_id: *counterparty_node_id,
			channel_value_satoshis: value,
			output_script,
			user_channel_id: user_id,
		}, None));
		Ok(())
	}
8080
	/// Handles an incoming `funding_created` message, promoting an unfunded inbound V1 channel
	/// to a funded channel: validates funding, registers the initial [`ChannelMonitor`] with the
	/// chain monitor, re-keys the channel from its temporary id to the funded channel id, and
	/// queues a `funding_signed` response.
	fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
		let best_block = *self.best_block.read().unwrap();

		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
			.ok_or_else(|| {
				debug_assert!(false);
				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id)
			})?;

		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		// Remove the channel under its temporary id; on success it is re-inserted below under
		// the funded channel id.
		let (mut chan, funding_msg_opt, monitor) =
			match peer_state.channel_by_id.remove(&msg.temporary_channel_id) {
				Some(ChannelPhase::UnfundedInboundV1(inbound_chan)) => {
					let logger = WithChannelContext::from(&self.logger, &inbound_chan.context, None);
					match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) {
						Ok(res) => res,
						Err((inbound_chan, err)) => {
							// funding_created failure always implies a channel close.
							debug_assert!(matches!(err, ChannelError::Close(_)));
							return Err(convert_chan_phase_err!(self, peer_state, err, &mut ChannelPhase::UnfundedInboundV1(inbound_chan), &msg.temporary_channel_id).1);
						},
					}
				},
				// Any other phase means the peer sent funding_created out of order; close it.
				Some(mut phase) => {
					let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id);
					let err = ChannelError::close(err_msg);
					return Err(convert_chan_phase_err!(self, peer_state, err, &mut phase, &msg.temporary_channel_id).1);
				},
				None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
			};

		let funded_channel_id = chan.context.channel_id();

		// Fails the now-funded channel: reverts the funding info so the close is reported
		// against the temporary channel id the peer knows, then converts to an error.
		macro_rules! fail_chan { ($err: expr) => { {
			let err = ChannelError::close($err.to_owned());
			chan.unset_funding_info(msg.temporary_channel_id);
			return Err(convert_chan_phase_err!(self, peer_state, err, chan, &funded_channel_id, UNFUNDED_CHANNEL).1);
		} } }

		match peer_state.channel_by_id.entry(funded_channel_id) {
			hash_map::Entry::Occupied(_) => {
				fail_chan!("Already had channel with the new channel_id");
			},
			hash_map::Entry::Vacant(e) => {
				let mut outpoint_to_peer_lock = self.outpoint_to_peer.lock().unwrap();
				match outpoint_to_peer_lock.entry(monitor.get_funding_txo().0) {
					hash_map::Entry::Occupied(_) => {
						fail_chan!("The funding_created message had the same funding_txid as an existing channel - funding is not possible");
					},
					hash_map::Entry::Vacant(i_e) => {
						// Register the initial monitor before exposing the channel anywhere.
						let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
						if let Ok(persist_state) = monitor_res {
							i_e.insert(chan.context.get_counterparty_node_id());
							// Drop the outpoint map lock before the monitor-update handling below.
							mem::drop(outpoint_to_peer_lock);

							if let Some(msg) = funding_msg_opt {
								peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
									node_id: counterparty_node_id.clone(),
									msg,
								});
							}

							// Insert under the funded id and complete the monitor update.
							if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan)) {
								handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
									per_peer_state, chan, INITIAL_MONITOR);
							} else {
								unreachable!("This must be a funded channel as we just inserted it.");
							}
							Ok(())
						} else {
							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
							log_error!(logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
							fail_chan!("Duplicate funding outpoint");
						}
					}
				}
			}
		}
	}
8176
	/// Handles an incoming `funding_signed` message for an outbound V1 channel, promoting it to
	/// a funded channel and registering its initial [`ChannelMonitor`] with the chain monitor.
	fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
		let best_block = *self.best_block.read().unwrap();
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
			.ok_or_else(|| {
				debug_assert!(false);
				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
			})?;

		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		match peer_state.channel_by_id.entry(msg.channel_id) {
			hash_map::Entry::Occupied(chan_phase_entry) => {
				if matches!(chan_phase_entry.get(), ChannelPhase::UnfundedOutboundV1(_)) {
					// Remove the unfunded channel; it is re-inserted as Funded on success.
					let chan = if let ChannelPhase::UnfundedOutboundV1(chan) = chan_phase_entry.remove() { chan } else { unreachable!() };
					let logger = WithContext::from(
						&self.logger,
						Some(chan.context.get_counterparty_node_id()),
						Some(chan.context.channel_id()),
						None
					);
					let res =
						chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger);
					match res {
						Ok((mut chan, monitor)) => {
							if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
								// Re-insert the now-funded channel and complete the initial
								// monitor update handling.
								let mut chan = peer_state.channel_by_id.entry(msg.channel_id).or_insert(ChannelPhase::Funded(chan));
								if let ChannelPhase::Funded(ref mut chan) = &mut chan {
									handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
								} else { unreachable!(); }
								Ok(())
							} else {
								// The monitor refused the funding outpoint; revert the funding
								// info before closing so the failure is reported correctly.
								let e = ChannelError::close("Channel funding outpoint was a duplicate".to_owned());
								chan.unset_funding_info(msg.channel_id);
								return Err(convert_chan_phase_err!(self, peer_state, e, &mut ChannelPhase::Funded(chan), &msg.channel_id).1);
							}
						},
						Err((chan, e)) => {
							debug_assert!(matches!(e, ChannelError::Close(_)),
								"We don't have a channel anymore, so the error better have expected close");
							return Err(convert_chan_phase_err!(self, peer_state, e, &mut ChannelPhase::UnfundedOutboundV1(chan), &msg.channel_id).1);
						}
					}
				} else {
					return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id));
				}
			},
			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
		}
	}
8237
8238 fn internal_tx_msg<HandleTxMsgFn: Fn(&mut ChannelPhase<SP>) -> Result<MessageSendEvent, &'static str>>(
8239 &self, counterparty_node_id: &PublicKey, channel_id: ChannelId, tx_msg_handler: HandleTxMsgFn
8240 ) -> Result<(), MsgHandleErrInternal> {
8241 let per_peer_state = self.per_peer_state.read().unwrap();
8242 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8243 .ok_or_else(|| {
8244 debug_assert!(false);
8245 MsgHandleErrInternal::send_err_msg_no_close(
8246 format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
8247 channel_id)
8248 })?;
8249 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8250 let peer_state = &mut *peer_state_lock;
8251 match peer_state.channel_by_id.entry(channel_id) {
8252 hash_map::Entry::Occupied(mut chan_phase_entry) => {
8253 let channel_phase = chan_phase_entry.get_mut();
8254 let msg_send_event = match tx_msg_handler(channel_phase) {
8255 Ok(msg_send_event) => msg_send_event,
8256 Err(tx_msg_str) => return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(
8257 format!("Got a {tx_msg_str} message with no interactive transaction construction expected or in-progress")
8258 ), channel_id)),
8259 };
8260 peer_state.pending_msg_events.push(msg_send_event);
8261 Ok(())
8262 },
8263 hash_map::Entry::Vacant(_) => {
8264 Err(MsgHandleErrInternal::send_err_msg_no_close(format!(
8265 "Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
8266 counterparty_node_id), channel_id)
8267 )
8268 }
8269 }
8270 }
8271
8272 fn internal_tx_add_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddInput) -> Result<(), MsgHandleErrInternal> {
8273 self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut ChannelPhase<SP>| {
8274 match channel_phase {
8275 ChannelPhase::UnfundedInboundV2(ref mut channel) => {
8276 Ok(channel.tx_add_input(msg).into_msg_send_event(counterparty_node_id))
8277 },
8278 ChannelPhase::UnfundedOutboundV2(ref mut channel) => {
8279 Ok(channel.tx_add_input(msg).into_msg_send_event(counterparty_node_id))
8280 },
8281 _ => Err("tx_add_input"),
8282 }
8283 })
8284 }
8285
8286 fn internal_tx_add_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddOutput) -> Result<(), MsgHandleErrInternal> {
8287 self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut ChannelPhase<SP>| {
8288 match channel_phase {
8289 ChannelPhase::UnfundedInboundV2(ref mut channel) => {
8290 Ok(channel.tx_add_output(msg).into_msg_send_event(counterparty_node_id))
8291 },
8292 ChannelPhase::UnfundedOutboundV2(ref mut channel) => {
8293 Ok(channel.tx_add_output(msg).into_msg_send_event(counterparty_node_id))
8294 },
8295 _ => Err("tx_add_output"),
8296 }
8297 })
8298 }
8299
8300 fn internal_tx_remove_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveInput) -> Result<(), MsgHandleErrInternal> {
8301 self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut ChannelPhase<SP>| {
8302 match channel_phase {
8303 ChannelPhase::UnfundedInboundV2(ref mut channel) => {
8304 Ok(channel.tx_remove_input(msg).into_msg_send_event(counterparty_node_id))
8305 },
8306 ChannelPhase::UnfundedOutboundV2(ref mut channel) => {
8307 Ok(channel.tx_remove_input(msg).into_msg_send_event(counterparty_node_id))
8308 },
8309 _ => Err("tx_remove_input"),
8310 }
8311 })
8312 }
8313
8314 fn internal_tx_remove_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveOutput) -> Result<(), MsgHandleErrInternal> {
8315 self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut ChannelPhase<SP>| {
8316 match channel_phase {
8317 ChannelPhase::UnfundedInboundV2(ref mut channel) => {
8318 Ok(channel.tx_remove_output(msg).into_msg_send_event(counterparty_node_id))
8319 },
8320 ChannelPhase::UnfundedOutboundV2(ref mut channel) => {
8321 Ok(channel.tx_remove_output(msg).into_msg_send_event(counterparty_node_id))
8322 },
8323 _ => Err("tx_remove_output"),
8324 }
8325 })
8326 }
8327
	/// Handles an incoming `tx_complete` message. If interactive transaction construction
	/// finishes as a result, promotes the unfunded V2 channel to a funded channel and queues
	/// our initial `commitment_signed` (and possibly a funding-ready event for the user).
	fn internal_tx_complete(&self, counterparty_node_id: PublicKey, msg: &msgs::TxComplete) -> Result<(), MsgHandleErrInternal> {
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
			.ok_or_else(|| {
				debug_assert!(false);
				MsgHandleErrInternal::send_err_msg_no_close(
					format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
					msg.channel_id)
			})?;
		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		match peer_state.channel_by_id.entry(msg.channel_id) {
			hash_map::Entry::Occupied(mut chan_phase_entry) => {
				let channel_phase = chan_phase_entry.get_mut();
				// tx_complete may yield either another interactive-tx message to relay or, once
				// both sides are done, a signing session for the constructed funding tx.
				let (msg_send_event_opt, signing_session_opt) = match channel_phase {
					ChannelPhase::UnfundedInboundV2(channel) => channel.tx_complete(msg)
						.into_msg_send_event_or_signing_session(counterparty_node_id),
					ChannelPhase::UnfundedOutboundV2(channel) => channel.tx_complete(msg)
						.into_msg_send_event_or_signing_session(counterparty_node_id),
					_ => try_chan_phase_entry!(self, peer_state, Err(ChannelError::Close(
						(
							"Got a tx_complete message with no interactive transaction construction expected or in-progress".into(),
							ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) },
						))), chan_phase_entry)
				};
				if let Some(msg_send_event) = msg_send_event_opt {
					peer_state.pending_msg_events.push(msg_send_event);
				};
				// Construction complete: build our commitment signature, then swap the entry
				// from the unfunded V2 phase to a funded channel.
				if let Some(mut signing_session) = signing_session_opt {
					let (commitment_signed, funding_ready_for_sig_event_opt) = match chan_phase_entry.get_mut() {
						ChannelPhase::UnfundedOutboundV2(chan) => {
							chan.funding_tx_constructed(&mut signing_session, &self.logger)
						},
						ChannelPhase::UnfundedInboundV2(chan) => {
							chan.funding_tx_constructed(&mut signing_session, &self.logger)
						},
						_ => Err(ChannelError::Warn(
							"Got a tx_complete message with no interactive transaction construction expected or in-progress"
								.into())),
					}.map_err(|err| MsgHandleErrInternal::send_err_msg_no_close(format!("{}", err), msg.channel_id))?;
					let (channel_id, channel_phase) = chan_phase_entry.remove_entry();
					let channel = match channel_phase {
						ChannelPhase::UnfundedOutboundV2(chan) => chan.into_channel(signing_session),
						ChannelPhase::UnfundedInboundV2(chan) => chan.into_channel(signing_session),
						// Unreachable: the match above already rejected every other phase.
						_ => {
							debug_assert!(false); Err(ChannelError::Warn(
								"Got a tx_complete message with no interactive transaction construction expected or in-progress"
									.into()))
						},
					}.map_err(|err| MsgHandleErrInternal::send_err_msg_no_close(format!("{}", err), msg.channel_id))?;
					peer_state.channel_by_id.insert(channel_id, ChannelPhase::Funded(channel));
					if let Some(funding_ready_for_sig_event) = funding_ready_for_sig_event_opt {
						let mut pending_events = self.pending_events.lock().unwrap();
						pending_events.push_back((funding_ready_for_sig_event, None));
					}
					// Send our initial commitment_signed; no HTLC updates exist yet.
					peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
						node_id: counterparty_node_id,
						updates: CommitmentUpdate {
							commitment_signed,
							update_add_htlcs: vec![],
							update_fulfill_htlcs: vec![],
							update_fail_htlcs: vec![],
							update_fail_malformed_htlcs: vec![],
							update_fee: None,
						},
					});
				}
				Ok(())
			},
			hash_map::Entry::Vacant(_) => {
				Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
			}
		}
	}
8403
	/// Handles an incoming `tx_signatures` message for a dual-funded channel, possibly sending
	/// our own `tx_signatures` in response and broadcasting the completed funding transaction.
	fn internal_tx_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxSignatures)
	-> Result<(), MsgHandleErrInternal> {
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
			.ok_or_else(|| {
				debug_assert!(false);
				MsgHandleErrInternal::send_err_msg_no_close(
					format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
					msg.channel_id)
			})?;
		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		match peer_state.channel_by_id.entry(msg.channel_id) {
			hash_map::Entry::Occupied(mut chan_phase_entry) => {
				let channel_phase = chan_phase_entry.get_mut();
				match channel_phase {
					// tx_signatures is only valid once the channel is already funded (the V2
					// flow promotes the channel to Funded on tx_complete).
					ChannelPhase::Funded(chan) => {
						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
						let (tx_signatures_opt, funding_tx_opt) = try_chan_phase_entry!(self, peer_state, chan.tx_signatures(msg, &&logger), chan_phase_entry);
						if let Some(tx_signatures) = tx_signatures_opt {
							peer_state.pending_msg_events.push(events::MessageSendEvent::SendTxSignatures {
								node_id: *counterparty_node_id,
								msg: tx_signatures,
							});
						}
						// Both sides have signed: broadcast the funding tx and tell the user
						// the channel is pending confirmation.
						if let Some(ref funding_tx) = funding_tx_opt {
							self.tx_broadcaster.broadcast_transactions(&[funding_tx]);
							{
								let mut pending_events = self.pending_events.lock().unwrap();
								emit_channel_pending_event!(pending_events, chan);
							}
						}
					},
					_ => try_chan_phase_entry!(self, peer_state, Err(ChannelError::Close(
						(
							"Got an unexpected tx_signatures message".into(),
							ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) },
						))), chan_phase_entry)
				}
				Ok(())
			},
			hash_map::Entry::Vacant(_) => {
				Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
			}
		}
	}
8450
	/// Handles an incoming `tx_abort` message, tearing down any in-progress interactive
	/// transaction construction and acknowledging the abort back to the peer.
	fn internal_tx_abort(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAbort)
	-> Result<(), MsgHandleErrInternal> {
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
			.ok_or_else(|| {
				debug_assert!(false);
				MsgHandleErrInternal::send_err_msg_no_close(
					format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
					msg.channel_id)
			})?;
		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		match peer_state.channel_by_id.entry(msg.channel_id) {
			hash_map::Entry::Occupied(mut chan_phase_entry) => {
				let channel_phase = chan_phase_entry.get_mut();
				// Only the unfunded V2 phases can have an interactive tx constructor; any
				// other phase receiving tx_abort is a protocol violation worth a warning.
				let tx_constructor = match channel_phase {
					ChannelPhase::UnfundedInboundV2(chan) => chan.interactive_tx_constructor_mut(),
					ChannelPhase::UnfundedOutboundV2(chan) => chan.interactive_tx_constructor_mut(),
					ChannelPhase::Funded(_) => {
						try_chan_phase_entry!(self, peer_state, Err(ChannelError::Warn(
							"Got an unexpected tx_abort message: After initial funding transaction is signed, \
							splicing and RBF attempts of interactive funding transactions are not supported yet so \
							we don't have any negotiation in progress".into(),
						)), chan_phase_entry)
					}
					ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
						try_chan_phase_entry!(self, peer_state, Err(ChannelError::Warn(
							"Got an unexpected tx_abort message: This is an unfunded channel created with V1 channel \
							establishment".into(),
						)), chan_phase_entry)
					},
				};
				// Only echo an abort acknowledgement if a negotiation was actually in
				// progress; `take()` also drops the constructor, ending the negotiation.
				if tx_constructor.take().is_some() {
					let msg = msgs::TxAbort {
						channel_id: msg.channel_id,
						data: "Acknowledged tx_abort".to_string().into_bytes(),
					};
					peer_state.pending_msg_events.push(events::MessageSendEvent::SendTxAbort {
						node_id: *counterparty_node_id,
						msg,
					});
				}
				Ok(())
			},
			hash_map::Entry::Vacant(_) => {
				Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
			}
		}
	}
8515
	/// Handles an incoming `channel_ready` message for a funded channel, queueing
	/// `announcement_signatures` (public channels) or an initial private `channel_update`
	/// for the counterparty, and emitting the channel-ready event for the user.
	fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
			.ok_or_else(|| {
				debug_assert!(false);
				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
			})?;
		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		match peer_state.channel_by_id.entry(msg.channel_id) {
			hash_map::Entry::Occupied(mut chan_phase_entry) => {
				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
					let announcement_sigs_opt = try_chan_phase_entry!(self, peer_state, chan.channel_ready(&msg, &self.node_signer,
						self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &&logger), chan_phase_entry);
					if let Some(announcement_sigs) = announcement_sigs_opt {
						log_trace!(logger, "Sending announcement_signatures for channel {}", chan.context.channel_id());
						peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
							node_id: counterparty_node_id.clone(),
							msg: announcement_sigs,
						});
					} else if chan.context.is_usable() {
						// No announcement to make, but the channel is usable: give the
						// counterparty a private channel_update so it can route to us.
						log_trace!(logger, "Sending private initial channel_update for our counterparty on channel {}", chan.context.channel_id());
						if let Ok(msg) = self.get_channel_update_for_unicast(chan) {
							peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
								node_id: counterparty_node_id.clone(),
								msg,
							});
						}
					}

					{
						let mut pending_events = self.pending_events.lock().unwrap();
						emit_channel_ready_event!(pending_events, chan);
					}

					Ok(())
				} else {
					try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
						"Got a channel_ready message for an unfunded channel!".into())), chan_phase_entry)
				}
			},
			hash_map::Entry::Vacant(_) => {
				Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
			}
		}
	}
8570
	/// Handles an incoming `shutdown` message. For funded channels this begins (or continues)
	/// cooperative close, failing back any HTLCs the channel dropped; for unfunded channels it
	/// simply force-closes them locally, since there is nothing on-chain to cooperate over.
	fn internal_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
		// Work collected inside the lock scope and processed after it is released.
		let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new();
		let mut finish_shutdown = None;
		{
			let per_peer_state = self.per_peer_state.read().unwrap();
			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
				.ok_or_else(|| {
					debug_assert!(false);
					MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
				})?;
			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			let peer_state = &mut *peer_state_lock;
			if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) {
				let phase = chan_phase_entry.get_mut();
				match phase {
					ChannelPhase::Funded(chan) => {
						if !chan.received_shutdown() {
							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
							log_info!(logger, "Received a shutdown message from our counterparty for channel {}{}.",
								msg.channel_id,
								if chan.sent_shutdown() { " after we initiated shutdown" } else { "" });
						}

						let funding_txo_opt = chan.context.get_funding_txo();
						// May yield our own shutdown reply, a monitor update, and HTLCs that
						// can no longer complete on this channel.
						let (shutdown, monitor_update_opt, htlcs) = try_chan_phase_entry!(self, peer_state,
							chan.shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_phase_entry);
						dropped_htlcs = htlcs;

						if let Some(msg) = shutdown {
							peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
								node_id: *counterparty_node_id,
								msg,
							});
						}
						if let Some(monitor_update) = monitor_update_opt {
							handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
								peer_state_lock, peer_state, per_peer_state, chan);
						}
					},
					ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) |
					ChannelPhase::UnfundedInboundV2(_) | ChannelPhase::UnfundedOutboundV2(_) => {
						let context = phase.context_mut();
						let logger = WithChannelContext::from(&self.logger, context, None);
						log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
						let mut close_res = phase.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
						remove_channel_phase!(self, peer_state, chan_phase_entry, close_res);
						// Completed outside the lock scope below.
						finish_shutdown = Some(close_res);
					},
				}
			} else {
				return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
			}
		}
		// Fail back dropped HTLCs with permanent_channel_failure (0x4000 | 8) now that the
		// peer lock is released.
		for htlc_source in dropped_htlcs.drain(..) {
			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id };
			let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
			self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
		}
		if let Some(shutdown_res) = finish_shutdown {
			self.finish_close_channel(shutdown_res);
		}

		Ok(())
	}
8639
// Handles an inbound `closing_signed` message during cooperative close: feeds the message
// to the channel, queues any counter `closing_signed` of our own, and -- once fee
// negotiation completes -- broadcasts the closing transaction, removes the channel, and
// finishes shutdown after the per-peer lock is released.
8640 fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
8641 let per_peer_state = self.per_peer_state.read().unwrap();
8642 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8643 .ok_or_else(|| {
// A message for an unknown peer indicates a routing bug upstream of this handler.
8644 debug_assert!(false);
8645 MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
8646 })?;
// Compute (closing tx to broadcast, removed channel phase, shutdown result) while
// holding the peer lock; the lock is dropped before `finish_close_channel` below.
8647 let (tx, chan_option, shutdown_result) = {
8648 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8649 let peer_state = &mut *peer_state_lock;
8650 match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
8651 hash_map::Entry::Occupied(mut chan_phase_entry) => {
8652 if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
8653 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
8654 let (closing_signed, tx, shutdown_result) = try_chan_phase_entry!(self, peer_state, chan.closing_signed(&self.fee_estimator, &msg, &&logger), chan_phase_entry);
// The channel reports a shutdown result exactly when it considers itself shut down.
8655 debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());
8656 if let Some(msg) = closing_signed {
8657 peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
8658 node_id: counterparty_node_id.clone(),
8659 msg,
8660 });
8661 }
8662 if let Some(mut close_res) = shutdown_result {
// Negotiation is complete, so a final closing transaction must exist.
8663 debug_assert!(tx.is_some());
8669 let channel_phase = remove_channel_phase!(self, peer_state, chan_phase_entry, close_res);
8670 (tx, Some(channel_phase), Some(close_res))
8671 } else {
8672 debug_assert!(tx.is_none());
8673 (tx, None, None)
8674 }
8675 } else {
8676 return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
8677 "Got a closing_signed message for an unfunded channel!".into())), chan_phase_entry);
8678 }
8679 },
8680 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8681 }
8682 };
8683 if let Some(broadcast_tx) = tx {
8684 let channel_id = chan_option.as_ref().map(|channel| channel.context().channel_id());
8685 log_info!(WithContext::from(&self.logger, Some(*counterparty_node_id), channel_id, None), "Broadcasting {}", log_tx!(broadcast_tx));
8686 self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
8687 }
// If we removed a funded channel, queue a network-wide channel_update announcing it.
8688 if let Some(ChannelPhase::Funded(chan)) = chan_option {
8689 if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
8690 let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
8691 pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
8692 msg: update
8693 });
8694 }
8695 }
// Release the per-peer-state read lock before finishing the close, which may take
// other locks.
8696 mem::drop(per_peer_state);
8697 if let Some(shutdown_result) = shutdown_result {
8698 self.finish_close_channel(shutdown_result);
8699 }
8700 Ok(())
8701 }
8702
// Handles an inbound `update_add_htlc` message: decodes the onion, builds the pending
// forward/receive state, and commits the HTLC to the channel. If the channel reports it
// cannot accept the HTLC, the pending state is rewritten into a failure we will relay
// back once the HTLC irrevocably commits.
8703 fn internal_update_add_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), MsgHandleErrInternal> {
// Decode the onion before taking any peer locks.
8704 let decoded_hop_res = self.decode_update_add_htlc_onion(msg, counterparty_node_id);
8717 let per_peer_state = self.per_peer_state.read().unwrap();
8718 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8719 .ok_or_else(|| {
8720 debug_assert!(false);
8721 MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
8722 })?;
8723 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8724 let peer_state = &mut *peer_state_lock;
8725 match peer_state.channel_by_id.entry(msg.channel_id) {
8726 hash_map::Entry::Occupied(mut chan_phase_entry) => {
8727 if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
8728 let mut pending_forward_info = match decoded_hop_res {
8729 Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
8730 self.construct_pending_htlc_status(
8731 msg, counterparty_node_id, shared_secret, next_hop,
8732 chan.context.config().accept_underpaying_htlcs, next_packet_pk_opt,
8733 ),
8734 Err(e) => PendingHTLCStatus::Fail(e)
8735 };
8736 let logger = WithChannelContext::from(&self.logger, &chan.context, Some(msg.payment_hash));
// If the channel can't accept this HTLC, replace the pending state with a failure.
8737 if let Err((_, error_code)) = chan.can_accept_incoming_htlc(&msg, &self.fee_estimator, &logger) {
// For blinded HTLCs we must only ever reveal `invalid_onion_blinding`, via a
// malformed-HTLC message with an all-zero sha256_of_onion.
8741 if msg.blinding_point.is_some() {
8742 pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
8743 msgs::UpdateFailMalformedHTLC {
8744 channel_id: msg.channel_id,
8745 htlc_id: msg.htlc_id,
8746 sha256_of_onion: [0; 32],
8747 failure_code: INVALID_ONION_BLINDING,
8748 }
8749 ))
8750 } else {
8751 match pending_forward_info {
8752 PendingHTLCStatus::Forward(PendingHTLCInfo {
8753 ref incoming_shared_secret, ref routing, ..
8754 }) => {
8755 let reason = if routing.blinded_failure().is_some() {
8756 HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
// Error codes with the 0x1000 (NODE) flag carry extra data (e.g. a channel_update).
8757 } else if (error_code & 0x1000) != 0 {
8758 let error_data = self.get_htlc_inbound_temp_fail_data(error_code);
8759 HTLCFailReason::reason(error_code, error_data)
8760 } else {
8761 HTLCFailReason::from_failure_code(error_code)
8762 }.get_encrypted_failure_packet(incoming_shared_secret, &None);
8763 let msg = msgs::UpdateFailHTLC {
8764 channel_id: msg.channel_id,
8765 htlc_id: msg.htlc_id,
8766 reason
8767 };
8768 pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg));
8769 },
// Already-failing states are left as-is.
8770 _ => {},
8771 }
8772 }
8773 }
8774 try_chan_phase_entry!(self, peer_state, chan.update_add_htlc(&msg, pending_forward_info, &self.fee_estimator), chan_phase_entry);
8775 } else {
8776 return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
8777 "Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry);
8778 }
8779 },
8780 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8781 }
8782 Ok(())
8783 }
8784
// Handles an inbound `update_fulfill_htlc` message: applies the fulfill to the channel,
// registers an action blocking our next RAA monitor update until the preimage is durably
// persisted on the inbound edge, then claims the HTLC back toward its source.
8785 fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
8786 let funding_txo;
8787 let next_user_channel_id;
8788 let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = {
8789 let per_peer_state = self.per_peer_state.read().unwrap();
8790 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8791 .ok_or_else(|| {
8792 debug_assert!(false);
8793 MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
8794 })?;
8795 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8796 let peer_state = &mut *peer_state_lock;
8797 match peer_state.channel_by_id.entry(msg.channel_id) {
8798 hash_map::Entry::Occupied(mut chan_phase_entry) => {
8799 if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
8800 let res = try_chan_phase_entry!(self, peer_state, chan.update_fulfill_htlc(&msg), chan_phase_entry);
// For forwarded HTLCs, block our next revoke_and_ack until the inbound edge's
// ChannelMonitor has durably stored the preimage.
8801 if let HTLCSource::PreviousHopData(prev_hop) = &res.0 {
8802 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
8803 log_trace!(logger,
8804 "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
8805 msg.channel_id);
8806 peer_state.actions_blocking_raa_monitor_updates.entry(msg.channel_id)
8807 .or_insert_with(Vec::new)
8808 .push(RAAMonitorUpdateBlockingAction::from_prev_hop_data(&prev_hop));
8809 }
// A fulfill can only arrive on a funded channel, so the funding outpoint is set.
8810 funding_txo = chan.context.get_funding_txo().expect("We won't accept a fulfill until funded");
8817 next_user_channel_id = chan.context.get_user_id();
8818 res
8819 } else {
8820 return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
8821 "Got an update_fulfill_htlc message for an unfunded channel!".into())), chan_phase_entry);
8822 }
8823 },
8824 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8825 }
8826 };
// Claim backwards (outside the per-peer lock scope above) toward the HTLC's source.
8827 self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(),
8828 Some(forwarded_htlc_value), skimmed_fee_msat, false, false, Some(*counterparty_node_id),
8829 funding_txo, msg.channel_id, Some(next_user_channel_id),
8830 );
8831
8832 Ok(())
8833 }
8834
// Handles an inbound `update_fail_htlc` message by recording the failure on the channel.
// The failure is only relayed backwards once the removal irrevocably commits.
8835 fn internal_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
8836 let per_peer_state = self.per_peer_state.read().unwrap();
8839 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8840 .ok_or_else(|| {
8841 debug_assert!(false);
8842 MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
8843 })?;
8844 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8845 let peer_state = &mut *peer_state_lock;
8846 match peer_state.channel_by_id.entry(msg.channel_id) {
8847 hash_map::Entry::Occupied(mut chan_phase_entry) => {
// Only funded channels can carry HTLCs; anything else is a protocol violation.
8848 if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
8849 try_chan_phase_entry!(self, peer_state, chan.update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan_phase_entry);
8850 } else {
8851 return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
8852 "Got an update_fail_htlc message for an unfunded channel!".into())), chan_phase_entry);
8853 }
8854 },
8855 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8856 }
8857 Ok(())
8858 }
8859
// Handles an inbound `update_fail_malformed_htlc` message. Per BOLT 2 the failure_code
// must have the BADONION bit set; otherwise we close the channel.
8860 fn internal_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
8861 let per_peer_state = self.per_peer_state.read().unwrap();
8864 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8865 .ok_or_else(|| {
8866 debug_assert!(false);
8867 MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
8868 })?;
8869 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8870 let peer_state = &mut *peer_state_lock;
8871 match peer_state.channel_by_id.entry(msg.channel_id) {
8872 hash_map::Entry::Occupied(mut chan_phase_entry) => {
// 0x8000 is the BADONION bit, required for all malformed-HTLC failures.
8873 if (msg.failure_code & 0x8000) == 0 {
8874 let chan_err = ChannelError::close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
8875 try_chan_phase_entry!(self, peer_state, Err(chan_err), chan_phase_entry);
8876 }
8877 if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
8878 try_chan_phase_entry!(self, peer_state, chan.update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan_phase_entry);
8879 } else {
8880 return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
8881 "Got an update_fail_malformed_htlc message for an unfunded channel!".into())), chan_phase_entry);
8882 }
8883 Ok(())
8884 },
8885 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8886 }
8887 }
8888
// Handles an inbound `commitment_signed` message. For V2 (interactively-funded) channels
// still in their signing session this produces the initial ChannelMonitor to persist;
// otherwise it yields a normal ChannelMonitorUpdate to apply.
8889 fn internal_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> {
8890 let best_block = *self.best_block.read().unwrap();
8891 let per_peer_state = self.per_peer_state.read().unwrap();
8892 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8893 .ok_or_else(|| {
8894 debug_assert!(false);
8895 MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
8896 })?;
8897 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8898 let peer_state = &mut *peer_state_lock;
8899 match peer_state.channel_by_id.entry(msg.channel_id) {
8900 hash_map::Entry::Occupied(mut chan_phase_entry) => {
8901 if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
8902 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
8903 let funding_txo = chan.context.get_funding_txo();
8904
// An active interactive-tx signing session means this is the first commitment_signed
// for a V2 channel: build and register the initial monitor.
8905 if chan.interactive_tx_signing_session.is_some() {
8906 let monitor = try_chan_phase_entry!(
8907 self, peer_state, chan.commitment_signed_initial_v2(msg, best_block, &self.signer_provider, &&logger),
8908 chan_phase_entry);
8909 let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
8910 if let Ok(persist_state) = monitor_res {
8911 handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
8912 per_peer_state, chan, INITIAL_MONITOR);
8913 } else {
// watch_channel failing here implies we are already watching this funding
// outpoint, i.e. it was duplicated -- close the channel.
8914 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
8915 log_error!(logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
8916 try_chan_phase_entry!(self, peer_state, Err(ChannelError::Close(
8917 (
8918 "Channel funding outpoint was a duplicate".to_owned(),
8919 ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) },
8920 )
8921 )), chan_phase_entry)
8922 }
8923 } else {
// Normal (post-funding) commitment update path.
8924 let monitor_update_opt = try_chan_phase_entry!(
8925 self, peer_state, chan.commitment_signed(msg, &&logger), chan_phase_entry);
8926 if let Some(monitor_update) = monitor_update_opt {
8927 handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
8928 peer_state, per_peer_state, chan);
8929 }
8930 }
8931 Ok(())
8932 } else {
8933 return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
8934 "Got a commitment_signed message for an unfunded channel!".into())), chan_phase_entry);
8935 }
8936 },
8937 hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8938 }
8939 }
8940
8941 fn push_decode_update_add_htlcs(&self, mut update_add_htlcs: (u64, Vec<msgs::UpdateAddHTLC>)) {
8942 let mut push_forward_event = self.forward_htlcs.lock().unwrap().is_empty();
8943 let mut decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
8944 push_forward_event &= decode_update_add_htlcs.is_empty();
8945 let scid = update_add_htlcs.0;
8946 match decode_update_add_htlcs.entry(scid) {
8947 hash_map::Entry::Occupied(mut e) => { e.get_mut().append(&mut update_add_htlcs.1); },
8948 hash_map::Entry::Vacant(e) => { e.insert(update_add_htlcs.1); },
8949 }
8950 if push_forward_event { self.push_pending_forwards_ev(); }
8951 }
8952
8953 #[inline]
8954 fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, Option<PublicKey>, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) {
8955 let push_forward_event = self.forward_htlcs_without_forward_event(per_source_pending_forwards);
8956 if push_forward_event { self.push_pending_forwards_ev() }
8957 }
8958
// Queues per-source pending HTLC forwards into `self.forward_htlcs`, generating
// HTLCIntercepted events for interceptable fake SCIDs. Returns true if the caller should
// generate a PendingHTLCsForwardable event (i.e. both pending queues were empty before).
8959 #[inline]
8960 fn forward_htlcs_without_forward_event(&self, per_source_pending_forwards: &mut [(u64, Option<PublicKey>, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) -> bool {
8961 let mut push_forward_event = false;
8962 for &mut (prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
8963 let mut new_intercept_events = VecDeque::new();
8964 let mut failed_intercept_forwards = Vec::new();
8965 if !pending_forwards.is_empty() {
8966 for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
// Receives are keyed under the sentinel SCID 0.
8967 let scid = match forward_info.routing {
8968 PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
8969 PendingHTLCRouting::Receive { .. } => 0,
8970 PendingHTLCRouting::ReceiveKeysend { .. } => 0,
8971 };
// A known SCID of ours is never an intercept candidate.
8972 let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid);
8974
8975 let decode_update_add_htlcs_empty = self.decode_update_add_htlcs.lock().unwrap().is_empty();
8976 let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
8977 let forward_htlcs_empty = forward_htlcs.is_empty();
8978 match forward_htlcs.entry(scid) {
8979 hash_map::Entry::Occupied(mut entry) => {
8980 entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
8981 prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint,
8982 prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info
8983 }));
8984 },
8985 hash_map::Entry::Vacant(entry) => {
// Unknown SCIDs matching the intercept fake-SCID namespace become
// HTLCIntercepted events instead of direct forwards.
8986 if !is_our_scid && forward_info.incoming_amt_msat.is_some() &&
8987 fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, scid, &self.chain_hash)
8988 {
// The intercept id is the hash of the incoming shared secret, making
// duplicate intercepts of the same HTLC detectable below.
8989 let intercept_id = InterceptId(Sha256::hash(&forward_info.incoming_shared_secret).to_byte_array());
8990 let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
8991 match pending_intercepts.entry(intercept_id) {
8992 hash_map::Entry::Vacant(entry) => {
8993 new_intercept_events.push_back((events::Event::HTLCIntercepted {
8994 requested_next_hop_scid: scid,
8995 payment_hash: forward_info.payment_hash,
8996 inbound_amount_msat: forward_info.incoming_amt_msat.unwrap(),
8997 expected_outbound_amount_msat: forward_info.outgoing_amt_msat,
8998 intercept_id
8999 }, None));
9000 entry.insert(PendingAddHTLCInfo {
9001 prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint,
9002 prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info
9003 });
9004 },
9005 hash_map::Entry::Occupied(_) => {
// Duplicate intercepted payment: fail it back rather than queueing twice.
9006 let logger = WithContext::from(&self.logger, None, Some(prev_channel_id), Some(forward_info.payment_hash));
9007 log_info!(logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
9008 let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
9009 short_channel_id: prev_short_channel_id,
9010 user_channel_id: Some(prev_user_channel_id),
9011 counterparty_node_id: prev_counterparty_node_id,
9012 outpoint: prev_funding_outpoint,
9013 channel_id: prev_channel_id,
9014 htlc_id: prev_htlc_id,
9015 incoming_packet_shared_secret: forward_info.incoming_shared_secret,
9016 phantom_shared_secret: None,
9017 blinded_failure: forward_info.routing.blinded_failure(),
9018 cltv_expiry: forward_info.routing.incoming_cltv_expiry(),
9019 });
9020
9021 failed_intercept_forwards.push((htlc_source, forward_info.payment_hash,
9022 HTLCFailReason::from_failure_code(0x4000 | 10),
9023 HTLCDestination::InvalidForward { requested_forward_scid: scid },
9024 ));
9025 }
9026 }
9027 } else {
// Only signal a forwardable event if both queues were empty beforehand --
// otherwise an event is already pending.
9028 push_forward_event |= forward_htlcs_empty && decode_update_add_htlcs_empty;
9031 entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
9032 prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint,
9033 prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info
9034 })));
9035 }
9036 }
9037 }
9038 }
9039 }
9040
// Fail duplicate-intercept HTLCs after the forward_htlcs lock above is released.
9041 for (htlc_source, payment_hash, failure_reason, destination) in failed_intercept_forwards.drain(..) {
9042 push_forward_event |= self.fail_htlc_backwards_internal_without_forward_event(&htlc_source, &payment_hash, &failure_reason, destination);
9043 }
9044
9045 if !new_intercept_events.is_empty() {
9046 let mut events = self.pending_events.lock().unwrap();
9047 events.append(&mut new_intercept_events);
9048 }
9049 }
9050 push_forward_event
9051 }
9052
9053 fn push_pending_forwards_ev(&self) {
9054 let mut pending_events = self.pending_events.lock().unwrap();
9055 let is_processing_events = self.pending_events_processor.load(Ordering::Acquire);
9056 let num_forward_events = pending_events.iter().filter(|(ev, _)|
9057 if let events::Event::PendingHTLCsForwardable { .. } = ev { true } else { false }
9058 ).count();
9059 if (is_processing_events && num_forward_events <= 1) || num_forward_events < 1 {
9066 pending_events.push_back((Event::PendingHTLCsForwardable {
9067 time_forwardable: Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS),
9068 }, None));
9069 }
9070 }
9071
9072 fn raa_monitor_updates_held(&self,
9077 actions_blocking_raa_monitor_updates: &BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
9078 channel_funding_outpoint: OutPoint, channel_id: ChannelId, counterparty_node_id: PublicKey
9079 ) -> bool {
9080 actions_blocking_raa_monitor_updates
9081 .get(&channel_id).map(|v| !v.is_empty()).unwrap_or(false)
9082 || self.pending_events.lock().unwrap().iter().any(|(_, action)| {
9083 action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
9084 channel_funding_outpoint,
9085 channel_id,
9086 counterparty_node_id,
9087 })
9088 })
9089 }
9090
#[cfg(any(test, feature = "_test_utils"))]
pub(crate) fn test_raa_monitor_updates_held(&self,
	counterparty_node_id: PublicKey, channel_id: ChannelId
) -> bool {
	// Test-only accessor for `raa_monitor_updates_held`, resolving the channel from its
	// id. Returns false when the peer or channel is unknown.
	let per_peer_state = self.per_peer_state.read().unwrap();
	let peer_state_mtx = match per_peer_state.get(&counterparty_node_id) {
		Some(mtx) => mtx,
		None => return false,
	};
	let peer_state = peer_state_mtx.lock().unwrap();
	let chan = match peer_state.channel_by_id.get(&channel_id) {
		Some(chan) => chan,
		None => return false,
	};
	// Any channel in the map that we query here is expected to have a funding outpoint.
	let funding_txo = chan.context().get_funding_txo().unwrap();
	self.raa_monitor_updates_held(
		&peer_state.actions_blocking_raa_monitor_updates, funding_txo, channel_id,
		counterparty_node_id)
}
9107
// Handles an inbound `revoke_and_ack` message: advances the channel's commitment state,
// applies any resulting monitor update (unless blocked by a pending RAA-blocking
// action), and fails back any holding-cell HTLCs freed by the revocation.
9108 fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
9109 let htlcs_to_fail = {
9110 let per_peer_state = self.per_peer_state.read().unwrap();
9111 let mut peer_state_lock = per_peer_state.get(counterparty_node_id)
9112 .ok_or_else(|| {
9113 debug_assert!(false);
9114 MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
9115 }).map(|mtx| mtx.lock().unwrap())?;
9116 let peer_state = &mut *peer_state_lock;
9117 match peer_state.channel_by_id.entry(msg.channel_id) {
9118 hash_map::Entry::Occupied(mut chan_phase_entry) => {
9119 if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
9120 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
9121 let funding_txo_opt = chan.context.get_funding_txo();
// Check whether this channel's RAA monitor updates are currently blocked; if so
// the channel will defer releasing the update.
9122 let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
9123 self.raa_monitor_updates_held(
9124 &peer_state.actions_blocking_raa_monitor_updates, funding_txo, msg.channel_id,
9125 *counterparty_node_id)
9126 } else { false };
9127 let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self, peer_state,
9128 chan.revoke_and_ack(&msg, &self.fee_estimator, &&logger, mon_update_blocked), chan_phase_entry);
9129 if let Some(monitor_update) = monitor_update_opt {
9130 let funding_txo = funding_txo_opt
9131 .expect("Funding outpoint must have been set for RAA handling to succeed");
9132 handle_new_monitor_update!(self, funding_txo, monitor_update,
9133 peer_state_lock, peer_state, per_peer_state, chan);
9134 }
9135 htlcs_to_fail
9136 } else {
9137 return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
9138 "Got a revoke_and_ack message for an unfunded channel!".into())), chan_phase_entry);
9139 }
9140 },
9141 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
9142 }
9143 };
// Fail the freed holding-cell HTLCs after the per-peer lock above is released.
9144 self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id);
9145 Ok(())
9146 }
9147
// Handles an inbound `update_fee` message by forwarding it to the channel for
// validation against our fee estimator.
9148 fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
9149 let per_peer_state = self.per_peer_state.read().unwrap();
9150 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
9151 .ok_or_else(|| {
9152 debug_assert!(false);
9153 MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
9154 })?;
9155 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
9156 let peer_state = &mut *peer_state_lock;
9157 match peer_state.channel_by_id.entry(msg.channel_id) {
9158 hash_map::Entry::Occupied(mut chan_phase_entry) => {
// Fee updates are only meaningful on funded channels.
9159 if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
9160 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
9161 try_chan_phase_entry!(self, peer_state, chan.update_fee(&self.fee_estimator, &msg, &&logger), chan_phase_entry);
9162 } else {
9163 return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
9164 "Got an update_fee message for an unfunded channel!".into())), chan_phase_entry);
9165 }
9166 },
9167 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
9168 }
9169 Ok(())
9170 }
9171
// Handles an inbound `announcement_signatures` message: combines the counterparty's
// signatures with ours and queues a BroadcastChannelAnnouncement (with an accompanying
// channel_update) for the network.
9172 fn internal_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> {
9173 let per_peer_state = self.per_peer_state.read().unwrap();
9174 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
9175 .ok_or_else(|| {
9176 debug_assert!(false);
9177 MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
9178 })?;
9179 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
9180 let peer_state = &mut *peer_state_lock;
9181 match peer_state.channel_by_id.entry(msg.channel_id) {
9182 hash_map::Entry::Occupied(mut chan_phase_entry) => {
9183 if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
// Announcement signatures before the channel is usable are ignored (not a close).
9184 if !chan.context.is_usable() {
9185 return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
9186 }
9187
9188 peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
9189 msg: try_chan_phase_entry!(self, peer_state, chan.announcement_signatures(
9190 &self.node_signer, self.chain_hash, self.best_block.read().unwrap().height,
9191 msg, &self.default_configuration
9192 ), chan_phase_entry),
// NOTE(review): unwrap here relies on broadcast updates being constructible for
// any channel that produced announcement signatures -- confirm against
// `get_channel_update_for_broadcast`'s error cases.
9193 update_msg: Some(self.get_channel_update_for_broadcast(chan).unwrap()),
9196 });
9197 } else {
9198 return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
9199 "Got an announcement_signatures message for an unfunded channel!".into())), chan_phase_entry);
9200 }
9201 },
9202 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
9203 }
9204 Ok(())
9205 }
9206
// Handles an inbound `channel_update` for one of our own channels, looked up by SCID.
// Returns a NotifyOption indicating whether anything changed that requires persistence.
9207 fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
// Unknown SCID: not one of our channels, nothing to do.
9209 let (chan_counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&msg.contents.short_channel_id) {
9210 Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
9211 None => {
9212 return Ok(NotifyOption::SkipPersistNoEvents)
9214 }
9215 };
9216 let per_peer_state = self.per_peer_state.read().unwrap();
9217 let peer_state_mutex_opt = per_peer_state.get(&chan_counterparty_node_id);
9218 if peer_state_mutex_opt.is_none() {
9219 return Ok(NotifyOption::SkipPersistNoEvents)
9220 }
9221 let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
9222 let peer_state = &mut *peer_state_lock;
9223 match peer_state.channel_by_id.entry(chan_id) {
9224 hash_map::Entry::Occupied(mut chan_phase_entry) => {
9225 if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
// The sender isn't our counterparty on this channel: fine for announced
// channels (anyone may relay updates), an error for private ones.
9226 if chan.context.get_counterparty_node_id() != *counterparty_node_id {
9227 if chan.context.should_announce() {
9228 return Ok(NotifyOption::SkipPersistNoEvents);
9232 }
9233 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
9234 }
// channel_flags bit 0 identifies which node the update came from; ignore
// updates describing our own side of the channel.
9235 let were_node_one = self.get_our_node_id().serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
9236 let msg_from_node_one = msg.contents.channel_flags & 1 == 0;
9237 if were_node_one == msg_from_node_one {
9238 return Ok(NotifyOption::SkipPersistNoEvents);
9239 } else {
9240 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
9241 log_debug!(logger, "Received channel_update {:?} for channel {}.", msg, chan_id);
9242 let did_change = try_chan_phase_entry!(self, peer_state, chan.channel_update(&msg), chan_phase_entry);
// Only persist when the stored counterparty update actually changed.
9243 if !did_change {
9246 return Ok(NotifyOption::SkipPersistNoEvents);
9247 }
9248 }
9249 } else {
9250 return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
9251 "Got a channel_update for an unfunded channel!".into())), chan_phase_entry);
9252 }
9253 },
9254 hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersistNoEvents)
9255 }
9256 Ok(NotifyOption::DoPersist)
9257 }
9258
	/// Handles an inbound `channel_reestablish` message, sent by the counterparty on reconnect
	/// to resume a channel.
	///
	/// On success, queues whatever is needed to resume the channel (a re-sent `shutdown`, an
	/// updated `channel_update`, and any RAA/commitment messages via
	/// `handle_channel_resumption`). For an unknown channel, a bogus `ChannelReestablish` is
	/// sent back to induce the peer to force-close (useful for peers that lost state), and an
	/// error is returned.
	fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<NotifyOption, MsgHandleErrInternal> {
		let need_lnd_workaround = {
			let per_peer_state = self.per_peer_state.read().unwrap();

			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
				.ok_or_else(|| {
					// We should only be handed messages for peers we know about.
					debug_assert!(false);
					MsgHandleErrInternal::send_err_msg_no_close(
						format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
						msg.channel_id
					)
				})?;
			let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), None);
			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			let peer_state = &mut *peer_state_lock;
			match peer_state.channel_by_id.entry(msg.channel_id) {
				hash_map::Entry::Occupied(mut chan_phase_entry) => {
					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
						// Let the channel work out what (if anything) must be re-sent.
						let responses = try_chan_phase_entry!(self, peer_state, chan.channel_reestablish(
							msg, &&logger, &self.node_signer, self.chain_hash,
							&self.default_configuration, &*self.best_block.read().unwrap()), chan_phase_entry);
						let mut channel_update = None;
						if let Some(msg) = responses.shutdown_msg {
							// We were mid-shutdown; re-send our shutdown rather than a
							// channel_update.
							peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
								node_id: counterparty_node_id.clone(),
								msg,
							});
						} else if chan.context.is_usable() {
							// Send our latest channel_update directly to the peer so it has
							// current forwarding parameters after reconnecting.
							if let Ok(msg) = self.get_channel_update_for_unicast(chan) {
								channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
									node_id: chan.context.get_counterparty_node_id(),
									msg,
								});
							}
						}
						// take() so the LND-bug workaround fires at most once per reestablish.
						let need_lnd_workaround = chan.context.workaround_lnd_bug_4006.take();
						let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption(
							&mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.order,
							Vec::new(), Vec::new(), None, responses.channel_ready, responses.announcement_sigs, None);
						// Reestablish itself never yields new forwards or update_adds to decode.
						debug_assert!(htlc_forwards.is_none());
						debug_assert!(decode_update_add_htlcs.is_none());
						// Queue the channel_update after any resumption messages.
						if let Some(upd) = channel_update {
							peer_state.pending_msg_events.push(upd);
						}
						need_lnd_workaround
					} else {
						return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
							"Got a channel_reestablish message for an unfunded channel!".into())), chan_phase_entry);
					}
				},
				hash_map::Entry::Vacant(_) => {
					log_debug!(logger, "Sending bogus ChannelReestablish for unknown channel {} to force channel closure",
						msg.channel_id);
					// Respond with a deliberately-bogus reestablish (commitment numbers of 0,
					// dummy secret/point) so the peer force-closes and can claim its funds
					// on-chain if it lost state for this channel.
					peer_state.pending_msg_events.push(MessageSendEvent::SendChannelReestablish {
						node_id: *counterparty_node_id,
						msg: msgs::ChannelReestablish {
							channel_id: msg.channel_id,
							next_local_commitment_number: 0,
							next_remote_commitment_number: 0,
							your_last_per_commitment_secret: [1u8; 32],
							my_current_per_commitment_point: PublicKey::from_slice(&[2u8; 33]).unwrap(),
							next_funding_txid: None,
						},
					});
					return Err(MsgHandleErrInternal::send_err_msg_no_close(
						format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
							counterparty_node_id), msg.channel_id)
					)
				}
			}
		};

		// Workaround for LND bug 4006: replay the stored channel_ready, if any, as if it had
		// just been received.
		if let Some(channel_ready_msg) = need_lnd_workaround {
			self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
		}
		Ok(NotifyOption::SkipPersistHandleEvents)
	}
9357
	/// Drains and processes events surfaced by the chain monitor: on-chain HTLC resolutions,
	/// monitor-initiated force-closes, and completed (previously async) monitor updates.
	///
	/// Returns `true` if any monitor events were present, so callers know something changed.
	fn process_pending_monitor_events(&self) -> bool {
		// Caller must already hold the total_consistency_lock.
		debug_assert!(self.total_consistency_lock.try_write().is_err());
		let mut failed_channels = Vec::new();
		let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
		let has_pending_monitor_events = !pending_monitor_events.is_empty();
		for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
			for monitor_event in monitor_events.drain(..) {
				match monitor_event {
					MonitorEvent::HTLCEvent(htlc_update) => {
						let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id), Some(htlc_update.payment_hash));
						if let Some(preimage) = htlc_update.payment_preimage {
							// The monitor learned the preimage (e.g. on-chain); claim the HTLC
							// backwards. Note htlc_value_satoshis is converted to msats here.
							log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
							self.claim_funds_internal(htlc_update.source, preimage,
								htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
								false, counterparty_node_id, funding_outpoint, channel_id, None);
						} else {
							// The HTLC resolved without a preimage; fail it backwards with
							// failure code 0x4000|8 (PERM-flagged, per BOLT 4).
							log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
							let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
							let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
							self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
						}
					},
					MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => {
						// If the event didn't carry a counterparty, fall back to the
						// outpoint_to_peer map to find the channel's peer.
						let counterparty_node_id_opt = match counterparty_node_id {
							Some(cp_id) => Some(cp_id),
							None => {
								let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
								outpoint_to_peer.get(&funding_outpoint).cloned()
							}
						};
						if let Some(counterparty_node_id) = counterparty_node_id_opt {
							let per_peer_state = self.per_peer_state.read().unwrap();
							if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
								let mut peer_state_lock = peer_state_mutex.lock().unwrap();
								let peer_state = &mut *peer_state_lock;
								let pending_msg_events = &mut peer_state.pending_msg_events;
								if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_id) {
									// Prefer the reason the monitor reported, when present.
									let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
										reason
									} else {
										ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
									};
									// The monitor already force-closed on-chain; tear the channel
									// down locally and queue notifications.
									let mut shutdown_res = chan_phase_entry.get_mut().context_mut().force_shutdown(false, reason.clone());
									let chan_phase = remove_channel_phase!(self, peer_state, chan_phase_entry, shutdown_res);
									failed_channels.push(shutdown_res);
									if let ChannelPhase::Funded(chan) = chan_phase {
										// Announce the channel as unusable to the network...
										if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
											let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
											pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
												msg: update
											});
										}
										// ...and tell the peer why we're disconnecting.
										pending_msg_events.push(events::MessageSendEvent::HandleError {
											node_id: chan.context.get_counterparty_node_id(),
											action: msgs::ErrorAction::DisconnectPeer {
												msg: Some(msgs::ErrorMessage {
													channel_id: chan.context.channel_id(),
													data: reason.to_string()
												})
											},
										});
									}
								}
							}
						}
					},
					MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id } => {
						// A previously-in-flight monitor update finished persisting; unblock
						// the channel.
						self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, counterparty_node_id.as_ref());
					},
				}
			}
		}

		// Finish closing the failed channels only after all per-peer locks are released.
		for failure in failed_channels.drain(..) {
			self.finish_close_channel(failure);
		}

		has_pending_monitor_events
	}
9441
	/// Processes any pending monitor events, taking the persistence-notifier guard.
	///
	/// Only compiled in fuzzing builds, where the fuzzer drives monitor-event processing
	/// directly instead of going through the normal event loop.
	#[cfg(fuzzing)]
	pub fn process_monitor_events(&self) {
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		self.process_pending_monitor_events();
	}
9450
	/// Attempts to free HTLCs sitting in each funded channel's holding cell.
	///
	/// Returns `true` if a monitor update was generated or any holding-cell HTLCs had to be
	/// failed, i.e. if anything changed that the caller should act on/persist.
	fn check_free_holding_cells(&self) -> bool {
		let mut has_monitor_update = false;
		let mut failed_htlcs = Vec::new();

		// The labeled loops let us restart the entire scan after handling a monitor update:
		// handle_new_monitor_update! below may manipulate the locks we iterate under, so we
		// never resume a potentially-stale iteration.
		'peer_loop: loop {
			let per_peer_state = self.per_peer_state.read().unwrap();
			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
				'chan_loop: loop {
					let mut peer_state_lock = peer_state_mutex.lock().unwrap();
					let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
					// Only funded channels have holding cells to free.
					for (channel_id, chan) in peer_state.channel_by_id.iter_mut().filter_map(
						|(chan_id, phase)| if let ChannelPhase::Funded(chan) = phase { Some((chan_id, chan)) } else { None }
					) {
						let counterparty_node_id = chan.context.get_counterparty_node_id();
						let funding_txo = chan.context.get_funding_txo();
						let (monitor_opt, holding_cell_failed_htlcs) =
							chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &&WithChannelContext::from(&self.logger, &chan.context, None));
						if !holding_cell_failed_htlcs.is_empty() {
							// Defer failing these until all locks are dropped below.
							failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
						}
						if let Some(monitor_update) = monitor_opt {
							has_monitor_update = true;

							handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
								peer_state_lock, peer_state, per_peer_state, chan);
							// Restart the scan from scratch (see comment above).
							continue 'peer_loop;
						}
					}
					break 'chan_loop;
				}
			}
			break 'peer_loop;
		}

		let has_update = has_monitor_update || !failed_htlcs.is_empty();
		// Now that no channel locks are held, fail the deferred HTLCs backwards.
		for (failures, channel_id, counterparty_node_id) in failed_htlcs.drain(..) {
			self.fail_holding_cell_htlcs(failures, channel_id, &counterparty_node_id);
		}

		has_update
	}
9499
	/// Indicates that a channel signer may now be able to produce results it previously could
	/// not (e.g. an asynchronous signer became ready).
	///
	/// Re-polls the signer state of each channel (or only the channel in `channel_opt`, when
	/// given) and queues any messages that became available: commitment updates, RAAs, funding
	/// messages, `channel_ready`, closing messages, or a fully-signed closing transaction.
	/// Channels whose signer produced a shutdown result are removed and fully closed.
	pub fn signer_unblocked(&self, channel_opt: Option<(PublicKey, ChannelId)>) {
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

		// Polls one channel's signer and queues any now-available messages; returns a
		// ShutdownResult if the channel finished shutting down as a result.
		let unblock_chan = |phase: &mut ChannelPhase<SP>, pending_msg_events: &mut Vec<MessageSendEvent>| -> Option<ShutdownResult> {
			let node_id = phase.context().get_counterparty_node_id();
			match phase {
				ChannelPhase::Funded(chan) => {
					let msgs = chan.signer_maybe_unblocked(&self.logger);
					let cu_msg = msgs.commitment_update.map(|updates| events::MessageSendEvent::UpdateHTLCs {
						node_id,
						updates,
					});
					let raa_msg = msgs.revoke_and_ack.map(|msg| events::MessageSendEvent::SendRevokeAndACK {
						node_id,
						msg,
					});
					// Queue the commitment update and RAA in whichever order the channel
					// requires; either may also be absent.
					match (cu_msg, raa_msg) {
						(Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::CommitmentFirst => {
							pending_msg_events.push(cu);
							pending_msg_events.push(raa);
						},
						(Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::RevokeAndACKFirst => {
							pending_msg_events.push(raa);
							pending_msg_events.push(cu);
						},
						(Some(cu), _) => pending_msg_events.push(cu),
						(_, Some(raa)) => pending_msg_events.push(raa),
						(_, _) => {},
					}
					if let Some(msg) = msgs.funding_signed {
						pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
							node_id,
							msg,
						});
					}
					if let Some(msg) = msgs.channel_ready {
						send_channel_ready!(self, pending_msg_events, chan, msg);
					}
					if let Some(msg) = msgs.closing_signed {
						pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
							node_id,
							msg,
						});
					}
					if let Some(broadcast_tx) = msgs.signed_closing_tx {
						// Cooperative close completed: broadcast the closing transaction and
						// announce the channel's closure to the network.
						let channel_id = chan.context.channel_id();
						let counterparty_node_id = chan.context.get_counterparty_node_id();
						let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None);
						log_info!(logger, "Broadcasting closing tx {}", log_tx!(broadcast_tx));
						self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);

						if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
							pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
								msg: update
							});
						}
					}
					msgs.shutdown_result
				}
				ChannelPhase::UnfundedOutboundV1(chan) => {
					// An unblocked signer may let us (re-)send open_channel and/or
					// funding_created.
					let (open_channel, funding_created) = chan.signer_maybe_unblocked(self.chain_hash.clone(), &self.logger);
					if let Some(msg) = open_channel {
						pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
							node_id,
							msg,
						});
					}
					if let Some(msg) = funding_created {
						pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
							node_id,
							msg,
						});
					}
					None
				}
				ChannelPhase::UnfundedInboundV1(chan) => {
					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
					if let Some(msg) = chan.signer_maybe_unblocked(&&logger) {
						pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
							node_id,
							msg,
						});
					}
					None
				},
				// Nothing to unblock for V2 establishment phases here.
				ChannelPhase::UnfundedInboundV2(_) | ChannelPhase::UnfundedOutboundV2(_) => None,
			}
		};

		let mut shutdown_results = Vec::new();
		let per_peer_state = self.per_peer_state.read().unwrap();
		// Restrict the scan to the requested peer, when one was given.
		let per_peer_state_iter = per_peer_state.iter().filter(|(cp_id, _)| {
			if let Some((counterparty_node_id, _)) = channel_opt {
				**cp_id == counterparty_node_id
			} else { true }
		});
		for (_cp_id, peer_state_mutex) in per_peer_state_iter {
			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			let peer_state = &mut *peer_state_lock;
			peer_state.channel_by_id.retain(|_, chan| {
				let shutdown_result = match channel_opt {
					// Skip channels other than the requested one.
					Some((_, channel_id)) if chan.context().channel_id() != channel_id => None,
					_ => unblock_chan(chan, &mut peer_state.pending_msg_events),
				};
				if let Some(mut shutdown_result) = shutdown_result {
					let context = &chan.context();
					let logger = WithChannelContext::from(&self.logger, context, None);
					log_trace!(logger, "Removing channel {} now that the signer is unblocked", context.channel_id());
					locked_close_channel!(self, peer_state, context, shutdown_result);
					shutdown_results.push(shutdown_result);
					// Drop the fully-closed channel from the map.
					false
				} else {
					true
				}
			});
		}
		drop(per_peer_state);
		// Finish closing channels only after the per-peer locks are released.
		for shutdown_result in shutdown_results.drain(..) {
			self.finish_close_channel(shutdown_result);
		}
	}
9629
	/// Attempts to propose an initial `closing_signed` on every funded channel that is ready
	/// to begin cooperative-close fee negotiation.
	///
	/// Returns `true` if any message was queued or any channel errored, i.e. if the caller
	/// should persist/act.
	fn maybe_generate_initial_closing_signed(&self) -> bool {
		let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new();
		let mut has_update = false;
		let mut shutdown_results = Vec::new();
		{
			let per_peer_state = self.per_peer_state.read().unwrap();

			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
				let peer_state = &mut *peer_state_lock;
				let pending_msg_events = &mut peer_state.pending_msg_events;
				peer_state.channel_by_id.retain(|channel_id, phase| {
					match phase {
						ChannelPhase::Funded(chan) => {
							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
							match chan.maybe_propose_closing_signed(&self.fee_estimator, &&logger) {
								Ok((msg_opt, tx_opt, shutdown_result_opt)) => {
									if let Some(msg) = msg_opt {
										has_update = true;
										pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
											node_id: chan.context.get_counterparty_node_id(), msg,
										});
									}
									debug_assert_eq!(shutdown_result_opt.is_some(), chan.is_shutdown());
									if let Some(mut shutdown_result) = shutdown_result_opt {
										locked_close_channel!(self, peer_state, &chan.context, shutdown_result);
										shutdown_results.push(shutdown_result);
									}
									if let Some(tx) = tx_opt {
										// Negotiation completed: announce the channel as closed,
										// broadcast the closing tx, and drop the channel from
										// the map (retain -> false).
										if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
											let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
											pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
												msg: update
											});
										}

										log_info!(logger, "Broadcasting {}", log_tx!(tx));
										self.tx_broadcaster.broadcast_transactions(&[&tx]);
										false
									} else { true }
								},
								Err(e) => {
									has_update = true;
									// convert_chan_phase_err! tells us (via close_channel)
									// whether the channel must also be removed from the map.
									let (close_channel, res) = convert_chan_phase_err!(self, peer_state, e, chan, channel_id, FUNDED_CHANNEL);
									handle_errors.push((chan.context.get_counterparty_node_id(), Err(res)));
									!close_channel
								}
							}
						},
						// Unfunded channels can't be in closing negotiation; keep them.
						_ => true,
					}
				});
			}
		}

		// Handle errors and finish closes only after the per-peer locks are released.
		for (counterparty_node_id, err) in handle_errors.drain(..) {
			let _ = handle_error!(self, err, counterparty_node_id);
		}

		for shutdown_result in shutdown_results.drain(..) {
			self.finish_close_channel(shutdown_result);
		}

		has_update
	}
9700
	/// Creates a [`Bolt11Invoice`] payable to this node from the given
	/// [`Bolt11InvoiceParameters`].
	///
	/// Registers an inbound payment for the invoice's payment hash (generating a fresh
	/// hash/preimage when `payment_hash` is `None`), fills in route hints from our current
	/// channels, and signs the invoice with the node signer.
	///
	/// # Errors
	///
	/// Returns a [`SignOrCreationError`] when the parameters are invalid (e.g. a too-short
	/// `min_final_cltv_expiry_delta` or a rejected amount), when invoice construction fails,
	/// or when signing fails.
	pub fn create_bolt11_invoice(
		&self, params: Bolt11InvoiceParameters,
	) -> Result<Bolt11Invoice, SignOrCreationError<()>> {
		let Bolt11InvoiceParameters {
			amount_msats, description, invoice_expiry_delta_secs, min_final_cltv_expiry_delta,
			payment_hash,
		} = params;

		let currency =
			Network::from_chain_hash(self.chain_hash).map(Into::into).unwrap_or(Currency::Bitcoin);

		#[cfg(feature = "std")]
		let duration_since_epoch = {
			use std::time::SystemTime;
			SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)
				.expect("SystemTime::now() should be after SystemTime::UNIX_EPOCH")
		};

		// Without std there is no wall clock; fall back to the highest block timestamp seen.
		#[cfg(not(feature = "std"))]
		let duration_since_epoch =
			Duration::from_secs(self.highest_seen_timestamp.load(Ordering::Acquire) as u64);

		if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta {
			// The +3 matches the buffer added when setting the invoice's CLTV delta below.
			if min_final_cltv_expiry_delta.saturating_add(3) < MIN_FINAL_CLTV_EXPIRY_DELTA {
				return Err(SignOrCreationError::CreationError(CreationError::MinFinalCltvExpiryDeltaTooShort));
			}
		}

		let (payment_hash, payment_secret) = match payment_hash {
			Some(payment_hash) => {
				// Caller supplied the payment hash; we only register a secret for it.
				let payment_secret = self
					.create_inbound_payment_for_hash(
						payment_hash, amount_msats,
						invoice_expiry_delta_secs.unwrap_or(DEFAULT_EXPIRY_TIME as u32),
						min_final_cltv_expiry_delta,
					)
					.map_err(|()| SignOrCreationError::CreationError(CreationError::InvalidAmount))?;
				(payment_hash, payment_secret)
			},
			None => {
				// Generate both the payment hash and secret ourselves.
				self
					.create_inbound_payment(
						amount_msats, invoice_expiry_delta_secs.unwrap_or(DEFAULT_EXPIRY_TIME as u32),
						min_final_cltv_expiry_delta,
					)
					.map_err(|()| SignOrCreationError::CreationError(CreationError::InvalidAmount))?
			},
		};

		log_trace!(self.logger, "Creating invoice with payment hash {}", &payment_hash);

		let invoice = Bolt11InvoiceBuilder::new(currency);
		let invoice = match description {
			Bolt11InvoiceDescription::Direct(description) => invoice.description(description.into_inner().0),
			Bolt11InvoiceDescription::Hash(hash) => invoice.description_hash(hash.0),
		};

		let mut invoice = invoice
			.duration_since_epoch(duration_since_epoch)
			.payee_pub_key(self.get_our_node_id())
			.payment_hash(Hash::from_slice(&payment_hash.0).unwrap())
			.payment_secret(payment_secret)
			.basic_mpp()
			.min_final_cltv_expiry_delta(
				// Add a 3-block buffer to whatever delta the caller requested.
				min_final_cltv_expiry_delta.map(|x| x.saturating_add(3)).unwrap_or(MIN_FINAL_CLTV_EXPIRY_DELTA).into()
			);

		if let Some(invoice_expiry_delta_secs) = invoice_expiry_delta_secs {
			invoice = invoice.expiry_time(Duration::from_secs(invoice_expiry_delta_secs.into()));
		}

		if let Some(amount_msats) = amount_msats {
			invoice = invoice.amount_milli_satoshis(amount_msats);
		}

		// Include route hints for our (filtered, sorted) channels so payers can reach us.
		let channels = self.list_channels();
		let route_hints = super::invoice_utils::sort_and_filter_channels(channels, amount_msats, &self.logger);
		for hint in route_hints {
			invoice = invoice.private_route(hint);
		}

		let raw_invoice = invoice.build_raw().map_err(|e| SignOrCreationError::CreationError(e))?;
		let signature = self.node_signer.sign_invoice(&raw_invoice, Recipient::Node);

		raw_invoice
			.sign(|_| signature)
			.map(|invoice| Bolt11Invoice::from_signed(invoice).unwrap())
			.map_err(|e| SignOrCreationError::SignError(e))
	}
9798}
9799
/// Parameters used with [`ChannelManager::create_bolt11_invoice`].
pub struct Bolt11InvoiceParameters {
	/// The amount for the resulting invoice, in millisatoshis. If `None`, the invoice is
	/// amount-less and the payer chooses how much to send.
	pub amount_msats: Option<u64>,

	/// The description for what the invoice is for, either as direct text or as a hash of it.
	pub description: Bolt11InvoiceDescription,

	/// The invoice's expiry relative to its creation time, in seconds. If `None`, a default
	/// (`DEFAULT_EXPIRY_TIME`) is used.
	pub invoice_expiry_delta_secs: Option<u32>,

	/// The minimum `cltv_expiry` delta the payer must use for the final hop. If `None`,
	/// `MIN_FINAL_CLTV_EXPIRY_DELTA` is used; note a 3-block buffer is added to whichever
	/// value applies.
	pub min_final_cltv_expiry_delta: Option<u16>,

	/// A payment hash to use in the invoice, in which case the `ChannelManager` will not know
	/// the corresponding preimage. If `None`, a hash (and preimage) are generated internally.
	pub payment_hash: Option<PaymentHash>,
}
9833
9834impl Default for Bolt11InvoiceParameters {
9835 fn default() -> Self {
9836 Self {
9837 amount_msats: None,
9838 description: Bolt11InvoiceDescription::Direct(Description::empty()),
9839 invoice_expiry_delta_secs: None,
9840 min_final_cltv_expiry_delta: None,
9841 payment_hash: None,
9842 }
9843 }
9844}
9845
// Generates `ChannelManager::create_offer_builder`, parameterized over the concrete builder
// type so the same body serves both the generic and `c_bindings` builder variants.
macro_rules! create_offer_builder { ($self: ident, $builder: ty) => {
	/// Creates an offer builder whose resulting [`Offer`] this [`ChannelManager`] will
	/// recognize when handling invoice requests: the signing pubkey is derived from our node
	/// id and a blinded path back to us is included.
	///
	/// When `absolute_expiry` is given, it is also set on the offer.
	///
	/// # Errors
	///
	/// Errors with [`Bolt12SemanticError::MissingPaths`] if no blinded path to us could be
	/// created.
	pub fn create_offer_builder(
		&$self, absolute_expiry: Option<Duration>
	) -> Result<$builder, Bolt12SemanticError> {
		let node_id = $self.get_our_node_id();
		let expanded_key = &$self.inbound_payment_key;
		let entropy = &*$self.entropy_source;
		let secp_ctx = &$self.secp_ctx;

		// The nonce lets us recognize invoice requests for this offer later.
		let nonce = Nonce::from_entropy_source(entropy);
		let context = OffersContext::InvoiceRequest { nonce };
		// Only the first path returned is used on the offer.
		let path = $self.create_blinded_paths_using_absolute_expiry(context, absolute_expiry)
			.and_then(|paths| paths.into_iter().next().ok_or(()))
			.map_err(|_| Bolt12SemanticError::MissingPaths)?;
		let builder = OfferBuilder::deriving_signing_pubkey(node_id, expanded_key, nonce, secp_ctx)
			.chain_hash($self.chain_hash)
			.path(path);

		let builder = match absolute_expiry {
			None => builder,
			Some(absolute_expiry) => builder.absolute_expiry(absolute_expiry),
		};

		Ok(builder.into())
	}
} }
9896
// Generates `ChannelManager::create_refund_builder`, parameterized over the concrete builder
// type so the same body serves both the generic and `c_bindings` builder variants.
macro_rules! create_refund_builder { ($self: ident, $builder: ty) => {
	/// Creates a refund builder whose resulting [`Refund`] this [`ChannelManager`] will
	/// recognize when handling the corresponding invoice. Also registers `payment_id` as an
	/// outbound payment awaiting an invoice, expiring at `absolute_expiry`.
	///
	/// # Errors
	///
	/// Errors with [`Bolt12SemanticError::MissingPaths`] if no blinded path to us could be
	/// created, or [`Bolt12SemanticError::DuplicatePaymentId`] if `payment_id` is already in
	/// use.
	pub fn create_refund_builder(
		&$self, amount_msats: u64, absolute_expiry: Duration, payment_id: PaymentId,
		retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>
	) -> Result<$builder, Bolt12SemanticError> {
		let node_id = $self.get_our_node_id();
		let expanded_key = &$self.inbound_payment_key;
		let entropy = &*$self.entropy_source;
		let secp_ctx = &$self.secp_ctx;

		// The nonce lets us authenticate the eventual invoice as solicited by us.
		let nonce = Nonce::from_entropy_source(entropy);
		let context = OffersContext::OutboundPayment { payment_id, nonce, hmac: None };
		// Only the first path returned is used on the refund.
		let path = $self.create_blinded_paths_using_absolute_expiry(context, Some(absolute_expiry))
			.and_then(|paths| paths.into_iter().next().ok_or(()))
			.map_err(|_| Bolt12SemanticError::MissingPaths)?;

		let builder = RefundBuilder::deriving_signing_pubkey(
			node_id, expanded_key, nonce, secp_ctx, amount_msats, payment_id
		)?
			.chain_hash($self.chain_hash)
			.absolute_expiry(absolute_expiry)
			.path(path);

		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop($self);

		// Track the pending payment; it is abandoned once absolute_expiry passes.
		let expiration = StaleExpiration::AbsoluteTimeout(absolute_expiry);
		$self.pending_outbound_payments
			.add_new_awaiting_invoice(
				payment_id, expiration, retry_strategy, max_total_routing_fee_msat, None,
			)
			.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?;

		Ok(builder.into())
	}
} }
9977
/// Cap on the number of onion messages queued when a single offers-related send fans out over
/// the cross product of (destination path, reply path) pairs, bounding per-request overhead.
const OFFERS_MESSAGE_REQUEST_LIMIT: usize = 10;
9984
9985impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
9986where
9987 M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
9988 T::Target: BroadcasterInterface,
9989 ES::Target: EntropySource,
9990 NS::Target: NodeSigner,
9991 SP::Target: SignerProvider,
9992 F::Target: FeeEstimator,
9993 R::Target: Router,
9994 MR::Target: MessageRouter,
9995 L::Target: Logger,
9996{
	// The offer/refund builder methods are macro-generated so each build flavor can return
	// its own builder type: the fully-generic builders normally, and dedicated non-generic
	// builder types under `c_bindings`.
	#[cfg(not(c_bindings))]
	create_offer_builder!(self, OfferBuilder<DerivedMetadata, secp256k1::All>);
	#[cfg(not(c_bindings))]
	create_refund_builder!(self, RefundBuilder<secp256k1::All>);

	#[cfg(c_bindings)]
	create_offer_builder!(self, OfferWithDerivedMetadataBuilder);
	#[cfg(c_bindings)]
	create_refund_builder!(self, RefundMaybeWithDerivedMetadataBuilder);
10006
	/// Pays for an [`Offer`] by building, signing, and enqueueing an [`InvoiceRequest`] to be
	/// sent via onion message, tracking the payment under `payment_id` until the
	/// corresponding [`Bolt12Invoice`] is received.
	///
	/// # Errors
	///
	/// Errors with [`Bolt12SemanticError::DuplicatePaymentId`] if `payment_id` is already in
	/// use, and propagates any error from building or enqueueing the invoice request.
	pub fn pay_for_offer(
		&self, offer: &Offer, quantity: Option<u64>, amount_msats: Option<u64>,
		payer_note: Option<String>, payment_id: PaymentId, retry_strategy: Retry,
		max_total_routing_fee_msat: Option<u64>
	) -> Result<(), Bolt12SemanticError> {
		self.pay_for_offer_intern(offer, quantity, amount_msats, payer_note, payment_id, None, |invoice_request, nonce| {
			// Abandon the payment if no invoice arrives within one timer tick.
			let expiration = StaleExpiration::TimerTicks(1);
			// Retain the signed request (with its nonce) so it can be re-sent if needed.
			let retryable_invoice_request = RetryableInvoiceRequest {
				invoice_request: invoice_request.clone(),
				nonce,
			};
			self.pending_outbound_payments
				.add_new_awaiting_invoice(
					payment_id, expiration, retry_strategy, max_total_routing_fee_msat,
					Some(retryable_invoice_request)
				)
				.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
		})
	}
10079
	/// Shared implementation for paying an [`Offer`]: builds and signs an [`InvoiceRequest`],
	/// creates authenticated reply paths, lets `create_pending_payment` register the pending
	/// outbound payment, and enqueues the request for sending.
	///
	/// `human_readable_name`, when present, records the name the offer was resolved from.
	fn pay_for_offer_intern<CPP: FnOnce(&InvoiceRequest, Nonce) -> Result<(), Bolt12SemanticError>>(
		&self, offer: &Offer, quantity: Option<u64>, amount_msats: Option<u64>,
		payer_note: Option<String>, payment_id: PaymentId,
		human_readable_name: Option<HumanReadableName>, create_pending_payment: CPP,
	) -> Result<(), Bolt12SemanticError> {
		let expanded_key = &self.inbound_payment_key;
		let entropy = &*self.entropy_source;
		let secp_ctx = &self.secp_ctx;

		let nonce = Nonce::from_entropy_source(entropy);
		let builder: InvoiceRequestBuilder<secp256k1::All> = offer
			.request_invoice(expanded_key, nonce, secp_ctx, payment_id)?
			.into();
		let builder = builder.chain_hash(self.chain_hash)?;

		// Apply each optional field only when provided; the fallible setters may reject
		// values inconsistent with the offer (hence the `?`s).
		let builder = match quantity {
			None => builder,
			Some(quantity) => builder.quantity(quantity)?,
		};
		let builder = match amount_msats {
			None => builder,
			Some(amount_msats) => builder.amount_msats(amount_msats)?,
		};
		let builder = match payer_note {
			None => builder,
			Some(payer_note) => builder.payer_note(payer_note),
		};
		let builder = match human_readable_name {
			None => builder,
			Some(hrn) => builder.sourced_from_human_readable_name(hrn),
		};
		let invoice_request = builder.build_and_sign()?;

		// Include an HMAC in the reply-path context so any invoice arriving over it can be
		// verified as solicited by us for this payment_id.
		let hmac = payment_id.hmac_for_offer_payment(nonce, expanded_key);
		let context = MessageContext::Offers(
			OffersContext::OutboundPayment { payment_id, nonce, hmac: Some(hmac) }
		);
		let reply_paths = self.create_blinded_paths(context)
			.map_err(|_| Bolt12SemanticError::MissingPaths)?;

		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

		// Register the pending payment before enqueueing, so a duplicate payment_id aborts
		// without anything having been queued.
		create_pending_payment(&invoice_request, nonce)?;

		self.enqueue_invoice_request(invoice_request, reply_paths)
	}
10126
	/// Queues `invoice_request` onion messages toward the offer's destination.
	///
	/// If the invoice request's offer carries blinded paths, one message is queued per
	/// (offer path, reply path) pair, capped at [`OFFERS_MESSAGE_REQUEST_LIMIT`]; otherwise
	/// one message per reply path is sent directly to the issuer's signing pubkey.
	fn enqueue_invoice_request(
		&self,
		invoice_request: InvoiceRequest,
		reply_paths: Vec<BlindedMessagePath>,
	) -> Result<(), Bolt12SemanticError> {
		let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
		if !invoice_request.paths().is_empty() {
			// Fan out over the cross product of offer paths and reply paths, capped to bound
			// the number of messages queued per request.
			reply_paths
				.iter()
				.flat_map(|reply_path| invoice_request.paths().iter().map(move |path| (path, reply_path)))
				.take(OFFERS_MESSAGE_REQUEST_LIMIT)
				.for_each(|(path, reply_path)| {
					let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
						destination: Destination::BlindedPath(path.clone()),
						reply_path: reply_path.clone(),
					};
					let message = OffersMessage::InvoiceRequest(invoice_request.clone());
					pending_offers_messages.push((message, instructions));
				});
		} else if let Some(node_id) = invoice_request.issuer_signing_pubkey() {
			// No blinded paths on the offer: address the issuer's pubkey directly.
			for reply_path in reply_paths {
				let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
					destination: Destination::Node(node_id),
					reply_path,
				};
				let message = OffersMessage::InvoiceRequest(invoice_request.clone());
				pending_offers_messages.push((message, instructions));
			}
		} else {
			// An offer with neither paths nor an issuer pubkey should not have been built.
			debug_assert!(false);
			return Err(Bolt12SemanticError::MissingIssuerSigningPubkey);
		}

		Ok(())
	}
10162
	/// Creates and sends a [`Bolt12Invoice`] in response to a [`Refund`], so the refund can be
	/// paid to us.
	///
	/// Registers an inbound payment for the refund amount, builds blinded payment paths for
	/// it, signs the invoice with derived keys, and queues the invoice as onion messages to
	/// the refund's payer (directly or via the refund's blinded paths).
	///
	/// # Errors
	///
	/// Errors with [`Bolt12SemanticError::UnsupportedChain`] if the refund targets a different
	/// chain, [`Bolt12SemanticError::MissingPaths`] if paths could not be created, or
	/// [`Bolt12SemanticError::InvalidAmount`] if the inbound payment could not be registered.
	pub fn request_refund_payment(
		&self, refund: &Refund
	) -> Result<Bolt12Invoice, Bolt12SemanticError> {
		let expanded_key = &self.inbound_payment_key;
		let entropy = &*self.entropy_source;
		let secp_ctx = &self.secp_ctx;

		let amount_msats = refund.amount_msats();
		let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;

		if refund.chain() != self.chain_hash {
			return Err(Bolt12SemanticError::UnsupportedChain);
		}

		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

		match self.create_inbound_payment(Some(amount_msats), relative_expiry, None) {
			Ok((payment_hash, payment_secret)) => {
				let payment_context = PaymentContext::Bolt12Refund(Bolt12RefundContext {});
				let payment_paths = self.create_blinded_payment_paths(
					amount_msats, payment_secret, payment_context
				)
				.map_err(|_| Bolt12SemanticError::MissingPaths)?;

				#[cfg(feature = "std")]
				let builder = refund.respond_using_derived_keys(
					payment_paths, payment_hash, expanded_key, entropy
				)?;
				// Without std there is no clock; the invoice's creation time falls back to the
				// highest block timestamp we have seen.
				#[cfg(not(feature = "std"))]
				let created_at = Duration::from_secs(
					self.highest_seen_timestamp.load(Ordering::Acquire) as u64
				);
				#[cfg(not(feature = "std"))]
				let builder = refund.respond_using_derived_keys_no_std(
					payment_paths, payment_hash, created_at, expanded_key, entropy
				)?;
				let builder: InvoiceBuilder<DerivedSigningPubkey> = builder.into();
				let invoice = builder.allow_mpp().build_and_sign(secp_ctx)?;

				// Authenticate the reply-path context so responses can be tied back to this
				// inbound payment.
				let nonce = Nonce::from_entropy_source(entropy);
				let hmac = payment_hash.hmac_for_offer_payment(nonce, expanded_key);
				let context = MessageContext::Offers(OffersContext::InboundPayment {
					payment_hash: invoice.payment_hash(), nonce, hmac
				});
				let reply_paths = self.create_blinded_paths(context)
					.map_err(|_| Bolt12SemanticError::MissingPaths)?;

				let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
				if refund.paths().is_empty() {
					// No blinded paths on the refund: send directly to the payer's pubkey.
					for reply_path in reply_paths {
						let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
							destination: Destination::Node(refund.payer_signing_pubkey()),
							reply_path,
						};
						let message = OffersMessage::Invoice(invoice.clone());
						pending_offers_messages.push((message, instructions));
					}
				} else {
					// Fan out over (refund path, reply path) pairs, capped to bound the
					// number of messages queued.
					reply_paths
						.iter()
						.flat_map(|reply_path| refund.paths().iter().map(move |path| (path, reply_path)))
						.take(OFFERS_MESSAGE_REQUEST_LIMIT)
						.for_each(|(path, reply_path)| {
							let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
								destination: Destination::BlindedPath(path.clone()),
								reply_path: reply_path.clone(),
							};
							let message = OffersMessage::Invoice(invoice.clone());
							pending_offers_messages.push((message, instructions));
						});
				}

				Ok(invoice)
			},
			Err(()) => Err(Bolt12SemanticError::InvalidAmount),
		}
	}
10262
	/// Begins paying an offer addressed by a human readable name, first resolving the name via
	/// DNSSEC queries sent (as onion messages) to the given `dns_resolvers`.
	///
	/// The payment is tracked under `payment_id` as awaiting the resolved offer; queries are
	/// fanned out over (reply path, resolver) pairs, capped at
	/// [`OFFERS_MESSAGE_REQUEST_LIMIT`].
	///
	/// Returns `Err(())` if name resolution could not be initiated, reply paths could not be
	/// created, or `payment_id` is already in use.
	#[cfg(feature = "dnssec")]
	pub fn pay_for_offer_from_human_readable_name(
		&self, name: HumanReadableName, amount_msats: u64, payment_id: PaymentId,
		retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>,
		dns_resolvers: Vec<Destination>,
	) -> Result<(), ()> {
		let (onion_message, context) =
			self.hrn_resolver.resolve_name(payment_id, name, &*self.entropy_source)?;
		let reply_paths = self.create_blinded_paths(MessageContext::DNSResolver(context))?;
		// Abandon the payment if no offer is resolved within one timer tick.
		let expiration = StaleExpiration::TimerTicks(1);
		self.pending_outbound_payments.add_new_awaiting_offer(payment_id, expiration, retry_strategy, max_total_routing_fee_msat, amount_msats)?;
		let message_params = dns_resolvers
			.iter()
			.flat_map(|destination| reply_paths.iter().map(move |path| (path, destination)))
			.take(OFFERS_MESSAGE_REQUEST_LIMIT);
		for (reply_path, destination) in message_params {
			self.pending_dns_onion_messages.lock().unwrap().push((
				DNSResolverMessage::DNSSECQuery(onion_message.clone()),
				MessageSendInstructions::WithSpecifiedReplyPath {
					destination: destination.clone(),
					reply_path: reply_path.clone(),
				},
			));
		}
		Ok(())
	}
10329
	/// Creates a new inbound payment, returning its payment hash and secret for inclusion in
	/// an invoice.
	///
	/// `min_value_msat`, when set, is the minimum amount we will accept; the payment expires
	/// `invoice_expiry_delta_secs` after the current highest-seen block timestamp. Returns
	/// `Err(())` if the parameters are rejected by `inbound_payment::create`.
	pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32,
		min_final_cltv_expiry_delta: Option<u16>) -> Result<(PaymentHash, PaymentSecret), ()> {
		inbound_payment::create(&self.inbound_payment_key, min_value_msat, invoice_expiry_delta_secs,
			&self.entropy_source, self.highest_seen_timestamp.load(Ordering::Acquire) as u64,
			min_final_cltv_expiry_delta)
	}
10366
	/// Creates a payment secret for a caller-provided `payment_hash`, registering an inbound
	/// payment for it. The corresponding preimage is never learned by the `ChannelManager`.
	///
	/// The payment expires `invoice_expiry_delta_secs` after the current highest-seen block
	/// timestamp. Returns `Err(())` if the parameters are rejected by
	/// `inbound_payment::create_from_hash`.
	pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>,
		invoice_expiry_delta_secs: u32, min_final_cltv_expiry: Option<u16>) -> Result<PaymentSecret, ()> {
		inbound_payment::create_from_hash(&self.inbound_payment_key, min_value_msat, payment_hash,
			invoice_expiry_delta_secs, self.highest_seen_timestamp.load(Ordering::Acquire) as u64,
			min_final_cltv_expiry)
	}
10419
	/// Returns the payment preimage for the given payment hash/secret pair by delegating to
	/// `inbound_payment::get_payment_preimage` with our inbound payment key.
	///
	/// Returns an [`APIError`] if the preimage cannot be produced for this pair (e.g. for a
	/// hash registered via [`Self::create_inbound_payment_for_hash`] — the preimage is never
	/// known in that case; confirm exact error behavior in `inbound_payment`).
	pub fn get_payment_preimage(&self, payment_hash: PaymentHash, payment_secret: PaymentSecret) -> Result<PaymentPreimage, APIError> {
		inbound_payment::get_payment_preimage(payment_hash, payment_secret, &self.inbound_payment_key)
	}
10427
10428 fn create_blinded_paths_using_absolute_expiry(
10435 &self, context: OffersContext, absolute_expiry: Option<Duration>,
10436 ) -> Result<Vec<BlindedMessagePath>, ()> {
10437 let now = self.duration_since_epoch();
10438 let max_short_lived_absolute_expiry = now.saturating_add(MAX_SHORT_LIVED_RELATIVE_EXPIRY);
10439
10440 if absolute_expiry.unwrap_or(Duration::MAX) <= max_short_lived_absolute_expiry {
10441 self.create_compact_blinded_paths(context)
10442 } else {
10443 self.create_blinded_paths(MessageContext::Offers(context))
10444 }
10445 }
10446
	/// Returns the current time as a [`Duration`] since the Unix epoch.
	///
	/// With the `std` feature this uses the system clock; without it, we fall back to the
	/// highest block timestamp we have seen, which only advances as new blocks are connected.
	pub(super) fn duration_since_epoch(&self) -> Duration {
		#[cfg(not(feature = "std"))]
		let now = Duration::from_secs(
			self.highest_seen_timestamp.load(Ordering::Acquire) as u64
		);
		#[cfg(feature = "std")]
		let now = std::time::SystemTime::now()
			.duration_since(std::time::SystemTime::UNIX_EPOCH)
			.expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");

		now
	}
10459
10460 fn create_blinded_paths(&self, context: MessageContext) -> Result<Vec<BlindedMessagePath>, ()> {
10465 let recipient = self.get_our_node_id();
10466 let secp_ctx = &self.secp_ctx;
10467
10468 let peers = self.per_peer_state.read().unwrap()
10469 .iter()
10470 .map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
10471 .filter(|(_, peer)| peer.is_connected)
10472 .filter(|(_, peer)| peer.latest_features.supports_onion_messages())
10473 .map(|(node_id, _)| *node_id)
10474 .collect::<Vec<_>>();
10475
10476 self.message_router
10477 .create_blinded_paths(recipient, context, peers, secp_ctx)
10478 .and_then(|paths| (!paths.is_empty()).then(|| paths).ok_or(()))
10479 }
10480
10481 fn create_compact_blinded_paths(&self, context: OffersContext) -> Result<Vec<BlindedMessagePath>, ()> {
10486 let recipient = self.get_our_node_id();
10487 let secp_ctx = &self.secp_ctx;
10488
10489 let peers = self.per_peer_state.read().unwrap()
10490 .iter()
10491 .map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
10492 .filter(|(_, peer)| peer.is_connected)
10493 .filter(|(_, peer)| peer.latest_features.supports_onion_messages())
10494 .map(|(node_id, peer)| MessageForwardNode {
10495 node_id: *node_id,
10496 short_channel_id: peer.channel_by_id
10497 .iter()
10498 .filter(|(_, channel)| channel.context().is_usable())
10499 .min_by_key(|(_, channel)| channel.context().channel_creation_height)
10500 .and_then(|(_, channel)| channel.context().get_short_channel_id()),
10501 })
10502 .collect::<Vec<_>>();
10503
10504 self.message_router
10505 .create_compact_blinded_paths(recipient, MessageContext::Offers(context), peers, secp_ctx)
10506 .and_then(|paths| (!paths.is_empty()).then(|| paths).ok_or(()))
10507 }
10508
10509 fn create_blinded_payment_paths(
10512 &self, amount_msats: u64, payment_secret: PaymentSecret, payment_context: PaymentContext
10513 ) -> Result<Vec<BlindedPaymentPath>, ()> {
10514 let expanded_key = &self.inbound_payment_key;
10515 let entropy = &*self.entropy_source;
10516 let secp_ctx = &self.secp_ctx;
10517
10518 let first_hops = self.list_usable_channels();
10519 let payee_node_id = self.get_our_node_id();
10520 let max_cltv_expiry = self.best_block.read().unwrap().height + CLTV_FAR_FAR_AWAY
10521 + LATENCY_GRACE_PERIOD_BLOCKS;
10522
10523 let payee_tlvs = UnauthenticatedReceiveTlvs {
10524 payment_secret,
10525 payment_constraints: PaymentConstraints {
10526 max_cltv_expiry,
10527 htlc_minimum_msat: 1,
10528 },
10529 payment_context,
10530 };
10531 let nonce = Nonce::from_entropy_source(entropy);
10532 let payee_tlvs = payee_tlvs.authenticate(nonce, expanded_key);
10533
10534 self.router.create_blinded_payment_paths(
10535 payee_node_id, first_hops, payee_tlvs, amount_msats, secp_ctx
10536 )
10537 }
10538
10539 pub fn get_phantom_scid(&self) -> u64 {
10544 let best_block_height = self.best_block.read().unwrap().height;
10545 let short_to_chan_info = self.short_to_chan_info.read().unwrap();
10546 loop {
10547 let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
10548 match short_to_chan_info.get(&scid_candidate) {
10550 Some(_) => continue,
10551 None => return scid_candidate
10552 }
10553 }
10554 }
10555
10556 pub fn get_phantom_route_hints(&self) -> PhantomRouteHints {
10560 PhantomRouteHints {
10561 channels: self.list_usable_channels(),
10562 phantom_scid: self.get_phantom_scid(),
10563 real_node_pubkey: self.get_our_node_id(),
10564 }
10565 }
10566
10567 pub fn get_intercept_scid(&self) -> u64 {
10574 let best_block_height = self.best_block.read().unwrap().height;
10575 let short_to_chan_info = self.short_to_chan_info.read().unwrap();
10576 loop {
10577 let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
10578 if short_to_chan_info.contains_key(&scid_candidate) { continue }
10580 return scid_candidate
10581 }
10582 }
10583
10584 pub fn compute_inflight_htlcs(&self) -> InFlightHtlcs {
10587 let mut inflight_htlcs = InFlightHtlcs::new();
10588
10589 let per_peer_state = self.per_peer_state.read().unwrap();
10590 for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
10591 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
10592 let peer_state = &mut *peer_state_lock;
10593 for chan in peer_state.channel_by_id.values().filter_map(
10594 |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
10595 ) {
10596 for (htlc_source, _) in chan.inflight_htlc_sources() {
10597 if let HTLCSource::OutboundRoute { path, .. } = htlc_source {
10598 inflight_htlcs.process_path(path, self.get_our_node_id());
10599 }
10600 }
10601 }
10602 }
10603
10604 inflight_htlcs
10605 }
10606
10607 #[cfg(any(test, feature = "_test_utils"))]
10608 pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
10609 let events = core::cell::RefCell::new(Vec::new());
10610 let event_handler = |event: events::Event| Ok(events.borrow_mut().push(event));
10611 self.process_pending_events(&event_handler);
10612 events.into_inner()
10613 }
10614
10615 #[cfg(feature = "_test_utils")]
10616 pub fn push_pending_event(&self, event: events::Event) {
10617 let mut events = self.pending_events.lock().unwrap();
10618 events.push_back((event, None));
10619 }
10620
10621 #[cfg(test)]
10622 pub fn pop_pending_event(&self) -> Option<events::Event> {
10623 let mut events = self.pending_events.lock().unwrap();
10624 events.pop_front().map(|(e, _)| e)
10625 }
10626
10627 #[cfg(test)]
10628 pub fn has_pending_payments(&self) -> bool {
10629 self.pending_outbound_payments.has_pending_payments()
10630 }
10631
10632 #[cfg(test)]
10633 pub fn clear_pending_payments(&self) {
10634 self.pending_outbound_payments.clear_pending_payments()
10635 }
10636
	/// Releases blocked `ChannelMonitorUpdate`s for the given channel after the action in
	/// `completed_blocker` (if any) has completed, looping to push out each successive
	/// blocked update until none remain or another channel's update must complete first.
	fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey,
		channel_funding_outpoint: OutPoint, channel_id: ChannelId,
		mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {

		let logger = WithContext::from(
			&self.logger, Some(counterparty_node_id), Some(channel_id), None
		);
		loop {
			let per_peer_state = self.per_peer_state.read().unwrap();
			if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
				let mut peer_state_lck = peer_state_mtx.lock().unwrap();
				let peer_state = &mut *peer_state_lck;
				// On the first pass, remove the now-completed blocker from this channel's
				// blocker list. `take()` ensures later loop iterations skip this step.
				if let Some(blocker) = completed_blocker.take() {
					if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
						.get_mut(&channel_id)
					{
						blockers.retain(|iter| iter != &blocker);
					}
				}

				// If other actions still block RAA monitor updates for this channel, we must
				// wait for them to complete before unblocking anything.
				if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
					channel_funding_outpoint, channel_id, counterparty_node_id) {
					log_trace!(logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
						&channel_id);
					break;
				}

				if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(
					channel_id) {
					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
						debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint);
						if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
							log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
								channel_id);
							handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
								peer_state_lck, peer_state, per_peer_state, chan);
							// More blocked updates may be releasable now - loop to process
							// the next one (re-acquiring locks from scratch).
							if further_update_exists {
								continue;
							}
						} else {
							log_trace!(logger, "Unlocked monitor updating for channel {} without monitors to update",
								channel_id);
						}
					}
				}
			} else {
				log_debug!(logger,
					"Got a release post-RAA monitor update for peer {} but the channel is gone",
					log_pubkey!(counterparty_node_id));
			}
			break;
		}
	}
10700
10701 fn handle_post_event_actions(&self, actions: Vec<EventCompletionAction>) {
10702 for action in actions {
10703 match action {
10704 EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
10705 channel_funding_outpoint, channel_id, counterparty_node_id
10706 } => {
10707 self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, channel_id, None);
10708 }
10709 }
10710 }
10711 }
10712
	/// Processes any pending events asynchronously, calling `handler` on each. The async
	/// equivalent of [`EventsProvider::process_pending_events`]; the shared logic lives in
	/// the `process_events_body!` macro, which requires the `ev` binding by name.
	pub async fn process_pending_events_async<Future: core::future::Future<Output = Result<(), ReplayEvent>>, H: Fn(Event) -> Future>(
		&self, handler: H
	) {
		let mut ev;
		process_events_body!(self, ev, { handler(ev).await });
	}
10723}
10724
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	/// Drains and returns all pending peer messages, first running internal steps (monitor
	/// events, holding cells, initial closing_signed) that may generate new messages and
	/// may require persistence.
	fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
		let events = RefCell::new(Vec::new());
		PersistenceNotifierGuard::optionally_notify(self, || {
			let mut result = NotifyOption::SkipPersistNoEvents;

			// Processing monitor events can generate new messages/events which must be
			// persisted before we return them.
			if self.process_pending_monitor_events() {
				result = NotifyOption::DoPersist;
			}

			if self.check_free_holding_cells() {
				result = NotifyOption::DoPersist;
			}
			if self.maybe_generate_initial_closing_signed() {
				result = NotifyOption::DoPersist;
			}

			let mut is_any_peer_connected = false;
			let mut pending_events = Vec::new();
			let per_peer_state = self.per_peer_state.read().unwrap();
			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
				let peer_state = &mut *peer_state_lock;
				if peer_state.pending_msg_events.len() > 0 {
					pending_events.append(&mut peer_state.pending_msg_events);
				}
				if peer_state.is_connected {
					is_any_peer_connected = true
				}
			}

			// Only drain queued broadcast messages once at least one peer is connected to
			// actually receive them.
			if is_any_peer_connected {
				let mut broadcast_msgs = self.pending_broadcast_messages.lock().unwrap();
				pending_events.append(&mut broadcast_msgs);
			}

			if !pending_events.is_empty() {
				events.replace(pending_events);
			}

			result
		});
		events.into_inner()
	}
}
10797
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> EventsProvider for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	/// Processes any pending events synchronously, calling `handler` on each. The shared
	/// logic lives in the `process_events_body!` macro, which requires the `ev` binding
	/// by name.
	fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
		let mut ev;
		process_events_body!(self, ev, handler.handle_event(ev));
	}
}
10819
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> chain::Listen for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	/// Handles a newly-connected block by delegating to the `Confirm` methods; asserts
	/// (and thus panics if) blocks are not connected in strict chain order.
	fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
		{
			// Scope the read lock so `transactions_confirmed`/`best_block_updated` below
			// can take their own locks.
			let best_block = self.best_block.read().unwrap();
			assert_eq!(best_block.block_hash, header.prev_blockhash,
				"Blocks must be connected in chain-order - the connected header must build on the last connected header");
			assert_eq!(best_block.height, height - 1,
				"Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
		}

		self.transactions_confirmed(header, txdata, height);
		self.best_block_updated(header, height);
	}

	/// Handles a block disconnection (reorg): rolls `best_block` back to the parent and
	/// notifies every channel of the new (lower) best height.
	fn block_disconnected(&self, header: &Header, height: u32) {
		let _persistence_guard =
			PersistenceNotifierGuard::optionally_notify_skipping_background_events(
				self, || -> NotifyOption { NotifyOption::DoPersist });
		let new_height = height - 1;
		{
			let mut best_block = self.best_block.write().unwrap();
			assert_eq!(best_block.block_hash, header.block_hash(),
				"Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
			assert_eq!(best_block.height, height,
				"Blocks must be disconnected in chain-order - the disconnected block must have the correct height");
			*best_block = BestBlock::new(header.prev_blockhash, new_height)
		}

		self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None)));
	}
}
10862
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> chain::Confirm for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	/// Notifies all channels that the given transactions confirmed at `height` in
	/// `block_hash`.
	fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) {
		let block_hash = header.block_hash();
		log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);

		let _persistence_guard =
			PersistenceNotifierGuard::optionally_notify_skipping_background_events(
				self, || -> NotifyOption { NotifyOption::DoPersist });
		self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None))
			.map(|(a, b)| (a, Vec::new(), b)));

		// Confirmations may arrive for a block below our current tip; if so, replay a
		// best-block update at the current tip so channels re-evaluate with the new
		// confirmation info.
		let last_best_block_height = self.best_block.read().unwrap().height;
		if height < last_best_block_height {
			let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
			self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None)));
		}
	}

	/// Updates our best-block view to `header` at `height`, tracks recent minimum feerates,
	/// and notifies all channels of the new tip.
	fn best_block_updated(&self, header: &Header, height: u32) {
		let block_hash = header.block_hash();
		log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);

		let _persistence_guard =
			PersistenceNotifierGuard::optionally_notify_skipping_background_events(
				self, || -> NotifyOption { NotifyOption::DoPersist });
		*self.best_block.write().unwrap() = BestBlock::new(block_hash, height);

		// Maintain a rolling window of the estimator's minimum feerates over the last
		// FEERATE_TRACKING_BLOCKS blocks; only once the window is full do we derive the
		// minimums used for stale-feerate checks below.
		let mut min_anchor_feerate = None;
		let mut min_non_anchor_feerate = None;
		if self.background_events_processed_since_startup.load(Ordering::Relaxed) {
			let mut last_days_feerates = self.last_days_feerates.lock().unwrap();
			if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS {
				last_days_feerates.pop_front();
			}
			let anchor_feerate = self.fee_estimator
				.bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedAnchorChannelRemoteFee);
			let non_anchor_feerate = self.fee_estimator
				.bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee);
			last_days_feerates.push_back((anchor_feerate, non_anchor_feerate));
			if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS {
				min_anchor_feerate = last_days_feerates.iter().map(|(f, _)| f).min().copied();
				min_non_anchor_feerate = last_days_feerates.iter().map(|(_, f)| f).min().copied();
			}
		}

		self.do_chain_event(Some(height), |channel| {
			let logger = WithChannelContext::from(&self.logger, &channel.context, None);
			if channel.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				if let Some(feerate) = min_anchor_feerate {
					channel.check_for_stale_feerate(&logger, feerate)?;
				}
			} else {
				if let Some(feerate) = min_non_anchor_feerate {
					channel.check_for_stale_feerate(&logger, feerate)?;
				}
			}
			channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None))
		});

		// Monotonically ratchet the given atomic timestamp up to the header's timestamp
		// via compare-exchange, tolerating concurrent updaters.
		macro_rules! max_time {
			($timestamp: expr) => {
				loop {
					let old_serial = $timestamp.load(Ordering::Acquire);
					if old_serial >= header.time as usize { break; }
					if $timestamp.compare_exchange(old_serial, header.time as usize, Ordering::AcqRel, Ordering::Relaxed).is_ok() {
						break;
					}
				}
			}
		}
		max_time!(self.highest_seen_timestamp);
		#[cfg(feature = "dnssec")] {
			let timestamp = self.highest_seen_timestamp.load(Ordering::Relaxed) as u32;
			self.hrn_resolver.new_best_block(height, timestamp);
		}
	}

	/// Returns the funding txid, confirmation height, and confirming block hash for every
	/// funded channel whose funding transaction has confirmed.
	fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option<BlockHash>)> {
		let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
		for (_cp_id, peer_state_mutex) in self.per_peer_state.read().unwrap().iter() {
			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			let peer_state = &mut *peer_state_lock;
			for chan in peer_state.channel_by_id.values().filter_map(|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }) {
				let txid_opt = chan.context.get_funding_txo();
				let height_opt = chan.context.get_funding_tx_confirmation_height();
				let hash_opt = chan.context.get_funding_tx_confirmed_in();
				if let (Some(funding_txo), Some(conf_height), Some(block_hash)) = (txid_opt, height_opt, hash_opt) {
					res.push((funding_txo.txid, conf_height, Some(block_hash)));
				}
			}
		}
		res
	}

	/// Notifies the channel whose funding transaction matches `txid` that it was
	/// unconfirmed by a reorg.
	fn transaction_unconfirmed(&self, txid: &Txid) {
		let _persistence_guard =
			PersistenceNotifierGuard::optionally_notify_skipping_background_events(
				self, || -> NotifyOption { NotifyOption::DoPersist });
		self.do_chain_event(None, |channel| {
			if let Some(funding_txo) = channel.context.get_funding_txo() {
				if funding_txo.txid == *txid {
					channel.funding_transaction_unconfirmed(&&WithChannelContext::from(&self.logger, &channel.context, None)).map(|()| (None, Vec::new(), None))
				} else { Ok((None, Vec::new(), None)) }
			} else { Ok((None, Vec::new(), None)) }
		});
	}
}
10995
10996impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
10997where
10998 M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
10999 T::Target: BroadcasterInterface,
11000 ES::Target: EntropySource,
11001 NS::Target: NodeSigner,
11002 SP::Target: SignerProvider,
11003 F::Target: FeeEstimator,
11004 R::Target: Router,
11005 MR::Target: MessageRouter,
11006 L::Target: Logger,
11007{
	/// Applies a chain event to every funded channel by calling `f`, which returns either
	/// `(channel_ready, timed-out HTLCs, announcement_sigs)` on success or a
	/// [`ClosureReason`] which force-closes the channel. When `height_opt` is set, also
	/// times out claimable and intercepted HTLCs whose CLTV expiry is too close.
	fn do_chain_event<FN: Fn(&mut Channel<SP>) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
			(&self, height_opt: Option<u32>, f: FN) {
		// Collected while holding per-peer locks; acted on at the end, after locks drop.
		let mut failed_channels = Vec::new();
		let mut timed_out_htlcs = Vec::new();
		{
			let per_peer_state = self.per_peer_state.read().unwrap();
			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
				let peer_state = &mut *peer_state_lock;
				let pending_msg_events = &mut peer_state.pending_msg_events;

				// `retain` so a channel which `f` fails is removed from the map in place.
				peer_state.channel_by_id.retain(|_, phase| {
					match phase {
						// Unfunded channels have no on-chain state; chain events don't apply.
						ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) |
						ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => true,
						ChannelPhase::Funded(channel) => {
							let res = f(channel);
							if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
								// Fail back HTLCs the channel reported as timed out, using
								// temporary_channel_failure (0x1000|14).
								for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
									let failure_code = 0x1000|14; let data = self.get_htlc_inbound_temp_fail_data(failure_code);
									timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
										HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() }));
								}
								let logger = WithChannelContext::from(&self.logger, &channel.context, None);
								if let Some(channel_ready) = channel_ready_opt {
									send_channel_ready!(self, pending_msg_events, channel, channel_ready);
									if channel.context.is_usable() {
										log_trace!(logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", channel.context.channel_id());
										if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
											pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
												node_id: channel.context.get_counterparty_node_id(),
												msg,
											});
										}
									} else {
										log_trace!(logger, "Sending channel_ready WITHOUT channel_update for {}", channel.context.channel_id());
									}
								}

								{
									let mut pending_events = self.pending_events.lock().unwrap();
									emit_channel_ready_event!(pending_events, channel);
								}

								if let Some(height) = height_opt {
									let funding_conf_height =
										channel.context.get_funding_tx_confirmation_height().unwrap_or(height);
									// Periodically rebroadcast the announcement for channels
									// confirmed within the last 1008 blocks, on a height
									// schedule keyed off the confirmation height mod 6.
									let rebroadcast_announcement = funding_conf_height < height + 1008
										&& funding_conf_height % 6 == height % 6;
									#[allow(unused_mut, unused_assignments)]
									let mut should_announce = announcement_sigs.is_some() || rebroadcast_announcement;
									// In tests, skip rebroadcasts to keep message flow deterministic.
									#[cfg(test)]
									{
										should_announce = announcement_sigs.is_some();
									}
									if should_announce {
										if let Some(announcement) = channel.get_signed_channel_announcement(
											&self.node_signer, self.chain_hash, height, &self.default_configuration,
										) {
											pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
												msg: announcement,
												update_msg: Some(self.get_channel_update_for_broadcast(channel).unwrap()),
											});
										}
									}
								}
								if let Some(announcement_sigs) = announcement_sigs {
									log_trace!(logger, "Sending announcement_signatures for channel {}", channel.context.channel_id());
									pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
										node_id: channel.context.get_counterparty_node_id(),
										msg: announcement_sigs,
									});
								}
								if channel.is_our_channel_ready() {
									if let Some(real_scid) = channel.context.get_short_channel_id() {
										// Register the channel's real SCID for forwarding lookups.
										let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
										let scid_insert = short_to_chan_info.insert(real_scid, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
										assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.context.get_counterparty_node_id(), channel.context.channel_id()),
											"SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
											fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
									}
								}
							} else if let Err(reason) = res {
								// `f` failed: force-close the channel, broadcast a final
								// channel_update, and tell the peer why we're disconnecting.
								let reason_message = format!("{}", reason);
								let mut close_res = channel.context.force_shutdown(true, reason);
								locked_close_channel!(self, peer_state, &channel.context, close_res);
								failed_channels.push(close_res);
								if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
									let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
									pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
										msg: update
									});
								}
								pending_msg_events.push(events::MessageSendEvent::HandleError {
									node_id: channel.context.get_counterparty_node_id(),
									action: msgs::ErrorAction::DisconnectPeer {
										msg: Some(msgs::ErrorMessage {
											channel_id: channel.context.channel_id(),
											data: reason_message,
										})
									},
								});
								// Drop the channel from the map.
								return false;
							}
							true
						}
					}
				});
			}
		}

		if let Some(height) = height_opt {
			// Time out claimable payments whose HTLCs are too close to expiry to safely
			// claim, failing them back with incorrect_or_unknown_payment_details (0x4000|15).
			self.claimable_payments.lock().unwrap().claimable_payments.retain(|payment_hash, payment| {
				payment.htlcs.retain(|htlc| {
					if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
						// Failure data carries the HTLC amount and current height, per BOLT 4.
						let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
						htlc_msat_height_data.extend_from_slice(&height.to_be_bytes());

						timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(),
							HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
							HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
						false
					} else { true }
				});
				!payment.htlcs.is_empty() });

			// Likewise time out intercepted HTLCs whose outgoing CLTV is too close, failing
			// them back with expiry_too_soon (0x2000|2).
			let mut intercepted_htlcs = self.pending_intercepted_htlcs.lock().unwrap();
			intercepted_htlcs.retain(|_, htlc| {
				if height >= htlc.forward_info.outgoing_cltv_value - HTLC_FAIL_BACK_BUFFER {
					let prev_hop_data = HTLCSource::PreviousHopData(HTLCPreviousHopData {
						short_channel_id: htlc.prev_short_channel_id,
						user_channel_id: Some(htlc.prev_user_channel_id),
						htlc_id: htlc.prev_htlc_id,
						incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret,
						phantom_shared_secret: None,
						counterparty_node_id: htlc.prev_counterparty_node_id,
						outpoint: htlc.prev_funding_outpoint,
						channel_id: htlc.prev_channel_id,
						blinded_failure: htlc.forward_info.routing.blinded_failure(),
						cltv_expiry: htlc.forward_info.routing.incoming_cltv_expiry(),
					});

					// Intercepted HTLCs are, by construction, always forwards.
					let requested_forward_scid = match htlc.forward_info.routing {
						PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
						_ => unreachable!(),
					};
					timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash,
						HTLCFailReason::from_failure_code(0x2000 | 2),
						HTLCDestination::InvalidForward { requested_forward_scid }));
					let logger = WithContext::from(
						&self.logger, None, Some(htlc.prev_channel_id), Some(htlc.forward_info.payment_hash)
					);
					log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
					false
				} else { true }
			});
		}

		// All locks are dropped; finish closures and fail HTLCs backwards.
		for failure in failed_channels {
			self.finish_close_channel(failure);
		}

		for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) {
			self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, destination);
		}
	}
11209
11210 pub fn get_event_or_persistence_needed_future(&self) -> Future {
11219 self.event_persist_notifier.get_future()
11220 }
11221
11222 pub fn get_and_clear_needs_persistence(&self) -> bool {
11227 self.needs_persist_flag.swap(false, Ordering::AcqRel)
11228 }
11229
11230 #[cfg(any(test, feature = "_test_utils"))]
11231 pub fn get_event_or_persist_condvar_value(&self) -> bool {
11232 self.event_persist_notifier.notify_pending()
11233 }
11234
11235 pub fn current_best_block(&self) -> BestBlock {
11238 self.best_block.read().unwrap().clone()
11239 }
11240
11241 pub fn node_features(&self) -> NodeFeatures {
11244 provided_node_features(&self.default_configuration)
11245 }
11246
11247 #[cfg(any(feature = "_test_utils", test))]
11253 pub fn bolt11_invoice_features(&self) -> Bolt11InvoiceFeatures {
11254 provided_bolt11_invoice_features(&self.default_configuration)
11255 }
11256
11257 fn bolt12_invoice_features(&self) -> Bolt12InvoiceFeatures {
11260 provided_bolt12_invoice_features(&self.default_configuration)
11261 }
11262
11263 pub fn channel_features(&self) -> ChannelFeatures {
11266 provided_channel_features(&self.default_configuration)
11267 }
11268
11269 pub fn channel_type_features(&self) -> ChannelTypeFeatures {
11272 provided_channel_type_features(&self.default_configuration)
11273 }
11274
11275 pub fn init_features(&self) -> InitFeatures {
11278 provided_init_features(&self.default_configuration)
11279 }
11280}
11281
11282impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
11283 ChannelMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
11284where
11285 M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
11286 T::Target: BroadcasterInterface,
11287 ES::Target: EntropySource,
11288 NS::Target: NodeSigner,
11289 SP::Target: SignerProvider,
11290 F::Target: FeeEstimator,
11291 R::Target: Router,
11292 MR::Target: MessageRouter,
11293 L::Target: Logger,
11294{
	/// Handles an incoming `open_channel` message, only persisting if the handler reports
	/// a channel-closing error (which should never happen for a brand-new channel).
	fn handle_open_channel(&self, counterparty_node_id: PublicKey, msg: &msgs::OpenChannel) {
		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
			let res = self.internal_open_channel(&counterparty_node_id, OpenChannelMessageRef::V1(msg));
			let persist = match &res {
				Err(e) if e.closes_channel() => {
					debug_assert!(false, "We shouldn't close a new channel");
					NotifyOption::DoPersist
				},
				_ => NotifyOption::SkipPersistHandleEvents,
			};
			let _ = handle_error!(self, res, counterparty_node_id);
			persist
		});
	}
11312
	/// Handles an incoming `open_channel2` message. With the `dual_funding` cfg this
	/// mirrors [`Self::handle_open_channel`]; without it, we reject the channel with an
	/// error message to the peer.
	fn handle_open_channel_v2(&self, counterparty_node_id: PublicKey, msg: &msgs::OpenChannelV2) {
		#[cfg(dual_funding)]
		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
			let res = self.internal_open_channel(&counterparty_node_id, OpenChannelMessageRef::V2(msg));
			let persist = match &res {
				Err(e) if e.closes_channel() => {
					debug_assert!(false, "We shouldn't close a new channel");
					NotifyOption::DoPersist
				},
				_ => NotifyOption::SkipPersistHandleEvents,
			};
			let _ = handle_error!(self, res, counterparty_node_id);
			persist
		});
		#[cfg(not(dual_funding))]
		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
			"Dual-funded channels not supported".to_owned(),
			msg.common_fields.temporary_channel_id.clone())), counterparty_node_id);
	}
11335
11336 fn handle_accept_channel(&self, counterparty_node_id: PublicKey, msg: &msgs::AcceptChannel) {
11337 let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11341 let _ = handle_error!(self, self.internal_accept_channel(&counterparty_node_id, msg), counterparty_node_id);
11342 NotifyOption::SkipPersistHandleEvents
11343 });
11344 }
11345
11346 fn handle_accept_channel_v2(&self, counterparty_node_id: PublicKey, msg: &msgs::AcceptChannelV2) {
11347 let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
11348 "Dual-funded channels not supported".to_owned(),
11349 msg.common_fields.temporary_channel_id.clone())), counterparty_node_id);
11350 }
11351
11352 fn handle_funding_created(&self, counterparty_node_id: PublicKey, msg: &msgs::FundingCreated) {
11353 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11354 let _ = handle_error!(self, self.internal_funding_created(&counterparty_node_id, msg), counterparty_node_id);
11355 }
11356
11357 fn handle_funding_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::FundingSigned) {
11358 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11359 let _ = handle_error!(self, self.internal_funding_signed(&counterparty_node_id, msg), counterparty_node_id);
11360 }
11361
11362 fn handle_channel_ready(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelReady) {
11363 let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11368 let res = self.internal_channel_ready(&counterparty_node_id, msg);
11369 let persist = match &res {
11370 Err(e) if e.closes_channel() => NotifyOption::DoPersist,
11371 _ => NotifyOption::SkipPersistHandleEvents,
11372 };
11373 let _ = handle_error!(self, res, counterparty_node_id);
11374 persist
11375 });
11376 }
11377
11378 fn handle_stfu(&self, counterparty_node_id: PublicKey, msg: &msgs::Stfu) {
11379 let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
11380 "Quiescence not supported".to_owned(),
11381 msg.channel_id.clone())), counterparty_node_id);
11382 }
11383
11384 #[cfg(splicing)]
11385 fn handle_splice_init(&self, counterparty_node_id: PublicKey, msg: &msgs::SpliceInit) {
11386 let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
11387 "Splicing not supported".to_owned(),
11388 msg.channel_id.clone())), counterparty_node_id);
11389 }
11390
11391 #[cfg(splicing)]
11392 fn handle_splice_ack(&self, counterparty_node_id: PublicKey, msg: &msgs::SpliceAck) {
11393 let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
11394 "Splicing not supported (splice_ack)".to_owned(),
11395 msg.channel_id.clone())), counterparty_node_id);
11396 }
11397
11398 #[cfg(splicing)]
11399 fn handle_splice_locked(&self, counterparty_node_id: PublicKey, msg: &msgs::SpliceLocked) {
11400 let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
11401 "Splicing not supported (splice_locked)".to_owned(),
11402 msg.channel_id.clone())), counterparty_node_id);
11403 }
11404
11405 fn handle_shutdown(&self, counterparty_node_id: PublicKey, msg: &msgs::Shutdown) {
11406 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11407 let _ = handle_error!(self, self.internal_shutdown(&counterparty_node_id, msg), counterparty_node_id);
11408 }
11409
11410 fn handle_closing_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::ClosingSigned) {
11411 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11412 let _ = handle_error!(self, self.internal_closing_signed(&counterparty_node_id, msg), counterparty_node_id);
11413 }
11414
11415 fn handle_update_add_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateAddHTLC) {
11416 let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11420 let res = self.internal_update_add_htlc(&counterparty_node_id, msg);
11421 let persist = match &res {
11422 Err(e) if e.closes_channel() => NotifyOption::DoPersist,
11423 Err(_) => NotifyOption::SkipPersistHandleEvents,
11424 Ok(()) => NotifyOption::SkipPersistNoEvents,
11425 };
11426 let _ = handle_error!(self, res, counterparty_node_id);
11427 persist
11428 });
11429 }
11430
11431 fn handle_update_fulfill_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFulfillHTLC) {
11432 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11433 let _ = handle_error!(self, self.internal_update_fulfill_htlc(&counterparty_node_id, msg), counterparty_node_id);
11434 }
11435
11436 fn handle_update_fail_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFailHTLC) {
11437 let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11441 let res = self.internal_update_fail_htlc(&counterparty_node_id, msg);
11442 let persist = match &res {
11443 Err(e) if e.closes_channel() => NotifyOption::DoPersist,
11444 Err(_) => NotifyOption::SkipPersistHandleEvents,
11445 Ok(()) => NotifyOption::SkipPersistNoEvents,
11446 };
11447 let _ = handle_error!(self, res, counterparty_node_id);
11448 persist
11449 });
11450 }
11451
11452 fn handle_update_fail_malformed_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFailMalformedHTLC) {
11453 let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11457 let res = self.internal_update_fail_malformed_htlc(&counterparty_node_id, msg);
11458 let persist = match &res {
11459 Err(e) if e.closes_channel() => NotifyOption::DoPersist,
11460 Err(_) => NotifyOption::SkipPersistHandleEvents,
11461 Ok(()) => NotifyOption::SkipPersistNoEvents,
11462 };
11463 let _ = handle_error!(self, res, counterparty_node_id);
11464 persist
11465 });
11466 }
11467
11468 fn handle_commitment_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::CommitmentSigned) {
11469 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11470 let _ = handle_error!(self, self.internal_commitment_signed(&counterparty_node_id, msg), counterparty_node_id);
11471 }
11472
11473 fn handle_revoke_and_ack(&self, counterparty_node_id: PublicKey, msg: &msgs::RevokeAndACK) {
11474 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11475 let _ = handle_error!(self, self.internal_revoke_and_ack(&counterparty_node_id, msg), counterparty_node_id);
11476 }
11477
11478 fn handle_update_fee(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFee) {
11479 let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11483 let res = self.internal_update_fee(&counterparty_node_id, msg);
11484 let persist = match &res {
11485 Err(e) if e.closes_channel() => NotifyOption::DoPersist,
11486 Err(_) => NotifyOption::SkipPersistHandleEvents,
11487 Ok(()) => NotifyOption::SkipPersistNoEvents,
11488 };
11489 let _ = handle_error!(self, res, counterparty_node_id);
11490 persist
11491 });
11492 }
11493
11494 fn handle_announcement_signatures(&self, counterparty_node_id: PublicKey, msg: &msgs::AnnouncementSignatures) {
11495 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11496 let _ = handle_error!(self, self.internal_announcement_signatures(&counterparty_node_id, msg), counterparty_node_id);
11497 }
11498
11499 fn handle_channel_update(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelUpdate) {
11500 PersistenceNotifierGuard::optionally_notify(self, || {
11501 if let Ok(persist) = handle_error!(self, self.internal_channel_update(&counterparty_node_id, msg), counterparty_node_id) {
11502 persist
11503 } else {
11504 NotifyOption::DoPersist
11505 }
11506 });
11507 }
11508
11509 fn handle_channel_reestablish(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelReestablish) {
11510 let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11511 let res = self.internal_channel_reestablish(&counterparty_node_id, msg);
11512 let persist = match &res {
11513 Err(e) if e.closes_channel() => NotifyOption::DoPersist,
11514 Err(_) => NotifyOption::SkipPersistHandleEvents,
11515 Ok(persist) => *persist,
11516 };
11517 let _ = handle_error!(self, res, counterparty_node_id);
11518 persist
11519 });
11520 }
11521
11522 fn peer_disconnected(&self, counterparty_node_id: PublicKey) {
11523 let _persistence_guard = PersistenceNotifierGuard::optionally_notify(
11524 self, || NotifyOption::SkipPersistHandleEvents);
11525 let mut failed_channels = Vec::new();
11526 let mut per_peer_state = self.per_peer_state.write().unwrap();
11527 let remove_peer = {
11528 log_debug!(
11529 WithContext::from(&self.logger, Some(counterparty_node_id), None, None),
11530 "Marking channels with {} disconnected and generating channel_updates.",
11531 log_pubkey!(counterparty_node_id)
11532 );
11533 if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
11534 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
11535 let peer_state = &mut *peer_state_lock;
11536 let pending_msg_events = &mut peer_state.pending_msg_events;
11537 peer_state.channel_by_id.retain(|_, phase| {
11538 let context = match phase {
11539 ChannelPhase::Funded(chan) => {
11540 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
11541 if chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok() {
11542 return true;
11544 }
11545 &mut chan.context
11546 },
11547 ChannelPhase::UnfundedOutboundV1(chan) if chan.is_resumable() => return true,
11554 ChannelPhase::UnfundedOutboundV1(chan) => &mut chan.context,
11555 ChannelPhase::UnfundedInboundV1(chan) => {
11557 &mut chan.context
11558 },
11559 ChannelPhase::UnfundedOutboundV2(chan) => {
11560 &mut chan.context
11561 },
11562 ChannelPhase::UnfundedInboundV2(chan) => {
11563 &mut chan.context
11564 },
11565 };
11566 let mut close_res = context.force_shutdown(false, ClosureReason::DisconnectedPeer);
11568 locked_close_channel!(self, peer_state, &context, close_res);
11569 failed_channels.push(close_res);
11570 false
11571 });
11572 peer_state.inbound_channel_request_by_id.clear();
11575 pending_msg_events.retain(|msg| {
11576 match msg {
11577 &events::MessageSendEvent::SendAcceptChannel { .. } => false,
11579 &events::MessageSendEvent::SendOpenChannel { .. } => false,
11580 &events::MessageSendEvent::SendFundingCreated { .. } => false,
11581 &events::MessageSendEvent::SendFundingSigned { .. } => false,
11582 &events::MessageSendEvent::SendAcceptChannelV2 { .. } => false,
11584 &events::MessageSendEvent::SendOpenChannelV2 { .. } => false,
11585 &events::MessageSendEvent::SendChannelReady { .. } => false,
11587 &events::MessageSendEvent::SendAnnouncementSignatures { .. } => false,
11588 &events::MessageSendEvent::SendStfu { .. } => false,
11590 &events::MessageSendEvent::SendSpliceInit { .. } => false,
11592 &events::MessageSendEvent::SendSpliceAck { .. } => false,
11593 &events::MessageSendEvent::SendSpliceLocked { .. } => false,
11594 &events::MessageSendEvent::SendTxAddInput { .. } => false,
11596 &events::MessageSendEvent::SendTxAddOutput { .. } => false,
11597 &events::MessageSendEvent::SendTxRemoveInput { .. } => false,
11598 &events::MessageSendEvent::SendTxRemoveOutput { .. } => false,
11599 &events::MessageSendEvent::SendTxComplete { .. } => false,
11600 &events::MessageSendEvent::SendTxSignatures { .. } => false,
11601 &events::MessageSendEvent::SendTxInitRbf { .. } => false,
11602 &events::MessageSendEvent::SendTxAckRbf { .. } => false,
11603 &events::MessageSendEvent::SendTxAbort { .. } => false,
11604 &events::MessageSendEvent::UpdateHTLCs { .. } => false,
11606 &events::MessageSendEvent::SendRevokeAndACK { .. } => false,
11607 &events::MessageSendEvent::SendClosingSigned { .. } => false,
11608 &events::MessageSendEvent::SendShutdown { .. } => false,
11609 &events::MessageSendEvent::SendChannelReestablish { .. } => false,
11610 &events::MessageSendEvent::HandleError { .. } => false,
11611 &events::MessageSendEvent::SendChannelAnnouncement { .. } => false,
11613 &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
11614 &events::MessageSendEvent::BroadcastChannelUpdate { .. } => {
11617 debug_assert!(false, "This event shouldn't have been here");
11618 false
11619 },
11620 &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
11621 &events::MessageSendEvent::SendChannelUpdate { .. } => false,
11622 &events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
11623 &events::MessageSendEvent::SendShortIdsQuery { .. } => false,
11624 &events::MessageSendEvent::SendReplyChannelRange { .. } => false,
11625 &events::MessageSendEvent::SendGossipTimestampFilter { .. } => false,
11626 }
11627 });
11628 debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect");
11629 peer_state.is_connected = false;
11630 peer_state.ok_to_remove(true)
11631 } else { debug_assert!(false, "Unconnected peer disconnected"); true }
11632 };
11633 if remove_peer {
11634 per_peer_state.remove(&counterparty_node_id);
11635 }
11636 mem::drop(per_peer_state);
11637
11638 for failure in failed_channels.drain(..) {
11639 self.finish_close_channel(failure);
11640 }
11641 }
11642
11643 fn peer_connected(&self, counterparty_node_id: PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> {
11644 let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
11645 if !init_msg.features.supports_static_remote_key() {
11646 log_debug!(logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
11647 return Err(());
11648 }
11649
11650 let mut res = Ok(());
11651
11652 PersistenceNotifierGuard::optionally_notify(self, || {
11653 let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected);
11658 let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
11659
11660 {
11661 let mut peer_state_lock = self.per_peer_state.write().unwrap();
11662 match peer_state_lock.entry(counterparty_node_id.clone()) {
11663 hash_map::Entry::Vacant(e) => {
11664 if inbound_peer_limited {
11665 res = Err(());
11666 return NotifyOption::SkipPersistNoEvents;
11667 }
11668 e.insert(Mutex::new(PeerState {
11669 channel_by_id: new_hash_map(),
11670 inbound_channel_request_by_id: new_hash_map(),
11671 latest_features: init_msg.features.clone(),
11672 pending_msg_events: Vec::new(),
11673 in_flight_monitor_updates: BTreeMap::new(),
11674 monitor_update_blocked_actions: BTreeMap::new(),
11675 actions_blocking_raa_monitor_updates: BTreeMap::new(),
11676 closed_channel_monitor_update_ids: BTreeMap::new(),
11677 is_connected: true,
11678 }));
11679 },
11680 hash_map::Entry::Occupied(e) => {
11681 let mut peer_state = e.get().lock().unwrap();
11682 peer_state.latest_features = init_msg.features.clone();
11683
11684 let best_block_height = self.best_block.read().unwrap().height;
11685 if inbound_peer_limited &&
11686 Self::unfunded_channel_count(&*peer_state, best_block_height) ==
11687 peer_state.channel_by_id.len()
11688 {
11689 res = Err(());
11690 return NotifyOption::SkipPersistNoEvents;
11691 }
11692
11693 debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
11694 peer_state.is_connected = true;
11695 },
11696 }
11697 }
11698
11699 log_debug!(logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
11700
11701 let per_peer_state = self.per_peer_state.read().unwrap();
11702 if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
11703 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
11704 let peer_state = &mut *peer_state_lock;
11705 let pending_msg_events = &mut peer_state.pending_msg_events;
11706
11707 for (_, phase) in peer_state.channel_by_id.iter_mut() {
11708 match phase {
11709 ChannelPhase::Funded(chan) => {
11710 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
11711 pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
11712 node_id: chan.context.get_counterparty_node_id(),
11713 msg: chan.get_channel_reestablish(&&logger),
11714 });
11715 }
11716
11717 ChannelPhase::UnfundedOutboundV1(chan) => {
11718 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
11719 if let Some(msg) = chan.get_open_channel(self.chain_hash, &&logger) {
11720 pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
11721 node_id: chan.context.get_counterparty_node_id(),
11722 msg,
11723 });
11724 }
11725 }
11726
11727 ChannelPhase::UnfundedOutboundV2(chan) => {
11728 pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 {
11729 node_id: chan.context.get_counterparty_node_id(),
11730 msg: chan.get_open_channel_v2(self.chain_hash),
11731 });
11732 },
11733
11734 ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedInboundV2(_) => {
11735 debug_assert!(false);
11739 }
11740 }
11741 }
11742 }
11743
11744 return NotifyOption::SkipPersistHandleEvents;
11745 });
11747 res
11748 }
11749
11750 fn handle_error(&self, counterparty_node_id: PublicKey, msg: &msgs::ErrorMessage) {
11751 match &msg.data as &str {
11752 "cannot co-op close channel w/ active htlcs"|
11753 "link failed to shutdown" =>
11754 {
11755 if !msg.channel_id.is_zero() {
11763 PersistenceNotifierGuard::optionally_notify(
11764 self,
11765 || -> NotifyOption {
11766 let per_peer_state = self.per_peer_state.read().unwrap();
11767 let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
11768 if peer_state_mutex_opt.is_none() { return NotifyOption::SkipPersistNoEvents; }
11769 let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
11770 if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
11771 if let Some(msg) = chan.get_outbound_shutdown() {
11772 peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
11773 node_id: counterparty_node_id,
11774 msg,
11775 });
11776 }
11777 peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
11778 node_id: counterparty_node_id,
11779 action: msgs::ErrorAction::SendWarningMessage {
11780 msg: msgs::WarningMessage {
11781 channel_id: msg.channel_id,
11782 data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
11783 },
11784 log_level: Level::Trace,
11785 }
11786 });
11787 return NotifyOption::SkipPersistHandleEvents;
11790 }
11791 NotifyOption::SkipPersistNoEvents
11792 }
11793 );
11794 }
11795 return;
11796 }
11797 _ => {}
11798 }
11799
11800 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11801
11802 if msg.channel_id.is_zero() {
11803 let channel_ids: Vec<ChannelId> = {
11804 let per_peer_state = self.per_peer_state.read().unwrap();
11805 let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
11806 if peer_state_mutex_opt.is_none() { return; }
11807 let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
11808 let peer_state = &mut *peer_state_lock;
11809 peer_state.inbound_channel_request_by_id.clear();
11812 peer_state.channel_by_id.keys().cloned().collect()
11813 };
11814 for channel_id in channel_ids {
11815 let _ = self.force_close_channel_with_peer(&channel_id, &counterparty_node_id, Some(&msg.data), true);
11817 }
11818 } else {
11819 {
11820 let per_peer_state = self.per_peer_state.read().unwrap();
11822 let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
11823 if peer_state_mutex_opt.is_none() { return; }
11824 let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
11825 let peer_state = &mut *peer_state_lock;
11826 match peer_state.channel_by_id.get_mut(&msg.channel_id) {
11827 Some(ChannelPhase::UnfundedOutboundV1(ref mut chan)) => {
11828 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
11829 if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator, &&logger) {
11830 peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
11831 node_id: counterparty_node_id,
11832 msg,
11833 });
11834 return;
11835 }
11836 },
11837 Some(ChannelPhase::UnfundedOutboundV2(ref mut chan)) => {
11838 if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
11839 peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 {
11840 node_id: counterparty_node_id,
11841 msg,
11842 });
11843 return;
11844 }
11845 },
11846 None | Some(ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedInboundV2(_) | ChannelPhase::Funded(_)) => (),
11847 }
11848 }
11849
11850 let _ = self.force_close_channel_with_peer(&msg.channel_id, &counterparty_node_id, Some(&msg.data), true);
11852 }
11853 }
11854
11855 fn provided_node_features(&self) -> NodeFeatures {
11856 provided_node_features(&self.default_configuration)
11857 }
11858
11859 fn provided_init_features(&self, _their_init_features: PublicKey) -> InitFeatures {
11860 provided_init_features(&self.default_configuration)
11861 }
11862
11863 fn get_chain_hashes(&self) -> Option<Vec<ChainHash>> {
11864 Some(vec![self.chain_hash])
11865 }
11866
11867 fn handle_tx_add_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddInput) {
11868 let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11872 let _ = handle_error!(self, self.internal_tx_add_input(counterparty_node_id, msg), counterparty_node_id);
11873 NotifyOption::SkipPersistHandleEvents
11874 });
11875 }
11876
11877 fn handle_tx_add_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddOutput) {
11878 let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11882 let _ = handle_error!(self, self.internal_tx_add_output(counterparty_node_id, msg), counterparty_node_id);
11883 NotifyOption::SkipPersistHandleEvents
11884 });
11885 }
11886
11887 fn handle_tx_remove_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveInput) {
11888 let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11892 let _ = handle_error!(self, self.internal_tx_remove_input(counterparty_node_id, msg), counterparty_node_id);
11893 NotifyOption::SkipPersistHandleEvents
11894 });
11895 }
11896
11897 fn handle_tx_remove_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveOutput) {
11898 let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11902 let _ = handle_error!(self, self.internal_tx_remove_output(counterparty_node_id, msg), counterparty_node_id);
11903 NotifyOption::SkipPersistHandleEvents
11904 });
11905 }
11906
11907 fn handle_tx_complete(&self, counterparty_node_id: PublicKey, msg: &msgs::TxComplete) {
11908 let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11912 let _ = handle_error!(self, self.internal_tx_complete(counterparty_node_id, msg), counterparty_node_id);
11913 NotifyOption::SkipPersistHandleEvents
11914 });
11915 }
11916
11917 fn handle_tx_signatures(&self, counterparty_node_id: PublicKey, msg: &msgs::TxSignatures) {
11918 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11919 let _ = handle_error!(self, self.internal_tx_signatures(&counterparty_node_id, msg), counterparty_node_id);
11920 }
11921
11922 fn handle_tx_init_rbf(&self, counterparty_node_id: PublicKey, msg: &msgs::TxInitRbf) {
11923 let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
11924 "Dual-funded channels not supported".to_owned(),
11925 msg.channel_id.clone())), counterparty_node_id);
11926 }
11927
11928 fn handle_tx_ack_rbf(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAckRbf) {
11929 let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
11930 "Dual-funded channels not supported".to_owned(),
11931 msg.channel_id.clone())), counterparty_node_id);
11932 }
11933
11934 fn handle_tx_abort(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAbort) {
11935 let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11939 let _ = handle_error!(self, self.internal_tx_abort(&counterparty_node_id, msg), counterparty_node_id);
11940 NotifyOption::SkipPersistHandleEvents
11941 });
11942 }
11943
11944 fn message_received(&self) {
11945 for (payment_id, retryable_invoice_request) in self
11946 .pending_outbound_payments
11947 .release_invoice_requests_awaiting_invoice()
11948 {
11949 let RetryableInvoiceRequest { invoice_request, nonce } = retryable_invoice_request;
11950 let hmac = payment_id.hmac_for_offer_payment(nonce, &self.inbound_payment_key);
11951 let context = MessageContext::Offers(OffersContext::OutboundPayment {
11952 payment_id,
11953 nonce,
11954 hmac: Some(hmac)
11955 });
11956 match self.create_blinded_paths(context) {
11957 Ok(reply_paths) => match self.enqueue_invoice_request(invoice_request, reply_paths) {
11958 Ok(_) => {}
11959 Err(_) => {
11960 log_warn!(self.logger,
11961 "Retry failed for an invoice request with payment_id: {}",
11962 payment_id
11963 );
11964 }
11965 },
11966 Err(_) => {
11967 log_warn!(self.logger,
11968 "Retry failed for an invoice request with payment_id: {}. \
11969 Reason: router could not find a blinded path to include as the reply path",
11970 payment_id
11971 );
11972 }
11973 }
11974 }
11975 }
11976}
11977
11978impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
11979OffersMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
11980where
11981 M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
11982 T::Target: BroadcasterInterface,
11983 ES::Target: EntropySource,
11984 NS::Target: NodeSigner,
11985 SP::Target: SignerProvider,
11986 F::Target: FeeEstimator,
11987 R::Target: Router,
11988 MR::Target: MessageRouter,
11989 L::Target: Logger,
11990{
11991 fn handle_message(
11992 &self, message: OffersMessage, context: Option<OffersContext>, responder: Option<Responder>,
11993 ) -> Option<(OffersMessage, ResponseInstruction)> {
11994 let secp_ctx = &self.secp_ctx;
11995 let expanded_key = &self.inbound_payment_key;
11996
11997 macro_rules! handle_pay_invoice_res {
11998 ($res: expr, $invoice: expr, $logger: expr) => {{
11999 let error = match $res {
12000 Err(Bolt12PaymentError::UnknownRequiredFeatures) => {
12001 log_trace!(
12002 $logger, "Invoice requires unknown features: {:?}",
12003 $invoice.invoice_features()
12004 );
12005 InvoiceError::from(Bolt12SemanticError::UnknownRequiredFeatures)
12006 },
12007 Err(Bolt12PaymentError::SendingFailed(e)) => {
12008 log_trace!($logger, "Failed paying invoice: {:?}", e);
12009 InvoiceError::from_string(format!("{:?}", e))
12010 },
12011 #[cfg(async_payments)]
12012 Err(Bolt12PaymentError::BlindedPathCreationFailed) => {
12013 let err_msg = "Failed to create a blinded path back to ourselves";
12014 log_trace!($logger, "{}", err_msg);
12015 InvoiceError::from_string(err_msg.to_string())
12016 },
12017 Err(Bolt12PaymentError::UnexpectedInvoice)
12018 | Err(Bolt12PaymentError::DuplicateInvoice)
12019 | Ok(()) => return None,
12020 };
12021
12022 match responder {
12023 Some(responder) => return Some((OffersMessage::InvoiceError(error), responder.respond())),
12024 None => {
12025 log_trace!($logger, "No reply path to send error: {:?}", error);
12026 return None
12027 },
12028 }
12029 }}
12030 }
12031
12032 match message {
12033 OffersMessage::InvoiceRequest(invoice_request) => {
12034 let responder = match responder {
12035 Some(responder) => responder,
12036 None => return None,
12037 };
12038
12039 let nonce = match context {
12040 None if invoice_request.metadata().is_some() => None,
12041 Some(OffersContext::InvoiceRequest { nonce }) => Some(nonce),
12042 _ => return None,
12043 };
12044
12045 let invoice_request = match nonce {
12046 Some(nonce) => match invoice_request.verify_using_recipient_data(
12047 nonce, expanded_key, secp_ctx,
12048 ) {
12049 Ok(invoice_request) => invoice_request,
12050 Err(()) => return None,
12051 },
12052 None => match invoice_request.verify_using_metadata(expanded_key, secp_ctx) {
12053 Ok(invoice_request) => invoice_request,
12054 Err(()) => return None,
12055 },
12056 };
12057
12058 let amount_msats = match InvoiceBuilder::<DerivedSigningPubkey>::amount_msats(
12059 &invoice_request.inner
12060 ) {
12061 Ok(amount_msats) => amount_msats,
12062 Err(error) => return Some((OffersMessage::InvoiceError(error.into()), responder.respond())),
12063 };
12064
12065 let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
12066 let (payment_hash, payment_secret) = match self.create_inbound_payment(
12067 Some(amount_msats), relative_expiry, None
12068 ) {
12069 Ok((payment_hash, payment_secret)) => (payment_hash, payment_secret),
12070 Err(()) => {
12071 let error = Bolt12SemanticError::InvalidAmount;
12072 return Some((OffersMessage::InvoiceError(error.into()), responder.respond()));
12073 },
12074 };
12075
12076 let payment_context = PaymentContext::Bolt12Offer(Bolt12OfferContext {
12077 offer_id: invoice_request.offer_id,
12078 invoice_request: invoice_request.fields(),
12079 });
12080 let payment_paths = match self.create_blinded_payment_paths(
12081 amount_msats, payment_secret, payment_context
12082 ) {
12083 Ok(payment_paths) => payment_paths,
12084 Err(()) => {
12085 let error = Bolt12SemanticError::MissingPaths;
12086 return Some((OffersMessage::InvoiceError(error.into()), responder.respond()));
12087 },
12088 };
12089
12090 #[cfg(not(feature = "std"))]
12091 let created_at = Duration::from_secs(
12092 self.highest_seen_timestamp.load(Ordering::Acquire) as u64
12093 );
12094
12095 let response = if invoice_request.keys.is_some() {
12096 #[cfg(feature = "std")]
12097 let builder = invoice_request.respond_using_derived_keys(
12098 payment_paths, payment_hash
12099 );
12100 #[cfg(not(feature = "std"))]
12101 let builder = invoice_request.respond_using_derived_keys_no_std(
12102 payment_paths, payment_hash, created_at
12103 );
12104 builder
12105 .map(InvoiceBuilder::<DerivedSigningPubkey>::from)
12106 .and_then(|builder| builder.allow_mpp().build_and_sign(secp_ctx))
12107 .map_err(InvoiceError::from)
12108 } else {
12109 #[cfg(feature = "std")]
12110 let builder = invoice_request.respond_with(payment_paths, payment_hash);
12111 #[cfg(not(feature = "std"))]
12112 let builder = invoice_request.respond_with_no_std(
12113 payment_paths, payment_hash, created_at
12114 );
12115 builder
12116 .map(InvoiceBuilder::<ExplicitSigningPubkey>::from)
12117 .and_then(|builder| builder.allow_mpp().build())
12118 .map_err(InvoiceError::from)
12119 .and_then(|invoice| {
12120 #[cfg(c_bindings)]
12121 let mut invoice = invoice;
12122 invoice
12123 .sign(|invoice: &UnsignedBolt12Invoice|
12124 self.node_signer.sign_bolt12_invoice(invoice)
12125 )
12126 .map_err(InvoiceError::from)
12127 })
12128 };
12129
12130 match response {
12131 Ok(invoice) => {
12132 let nonce = Nonce::from_entropy_source(&*self.entropy_source);
12133 let hmac = payment_hash.hmac_for_offer_payment(nonce, expanded_key);
12134 let context = MessageContext::Offers(OffersContext::InboundPayment { payment_hash, nonce, hmac });
12135 Some((OffersMessage::Invoice(invoice), responder.respond_with_reply_path(context)))
12136 },
12137 Err(error) => Some((OffersMessage::InvoiceError(error.into()), responder.respond())),
12138 }
12139 },
12140 OffersMessage::Invoice(invoice) => {
12141 let payment_id = match self.verify_bolt12_invoice(&invoice, context.as_ref()) {
12142 Ok(payment_id) => payment_id,
12143 Err(()) => return None,
12144 };
12145
12146 let logger = WithContext::from(
12147 &self.logger, None, None, Some(invoice.payment_hash()),
12148 );
12149
12150 if self.default_configuration.manually_handle_bolt12_invoices {
12151 let event = Event::InvoiceReceived {
12152 payment_id, invoice, context, responder,
12153 };
12154 self.pending_events.lock().unwrap().push_back((event, None));
12155 return None;
12156 }
12157
12158 let res = self.send_payment_for_verified_bolt12_invoice(&invoice, payment_id);
12159 handle_pay_invoice_res!(res, invoice, logger);
12160 },
12161 #[cfg(async_payments)]
12162 OffersMessage::StaticInvoice(invoice) => {
12163 let payment_id = match context {
12164 Some(OffersContext::OutboundPayment { payment_id, nonce, hmac: Some(hmac) }) => {
12165 if payment_id.verify_for_offer_payment(hmac, nonce, expanded_key).is_err() {
12166 return None
12167 }
12168 payment_id
12169 },
12170 _ => return None
12171 };
12172 let res = self.initiate_async_payment(&invoice, payment_id);
12173 handle_pay_invoice_res!(res, invoice, self.logger);
12174 },
12175 OffersMessage::InvoiceError(invoice_error) => {
12176 let payment_hash = match context {
12177 Some(OffersContext::InboundPayment { payment_hash, nonce, hmac }) => {
12178 match payment_hash.verify_for_offer_payment(hmac, nonce, expanded_key) {
12179 Ok(_) => Some(payment_hash),
12180 Err(_) => None,
12181 }
12182 },
12183 _ => None,
12184 };
12185
12186 let logger = WithContext::from(&self.logger, None, None, payment_hash);
12187 log_trace!(logger, "Received invoice_error: {}", invoice_error);
12188
12189 match context {
12190 Some(OffersContext::OutboundPayment { payment_id, nonce, hmac: Some(hmac) }) => {
12191 if let Ok(()) = payment_id.verify_for_offer_payment(hmac, nonce, expanded_key) {
12192 self.abandon_payment_with_reason(
12193 payment_id, PaymentFailureReason::InvoiceRequestRejected,
12194 );
12195 }
12196 },
12197 _ => {},
12198 }
12199
12200 None
12201 },
12202 }
12203 }
12204
12205 fn release_pending_messages(&self) -> Vec<(OffersMessage, MessageSendInstructions)> {
12206 core::mem::take(&mut self.pending_offers_messages.lock().unwrap())
12207 }
12208}
12209
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
AsyncPaymentsMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	// We never hold HTLCs on behalf of async recipients here, so there is never a
	// `release_held_htlc` to send in response.
	fn handle_held_htlc_available(
		&self, _message: HeldHtlcAvailable, _responder: Option<Responder>
	) -> Option<(ReleaseHeldHtlc, ResponseInstruction)> {
		None
	}

	// Releases a previously-held HTLC for an async payment. Entire body is compiled out
	// unless the `async_payments` cfg is set.
	fn handle_release_held_htlc(&self, _message: ReleaseHeldHtlc, _context: AsyncPaymentsContext) {
		#[cfg(async_payments)] {
			// NOTE(review): irrefutable let — assumes `OutboundPayment` is the only
			// `AsyncPaymentsContext` variant under this cfg; confirm if variants are added.
			let AsyncPaymentsContext::OutboundPayment { payment_id, hmac, nonce } = _context;
			// Authenticate that this context was created by us before initiating a send.
			if payment_id.verify_for_async_payment(hmac, nonce, &self.inbound_payment_key).is_err() { return }
			if let Err(e) = self.send_payment_for_static_invoice(payment_id) {
				log_trace!(
					self.logger, "Failed to release held HTLC with payment id {}: {:?}", payment_id, e
				);
			}
		}
	}

	// Drains the queued async-payments onion messages for sending.
	fn release_pending_messages(&self) -> Vec<(AsyncPaymentsMessage, MessageSendInstructions)> {
		core::mem::take(&mut self.pending_async_payments_messages.lock().unwrap())
	}
}
12245
#[cfg(feature = "dnssec")]
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
DNSResolverMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	// We act as a DNSSEC resolution *client* only, so incoming queries are not answered.
	fn handle_dnssec_query(
		&self, _message: DNSSECQuery, _responder: Option<Responder>,
	) -> Option<(DNSResolverMessage, ResponseInstruction)> {
		None
	}

	// Handles a DNSSEC proof received in response to one of our BIP 353 name resolution
	// requests: if the resolver yields an offer, pay it for each payment that was awaiting
	// this name's resolution.
	fn handle_dnssec_proof(&self, message: DNSSECProof, context: DNSResolverContext) {
		let offer_opt = self.hrn_resolver.handle_dnssec_proof_for_offer(message, context);
		// `mut offer` is only mutated under `_test_utils` (see override below), so silence
		// the unused_mut warning in non-test builds.
		#[cfg_attr(not(feature = "_test_utils"), allow(unused_mut))]
		if let Some((completed_requests, mut offer)) = offer_opt {
			for (name, payment_id) in completed_requests {
				// Test-only hook allowing tests to substitute the resolved offer.
				#[cfg(feature = "_test_utils")]
				if let Some(replacement_offer) = self.testing_dnssec_proof_offer_resolution_override.lock().unwrap().remove(&name) {
					offer = replacement_offer;
				}
				if let Ok(amt_msats) = self.pending_outbound_payments.amt_msats_for_payment_awaiting_offer(payment_id) {
					let offer_pay_res =
						self.pay_for_offer_intern(&offer, None, Some(amt_msats), None, payment_id, Some(name),
							|invoice_request, nonce| {
								let retryable_invoice_request = RetryableInvoiceRequest {
									invoice_request: invoice_request.clone(),
									nonce,
								};
								// Transition the pending payment from awaiting-offer to
								// received-offer, keeping the invoice request for retries.
								self.pending_outbound_payments
									.received_offer(payment_id, Some(retryable_invoice_request))
									.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
							});
					if offer_pay_res.is_err() {
						// The offer we tried to pay is malformed or we failed to kick off
						// the payment; abandon it so the user is notified.
						self.pending_outbound_payments.abandon_payment(
							payment_id, PaymentFailureReason::RouteNotFound, &self.pending_events,
						);
					}
				}
			}
		}
	}

	// Drains the queued DNS resolver onion messages for sending.
	fn release_pending_messages(&self) -> Vec<(DNSResolverMessage, MessageSendInstructions)> {
		core::mem::take(&mut self.pending_dns_onion_messages.lock().unwrap())
	}
}
12308
12309impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
12310NodeIdLookUp for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
12311where
12312 M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
12313 T::Target: BroadcasterInterface,
12314 ES::Target: EntropySource,
12315 NS::Target: NodeSigner,
12316 SP::Target: SignerProvider,
12317 F::Target: FeeEstimator,
12318 R::Target: Router,
12319 MR::Target: MessageRouter,
12320 L::Target: Logger,
12321{
12322 fn next_node_id(&self, short_channel_id: u64) -> Option<PublicKey> {
12323 self.short_to_chan_info.read().unwrap().get(&short_channel_id).map(|(pubkey, _)| *pubkey)
12324 }
12325}
12326
12327pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures {
12330 let mut node_features = provided_init_features(config).to_context();
12331 node_features.set_keysend_optional();
12332 node_features
12333}
12334
/// Fetches the set of [`Bolt11InvoiceFeatures`] flags that are provided by or required by
/// [`ChannelManager`]. Test-only.
#[cfg(any(feature = "_test_utils", test))]
pub(crate) fn provided_bolt11_invoice_features(config: &UserConfig) -> Bolt11InvoiceFeatures {
	let init_features = provided_init_features(config);
	init_features.to_context()
}
12344
12345pub(crate) fn provided_bolt12_invoice_features(config: &UserConfig) -> Bolt12InvoiceFeatures {
12348 provided_init_features(config).to_context()
12349}
12350
12351pub(crate) fn provided_channel_features(config: &UserConfig) -> ChannelFeatures {
12354 provided_init_features(config).to_context()
12355}
12356
12357pub(crate) fn provided_channel_type_features(config: &UserConfig) -> ChannelTypeFeatures {
12360 ChannelTypeFeatures::from_init(&provided_init_features(config))
12361}
12362
12363pub fn provided_init_features(config: &UserConfig) -> InitFeatures {
12366 let mut features = InitFeatures::empty();
12370 features.set_data_loss_protect_required();
12371 features.set_upfront_shutdown_script_optional();
12372 features.set_variable_length_onion_required();
12373 features.set_static_remote_key_required();
12374 features.set_payment_secret_required();
12375 features.set_basic_mpp_optional();
12376 features.set_wumbo_optional();
12377 features.set_shutdown_any_segwit_optional();
12378 features.set_channel_type_optional();
12379 features.set_scid_privacy_optional();
12380 features.set_zero_conf_optional();
12381 features.set_route_blinding_optional();
12382 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx {
12383 features.set_anchors_zero_fee_htlc_tx_optional();
12384 }
12385 #[cfg(dual_funding)]
12386 features.set_dual_fund_optional();
12387 features
12388}
12389
// Version prefix written/checked by `write_ver_prefix!`/`read_ver_prefix!` in the
// `ChannelManager` (de)serialization code below. Readers refuse data with a version
// above `SERIALIZATION_VERSION`; `MIN_SERIALIZATION_VERSION` is the oldest we emit.
const SERIALIZATION_VERSION: u8 = 1;
const MIN_SERIALIZATION_VERSION: u8 = 1;
12392
// TLV (de)serialization for `PhantomRouteHints`. The type numbers are part of the
// persisted format and must never be renumbered or reused.
impl_writeable_tlv_based!(PhantomRouteHints, {
	(2, channels, required_vec),
	(4, phantom_scid, required),
	(6, real_node_pubkey, required),
});
12398
// TLV (de)serialization for `BlindedForward`. `failure` defaults to
// `FromIntroductionNode` for data written before that field existed.
impl_writeable_tlv_based!(BlindedForward, {
	(0, inbound_blinding_point, required),
	(1, failure, (default_value, BlindedFailure::FromIntroductionNode)),
	(3, next_blinding_override, option),
});
12404
// TLV (de)serialization for the `PendingHTLCRouting` enum. Variant ids and field type
// numbers are part of the persisted format and must never be renumbered or reused.
impl_writeable_tlv_based_enum!(PendingHTLCRouting,
	(0, Forward) => {
		(0, onion_packet, required),
		(1, blinded, option),
		(2, short_channel_id, required),
		(3, incoming_cltv_expiry, option),
	},
	(1, Receive) => {
		(0, payment_data, required),
		(1, phantom_shared_secret, option),
		(2, incoming_cltv_expiry, required),
		(3, payment_metadata, option),
		(5, custom_tlvs, optional_vec),
		(7, requires_blinded_error, (default_value, false)),
		(9, payment_context, option),
	},
	(2, ReceiveKeysend) => {
		(0, payment_preimage, required),
		(1, requires_blinded_error, (default_value, false)),
		(2, incoming_cltv_expiry, required),
		(3, payment_metadata, option),
		(4, payment_data, option), (5, custom_tlvs, optional_vec),
		(7, has_recipient_created_payment_secret, (default_value, false)),
	},
);
12431
// TLV (de)serialization for `PendingHTLCInfo`. Type numbers are part of the persisted
// format and must never be renumbered or reused.
impl_writeable_tlv_based!(PendingHTLCInfo, {
	(0, routing, required),
	(2, incoming_shared_secret, required),
	(4, payment_hash, required),
	(6, outgoing_amt_msat, required),
	(8, outgoing_cltv_value, required),
	(9, incoming_amt_msat, option),
	(10, skimmed_fee_msat, option),
});
12441
12442
impl Writeable for HTLCFailureMsg {
	// Serializes as a one-byte variant tag (0 = Relay, 1 = Malformed) followed by the
	// variant's fields in order. This layout must match `Readable for HTLCFailureMsg`.
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		match self {
			HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id, htlc_id, reason }) => {
				0u8.write(writer)?;
				channel_id.write(writer)?;
				htlc_id.write(writer)?;
				reason.write(writer)?;
			},
			HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
				channel_id, htlc_id, sha256_of_onion, failure_code
			}) => {
				1u8.write(writer)?;
				channel_id.write(writer)?;
				htlc_id.write(writer)?;
				sha256_of_onion.write(writer)?;
				failure_code.write(writer)?;
			},
		}
		Ok(())
	}
}
12465
impl Readable for HTLCFailureMsg {
	// Deserializes the variant tag written above. Tags 0/1 carry the fields inline;
	// tags 2/3 are an alternate length-prefixed encoding (BigSize length followed by the
	// full message), presumably emitted by other serialization paths — kept for compat.
	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
		let id: u8 = Readable::read(reader)?;
		match id {
			0 => {
				Ok(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
					channel_id: Readable::read(reader)?,
					htlc_id: Readable::read(reader)?,
					reason: Readable::read(reader)?,
				}))
			},
			1 => {
				Ok(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
					channel_id: Readable::read(reader)?,
					htlc_id: Readable::read(reader)?,
					sha256_of_onion: Readable::read(reader)?,
					failure_code: Readable::read(reader)?,
				}))
			},
			2 => {
				// Bound the inner read to the declared length, then skip any trailing
				// bytes we don't understand.
				let length: BigSize = Readable::read(reader)?;
				let mut s = FixedLengthReader::new(reader, length.0);
				let res = Readable::read(&mut s)?;
				s.eat_remaining()?; Ok(HTLCFailureMsg::Relay(res))
			},
			3 => {
				let length: BigSize = Readable::read(reader)?;
				let mut s = FixedLengthReader::new(reader, length.0);
				let res = Readable::read(&mut s)?;
				s.eat_remaining()?; Ok(HTLCFailureMsg::Malformed(res))
			},
			// Unknown even variant: refuse to deserialize rather than misinterpret.
			_ => Err(DecodeError::UnknownRequiredFeature),
		}
	}
}
12509
// Legacy-format enum serialization for `PendingHTLCStatus` (tuple variants, no TLVs).
impl_writeable_tlv_based_enum_legacy!(PendingHTLCStatus, ;
	(0, Forward),
	(1, Fail),
);
12514
// TLV enum serialization for `BlindedFailure`; both variants are fieldless.
impl_writeable_tlv_based_enum!(BlindedFailure,
	(0, FromIntroductionNode) => {},
	(2, FromBlindedNode) => {},
);
12519
// TLV (de)serialization for `HTLCPreviousHopData`. `channel_id` is derived from the
// funding outpoint when reading data written before the field existed.
impl_writeable_tlv_based!(HTLCPreviousHopData, {
	(0, short_channel_id, required),
	(1, phantom_shared_secret, option),
	(2, outpoint, required),
	(3, blinded_failure, option),
	(4, htlc_id, required),
	(5, cltv_expiry, option),
	(6, incoming_packet_shared_secret, required),
	(7, user_channel_id, option),
	(9, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(outpoint.0.unwrap()))),
	(11, counterparty_node_id, option),
});
12534
impl Writeable for ClaimableHTLC {
	// Flattens the `onion_payload` enum into two optional TLVs: invoice payloads write
	// their legacy hop data at type 4, keysend payloads write the preimage at type 8.
	// `Readable for ClaimableHTLC` reverses this mapping.
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		let (payment_data, keysend_preimage) = match &self.onion_payload {
			OnionPayload::Invoice { _legacy_hop_data } => {
				(_legacy_hop_data.as_ref(), None)
			},
			OnionPayload::Spontaneous(preimage) => (None, Some(preimage)),
		};
		write_tlv_fields!(writer, {
			(0, self.prev_hop, required),
			(1, self.total_msat, required),
			(2, self.value, required),
			(3, self.sender_intended_value, required),
			(4, payment_data, option),
			(5, self.total_value_received, option),
			(6, self.cltv_expiry, required),
			(8, keysend_preimage, option),
			(10, self.counterparty_skimmed_fee_msat, option),
		});
		Ok(())
	}
}
12557
impl Readable for ClaimableHTLC {
	// Reconstructs a `ClaimableHTLC`, reversing the TLV flattening in `Writeable` above.
	// The presence of a keysend preimage (type 8) selects the `Spontaneous` payload;
	// otherwise the HTLC is treated as an invoice payment.
	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
		_init_and_read_len_prefixed_tlv_fields!(reader, {
			(0, prev_hop, required),
			(1, total_msat, option),
			(2, value_ser, required),
			(3, sender_intended_value, option),
			(4, payment_data_opt, option),
			(5, total_value_received, option),
			(6, cltv_expiry, required),
			(8, keysend_preimage, option),
			(10, counterparty_skimmed_fee_msat, option),
		});
		let payment_data: Option<msgs::FinalOnionHopData> = payment_data_opt;
		let value = value_ser.0.unwrap();
		let onion_payload = match keysend_preimage {
			Some(p) => {
				// A keysend HTLC must not also carry invoice payment data.
				if payment_data.is_some() {
					return Err(DecodeError::InvalidValue)
				}
				// Back-fill `total_msat` (not written by older versions) from this
				// HTLC's own value.
				if total_msat.is_none() {
					total_msat = Some(value);
				}
				OnionPayload::Spontaneous(p)
			},
			None => {
				// Invoice payment: back-fill `total_msat` from the legacy hop data,
				// which older versions always wrote.
				if total_msat.is_none() {
					if payment_data.is_none() {
						return Err(DecodeError::InvalidValue)
					}
					total_msat = Some(payment_data.as_ref().unwrap().total_msat);
				}
				OnionPayload::Invoice { _legacy_hop_data: payment_data }
			},
		};
		Ok(Self {
			prev_hop: prev_hop.0.unwrap(),
			// Not serialized; timer state restarts from zero on reload.
			timer_ticks: 0,
			value,
			sender_intended_value: sender_intended_value.unwrap_or(value),
			total_value_received,
			total_msat: total_msat.unwrap(),
			onion_payload,
			cltv_expiry: cltv_expiry.0.unwrap(),
			counterparty_skimmed_fee_msat,
		})
	}
}
12606
impl Readable for HTLCSource {
	// Deserializes an `HTLCSource` from its one-byte variant tag: 0 = `OutboundRoute`
	// (TLV-encoded fields), 1 = `PreviousHopData` (inline). Must mirror `Writeable` below.
	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
		let id: u8 = Readable::read(reader)?;
		match id {
			0 => {
				let mut session_priv: crate::util::ser::RequiredWrapper<SecretKey> = crate::util::ser::RequiredWrapper(None);
				let mut first_hop_htlc_msat: u64 = 0;
				let mut path_hops = Vec::new();
				let mut payment_id = None;
				let mut payment_params: Option<PaymentParameters> = None;
				let mut blinded_tail: Option<BlindedTail> = None;
				read_tlv_fields!(reader, {
					(0, session_priv, required),
					(1, payment_id, option),
					(2, first_hop_htlc_msat, required),
					(4, path_hops, required_vec),
					(5, payment_params, (option: ReadableArgs, 0)),
					(6, blinded_tail, option),
				});
				if payment_id.is_none() {
					// Data written before payment ids existed: derive a stable id from the
					// session key bytes, matching the historical behavior.
					payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref()));
				}
				let path = Path { hops: path_hops, blinded_tail };
				// An outbound route with no hops is nonsensical.
				if path.hops.len() == 0 {
					return Err(DecodeError::InvalidValue);
				}
				if let Some(params) = payment_params.as_mut() {
					if let Payee::Clear { ref mut final_cltv_expiry_delta, .. } = params.payee {
						// Back-fill a zero (unset) final CLTV delta from the path itself.
						if final_cltv_expiry_delta == &0 {
							*final_cltv_expiry_delta = path.final_cltv_expiry_delta().ok_or(DecodeError::InvalidValue)?;
						}
					}
				}
				Ok(HTLCSource::OutboundRoute {
					session_priv: session_priv.0.unwrap(),
					first_hop_htlc_msat,
					path,
					payment_id: payment_id.unwrap(),
				})
			}
			1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
			_ => Err(DecodeError::UnknownRequiredFeature),
		}
	}
}
12654
impl Writeable for HTLCSource {
	// Serializes an `HTLCSource` with a one-byte variant tag; see `Readable` above for
	// the inverse mapping.
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), crate::io::Error> {
		match self {
			HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id } => {
				0u8.write(writer)?;
				let payment_id_opt = Some(payment_id);
				write_tlv_fields!(writer, {
					(0, session_priv, required),
					(1, payment_id_opt, option),
					(2, first_hop_htlc_msat, required),
					(4, path.hops, required_vec),
					// Type 5 previously carried `PaymentParameters`; it is no longer
					// written (an `option` with `None` emits nothing) but the type
					// number stays reserved.
					(5, None::<PaymentParameters>, option), (6, path.blinded_tail, option),
				});
			}
			HTLCSource::PreviousHopData(ref field) => {
				1u8.write(writer)?;
				field.write(writer)?;
			}
		}
		Ok(())
	}
}
12679
// TLV (de)serialization for `PendingAddHTLCInfo`. `prev_channel_id` is derived from the
// previous funding outpoint when reading data written before the field existed.
impl_writeable_tlv_based!(PendingAddHTLCInfo, {
	(0, forward_info, required),
	(1, prev_user_channel_id, (default_value, 0)),
	(2, prev_short_channel_id, required),
	(4, prev_htlc_id, required),
	(6, prev_funding_outpoint, required),
	(7, prev_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(prev_funding_outpoint.0.unwrap()))),
	(9, prev_counterparty_node_id, option),
});
12691
impl Writeable for HTLCForwardInfo {
	// Serializes `HTLCForwardInfo`. Note that `FailHTLC` and `FailMalformedHTLC` share
	// the same variant byte: the reader distinguishes them by the presence of the
	// odd-typed (ignorable) TLV 1/3 fields, so older readers can still parse the data.
	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
		const FAIL_HTLC_VARIANT_ID: u8 = 1;
		match self {
			Self::AddHTLC(info) => {
				0u8.write(w)?;
				info.write(w)?;
			},
			Self::FailHTLC { htlc_id, err_packet } => {
				FAIL_HTLC_VARIANT_ID.write(w)?;
				write_tlv_fields!(w, {
					(0, htlc_id, required),
					(2, err_packet, required),
				});
			},
			Self::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
				FAIL_HTLC_VARIANT_ID.write(w)?;
				// Write an empty error packet at the required type 2 slot so readers
				// expecting a `FailHTLC` layout still decode successfully.
				let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
				write_tlv_fields!(w, {
					(0, htlc_id, required),
					(1, failure_code, required),
					(2, dummy_err_packet, required),
					(3, sha256_of_onion, required),
				});
			},
		}
		Ok(())
	}
}
12724
impl Readable for HTLCForwardInfo {
	// Deserializes `HTLCForwardInfo`, undoing the shared-variant trick in `Writeable`
	// above: variant 1 is `FailMalformedHTLC` iff the odd TLV type 1 (failure code) is
	// present, otherwise `FailHTLC`.
	fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
		let id: u8 = Readable::read(r)?;
		Ok(match id {
			0 => Self::AddHTLC(Readable::read(r)?),
			1 => {
				_init_and_read_len_prefixed_tlv_fields!(r, {
					(0, htlc_id, required),
					(1, malformed_htlc_failure_code, option),
					(2, err_packet, required),
					(3, sha256_of_onion, option),
				});
				if let Some(failure_code) = malformed_htlc_failure_code {
					Self::FailMalformedHTLC {
						htlc_id: _init_tlv_based_struct_field!(htlc_id, required),
						failure_code,
						// A malformed failure must carry the onion hash alongside its code.
						sha256_of_onion: sha256_of_onion.ok_or(DecodeError::InvalidValue)?,
					}
				} else {
					Self::FailHTLC {
						htlc_id: _init_tlv_based_struct_field!(htlc_id, required),
						err_packet: _init_tlv_based_struct_field!(err_packet, required),
					}
				}
			},
			_ => return Err(DecodeError::InvalidValue),
		})
	}
}
12754
// TLV (de)serialization for `PendingInboundPayment`. Type numbers are part of the
// persisted format and must never be renumbered or reused.
impl_writeable_tlv_based!(PendingInboundPayment, {
	(0, payment_secret, required),
	(2, expiry_time, required),
	(4, user_payment_id, required),
	(6, payment_preimage, required),
	(8, min_value_msat, required),
});
12762
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> Writeable for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	// Serializes the entire `ChannelManager` state. The field order below IS the on-disk
	// format and must never change; new data is only added via the trailing TLV stream.
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		// Block all other operations for the duration of the write so we snapshot a
		// consistent view of our state.
		let _consistency_lock = self.total_consistency_lock.write().unwrap();

		write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);

		self.chain_hash.write(writer)?;
		{
			let best_block = self.best_block.read().unwrap();
			best_block.height.write(writer)?;
			best_block.block_hash.write(writer)?;
		}

		// Take the write lock so per-peer state cannot be mutated while we serialize.
		let per_peer_state = self.per_peer_state.write().unwrap();

		let mut serializable_peer_count: u64 = 0;
		{
			// Only channels whose funding has been broadcast are serialized; pre-funding
			// channels are dropped on restart.
			let mut number_of_funded_channels = 0;
			for (_, peer_state_mutex) in per_peer_state.iter() {
				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
				let peer_state = &mut *peer_state_lock;
				if !peer_state.ok_to_remove(false) {
					serializable_peer_count += 1;
				}

				number_of_funded_channels += peer_state.channel_by_id.iter().filter(
					|(_, phase)| if let ChannelPhase::Funded(chan) = phase { chan.context.is_funding_broadcast() } else { false }
				).count();
			}

			(number_of_funded_channels as u64).write(writer)?;

			for (_, peer_state_mutex) in per_peer_state.iter() {
				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
				let peer_state = &mut *peer_state_lock;
				for channel in peer_state.channel_by_id.iter().filter_map(
					|(_, phase)| if let ChannelPhase::Funded(channel) = phase {
						if channel.context.is_funding_broadcast() { Some(channel) } else { None }
					} else { None }
				) {
					channel.write(writer)?;
				}
			}
		}

		{
			// Pending forwards, keyed by the outbound short channel id.
			let forward_htlcs = self.forward_htlcs.lock().unwrap();
			(forward_htlcs.len() as u64).write(writer)?;
			for (short_channel_id, pending_forwards) in forward_htlcs.iter() {
				short_channel_id.write(writer)?;
				(pending_forwards.len() as u64).write(writer)?;
				for forward in pending_forwards {
					forward.write(writer)?;
				}
			}
		}

		// Only include the not-yet-decoded update_add queue in the TLV stream when
		// non-empty (written as optional type 14 below).
		let mut decode_update_add_htlcs_opt = None;
		let decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
		if !decode_update_add_htlcs.is_empty() {
			decode_update_add_htlcs_opt = Some(decode_update_add_htlcs);
		}

		let claimable_payments = self.claimable_payments.lock().unwrap();
		let pending_outbound_payments = self.pending_outbound_payments.pending_outbound_payments.lock().unwrap();

		// The purposes and onion fields of claimable payments are written later in the
		// TLV stream (types 9 and 13); collect them here in iteration order so they can
		// be re-associated with the payments on read.
		let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
		let mut htlc_onion_fields: Vec<&_> = Vec::new();
		(claimable_payments.claimable_payments.len() as u64).write(writer)?;
		for (payment_hash, payment) in claimable_payments.claimable_payments.iter() {
			payment_hash.write(writer)?;
			(payment.htlcs.len() as u64).write(writer)?;
			for htlc in payment.htlcs.iter() {
				htlc.write(writer)?;
			}
			htlc_purposes.push(&payment.purpose);
			htlc_onion_fields.push(&payment.onion_fields);
		}

		let mut monitor_update_blocked_actions_per_peer = None;
		let mut peer_states = Vec::new();
		for (_, peer_state_mutex) in per_peer_state.iter() {
			// Taking the second lock here is fine as we hold the per_peer_state write
			// lock, preventing any other lock order from being established — see the
			// method's own safety requirements.
			peer_states.push(peer_state_mutex.unsafe_well_ordered_double_lock_self());
		}

		(serializable_peer_count).write(writer)?;
		for ((peer_pubkey, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
			// Peers deemed removable are skipped; the count written above used the same
			// predicate so it stays consistent.
			if !peer_state.ok_to_remove(false) {
				peer_pubkey.write(writer)?;
				peer_state.latest_features.write(writer)?;
				if !peer_state.monitor_update_blocked_actions.is_empty() {
					monitor_update_blocked_actions_per_peer
						.get_or_insert_with(Vec::new)
						.push((*peer_pubkey, &peer_state.monitor_update_blocked_actions));
				}
			}
		}

		let events = self.pending_events.lock().unwrap();
		// If any event carries a completion action, the legacy (non-TLV) event list
		// cannot represent it; write an empty legacy list and put the full list in the
		// TLV stream (type 8) instead.
		let events_not_backwards_compatible = events.iter().any(|(_, action)| action.is_some());
		if events_not_backwards_compatible {
			0u64.write(writer)?;
		} else {
			(events.len() as u64).write(writer)?;
			for (event, _) in events.iter() {
				event.write(writer)?;
			}
		}

		// Legacy slot — presumably a count no longer populated (always empty); kept so
		// older readers can still parse. TODO(review): confirm which legacy field.
		0u64.write(writer)?;

		// Legacy slot once holding a separate serial: the highest seen timestamp is
		// written twice for backwards compatibility.
		(self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?;
		(self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?;

		// Legacy slot — an always-empty map/list count kept for older readers.
		(0 as u64).write(writer)?;

		// Legacy outbound-payment format: a flat list of session keys for payments that
		// are still in flight. The full structured map follows in the TLV stream (type 3).
		let mut num_pending_outbounds_compat: u64 = 0;
		for (_, outbound) in pending_outbound_payments.iter() {
			if !outbound.is_fulfilled() && !outbound.abandoned() {
				num_pending_outbounds_compat += outbound.remaining_parts() as u64;
			}
		}
		num_pending_outbounds_compat.write(writer)?;
		for (_, outbound) in pending_outbound_payments.iter() {
			match outbound {
				PendingOutboundPayment::Legacy { session_privs } |
				PendingOutboundPayment::Retryable { session_privs, .. } => {
					for session_priv in session_privs.iter() {
						session_priv.write(writer)?;
					}
				}
				PendingOutboundPayment::AwaitingInvoice { .. } => {},
				PendingOutboundPayment::AwaitingOffer { .. } => {},
				PendingOutboundPayment::InvoiceReceived { .. } => {},
				PendingOutboundPayment::StaticInvoiceReceived { .. } => {},
				PendingOutboundPayment::Fulfilled { .. } => {},
				PendingOutboundPayment::Abandoned { .. } => {},
			}
		}

		// Intermediate compat form (TLV type 1): payment-id-keyed session sets without
		// retry data, for readers older than the full map at type 3.
		let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = new_hash_map();
		for (id, outbound) in pending_outbound_payments.iter() {
			match outbound {
				PendingOutboundPayment::Legacy { session_privs } |
				PendingOutboundPayment::Retryable { session_privs, .. } => {
					pending_outbound_payments_no_retry.insert(*id, session_privs.clone());
				},
				_ => {},
			}
		}

		let mut pending_intercepted_htlcs = None;
		let our_pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
		if our_pending_intercepts.len() != 0 {
			pending_intercepted_htlcs = Some(our_pending_intercepts);
		}

		let mut pending_claiming_payments = Some(&claimable_payments.pending_claiming_payments);
		if pending_claiming_payments.as_ref().unwrap().is_empty() {
			// Elide the empty map rather than writing an empty optional TLV.
			pending_claiming_payments = None;
		}

		// In-flight `ChannelMonitorUpdate`s, keyed by (peer, funding outpoint), so they
		// can be replayed if we restart before the monitor persists them.
		let mut in_flight_monitor_updates: Option<HashMap<(&PublicKey, &OutPoint), &Vec<ChannelMonitorUpdate>>> = None;
		for ((counterparty_id, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
			for (funding_outpoint, updates) in peer_state.in_flight_monitor_updates.iter() {
				if !updates.is_empty() {
					if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(new_hash_map()); }
					in_flight_monitor_updates.as_mut().unwrap().insert((counterparty_id, funding_outpoint), updates);
				}
			}
		}

		// Extensible tail: all post-legacy state lives here. Type numbers must never be
		// renumbered or reused.
		write_tlv_fields!(writer, {
			(1, pending_outbound_payments_no_retry, required),
			(2, pending_intercepted_htlcs, option),
			(3, pending_outbound_payments, required),
			(4, pending_claiming_payments, option),
			(5, self.our_network_pubkey, required),
			(6, monitor_update_blocked_actions_per_peer, option),
			(7, self.fake_scid_rand_bytes, required),
			(8, if events_not_backwards_compatible { Some(&*events) } else { None }, option),
			(9, htlc_purposes, required_vec),
			(10, in_flight_monitor_updates, option),
			(11, self.probing_cookie_secret, required),
			(13, htlc_onion_fields, optional_vec),
			(14, decode_update_add_htlcs_opt, option),
			(15, self.inbound_payment_id_secret, required),
		});

		Ok(())
	}
}
12993
impl Writeable for VecDeque<(Event, Option<EventCompletionAction>)> {
	// Serializes the pending event queue as a length-prefixed list of (event, action)
	// pairs.
	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
		(self.len() as u64).write(w)?;
		for (event, action) in self.iter() {
			event.write(w)?;
			action.write(w)?;
			#[cfg(debug_assertions)] {
				// Sanity-check in debug builds: any event carrying a completion action
				// must round-trip (not be skipped by `MaybeReadable`), otherwise its
				// action would be silently dropped on reload.
				let event_encoded = event.encode();
				let event_read: Option<Event> =
					MaybeReadable::read(&mut &event_encoded[..]).unwrap();
				if action.is_some() { assert!(event_read.is_some()); }
			}
		}
		Ok(())
	}
}
impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
	// Deserializes the pending event queue written above. Events that `MaybeReadable`
	// skips (e.g. odd/unknown types) are dropped — but only if they carry no completion
	// action, since dropping an action would leave state permanently blocked.
	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
		let len: u64 = Readable::read(reader)?;
		// Cap the up-front allocation so a corrupt/malicious length claim cannot force a
		// huge reservation; the deque still grows as real entries arrive.
		const MAX_ALLOC_SIZE: u64 = 1024 * 16;
		let mut events: Self = VecDeque::with_capacity(cmp::min(
			MAX_ALLOC_SIZE/mem::size_of::<(events::Event, Option<EventCompletionAction>)>() as u64,
			len) as usize);
		for _ in 0..len {
			let ev_opt = MaybeReadable::read(reader)?;
			let action = Readable::read(reader)?;
			if let Some(ev) = ev_opt {
				events.push_back((ev, action));
			} else if action.is_some() {
				return Err(DecodeError::InvalidValue);
			}
		}
		Ok(events)
	}
}
13034
/// Arguments for the creation of a [`ChannelManager`] that are not deserialized: the
/// runtime dependencies and configuration a freshly-read manager is wired up with.
pub struct ChannelManagerReadArgs<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	/// The entropy source the deserialized `ChannelManager` will use.
	pub entropy_source: ES,

	/// The node signer the deserialized `ChannelManager` will use.
	pub node_signer: NS,

	/// The provider used to (re)derive per-channel signers.
	pub signer_provider: SP,

	/// The fee estimator the deserialized `ChannelManager` will use.
	pub fee_estimator: F,
	/// The chain watcher (`chain::Watch`) that channel monitors are registered with.
	pub chain_monitor: M,

	/// Used to broadcast transactions on-chain.
	pub tx_broadcaster: T,
	/// The router used to find payment paths.
	pub router: R,
	/// The router used to build onion-message paths.
	pub message_router: MR,
	/// The logger the deserialized `ChannelManager` will use.
	pub logger: L,
	/// Configuration applied where per-channel values were not serialized.
	pub default_config: UserConfig,

	/// The `ChannelMonitor`s for all channels, keyed by funding outpoint, which the
	/// deserialized manager will be paired back up with.
	pub channel_monitors: HashMap<OutPoint, &'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
}
13137
13138impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
13139 ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>
13140where
13141 M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
13142 T::Target: BroadcasterInterface,
13143 ES::Target: EntropySource,
13144 NS::Target: NodeSigner,
13145 SP::Target: SignerProvider,
13146 F::Target: FeeEstimator,
13147 R::Target: Router,
13148 MR::Target: MessageRouter,
13149 L::Target: Logger,
13150{
13151 pub fn new(
13155 entropy_source: ES, node_signer: NS, signer_provider: SP, fee_estimator: F,
13156 chain_monitor: M, tx_broadcaster: T, router: R, message_router: MR, logger: L,
13157 default_config: UserConfig,
13158 mut channel_monitors: Vec<&'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
13159 ) -> Self {
13160 Self {
13161 entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor,
13162 tx_broadcaster, router, message_router, logger, default_config,
13163 channel_monitors: hash_map_from_iter(
13164 channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) })
13165 ),
13166 }
13167 }
13168}
13169
13170impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
13173 ReadableArgs<ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>> for (BlockHash, Arc<ChannelManager<M, T, ES, NS, SP, F, R, MR, L>>)
13174where
13175 M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
13176 T::Target: BroadcasterInterface,
13177 ES::Target: EntropySource,
13178 NS::Target: NodeSigner,
13179 SP::Target: SignerProvider,
13180 F::Target: FeeEstimator,
13181 R::Target: Router,
13182 MR::Target: MessageRouter,
13183 L::Target: Logger,
13184{
13185 fn read<Reader: io::Read>(reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>) -> Result<Self, DecodeError> {
13186 let (blockhash, chan_manager) = <(BlockHash, ChannelManager<M, T, ES, NS, SP, F, R, MR, L>)>::read(reader, args)?;
13187 Ok((blockhash, Arc::new(chan_manager)))
13188 }
13189}
13190
13191impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
13192 ReadableArgs<ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>> for (BlockHash, ChannelManager<M, T, ES, NS, SP, F, R, MR, L>)
13193where
13194 M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
13195 T::Target: BroadcasterInterface,
13196 ES::Target: EntropySource,
13197 NS::Target: NodeSigner,
13198 SP::Target: SignerProvider,
13199 F::Target: FeeEstimator,
13200 R::Target: Router,
13201 MR::Target: MessageRouter,
13202 L::Target: Logger,
13203{
13204 fn read<Reader: io::Read>(reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>) -> Result<Self, DecodeError> {
13205 let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
13206
13207 let chain_hash: ChainHash = Readable::read(reader)?;
13208 let best_block_height: u32 = Readable::read(reader)?;
13209 let best_block_hash: BlockHash = Readable::read(reader)?;
13210
13211 let empty_peer_state = || {
13212 PeerState {
13213 channel_by_id: new_hash_map(),
13214 inbound_channel_request_by_id: new_hash_map(),
13215 latest_features: InitFeatures::empty(),
13216 pending_msg_events: Vec::new(),
13217 in_flight_monitor_updates: BTreeMap::new(),
13218 monitor_update_blocked_actions: BTreeMap::new(),
13219 actions_blocking_raa_monitor_updates: BTreeMap::new(),
13220 closed_channel_monitor_update_ids: BTreeMap::new(),
13221 is_connected: false,
13222 }
13223 };
13224
13225 let mut failed_htlcs = Vec::new();
13226 let channel_count: u64 = Readable::read(reader)?;
13227 let mut funding_txo_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128));
13228 let mut per_peer_state = hash_map_with_capacity(cmp::min(channel_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
13229 let mut outpoint_to_peer = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
13230 let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
13231 let mut channel_closures = VecDeque::new();
13232 let mut close_background_events = Vec::new();
13233 let mut funding_txo_to_channel_id = hash_map_with_capacity(channel_count as usize);
13234 for _ in 0..channel_count {
13235 let mut channel: Channel<SP> = Channel::read(reader, (
13236 &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
13237 ))?;
13238 let logger = WithChannelContext::from(&args.logger, &channel.context, None);
13239 let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
13240 funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id());
13241 funding_txo_set.insert(funding_txo.clone());
13242 if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
13243 if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
13244 channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() ||
13245 channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
13246 channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
13247 log_error!(logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
13249 log_error!(logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
13250 if channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
13251 log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
13252 &channel.context.channel_id(), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
13253 }
13254 if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() {
13255 log_error!(logger, " The ChannelMonitor for channel {} is at holder commitment number {} but the ChannelManager is at holder commitment number {}.",
13256 &channel.context.channel_id(), monitor.get_cur_holder_commitment_number(), channel.get_cur_holder_commitment_transaction_number());
13257 }
13258 if channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() {
13259 log_error!(logger, " The ChannelMonitor for channel {} is at revoked counterparty transaction number {} but the ChannelManager is at revoked counterparty transaction number {}.",
13260 &channel.context.channel_id(), monitor.get_min_seen_secret(), channel.get_revoked_counterparty_commitment_transaction_number());
13261 }
13262 if channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() {
13263 log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
13264 &channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
13265 }
13266 let mut shutdown_result = channel.context.force_shutdown(true, ClosureReason::OutdatedChannelManager);
13267 if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
13268 return Err(DecodeError::InvalidValue);
13269 }
13270 if let Some((counterparty_node_id, funding_txo, channel_id, mut update)) = shutdown_result.monitor_update {
13271 let latest_update_id = monitor.get_latest_update_id().saturating_add(1);
13275 update.update_id = latest_update_id;
13276 per_peer_state.entry(counterparty_node_id)
13277 .or_insert_with(|| Mutex::new(empty_peer_state()))
13278 .lock().unwrap()
13279 .closed_channel_monitor_update_ids.entry(channel_id)
13280 .and_modify(|v| *v = cmp::max(latest_update_id, *v))
13281 .or_insert(latest_update_id);
13282
13283 close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13284 counterparty_node_id, funding_txo, channel_id, update
13285 });
13286 }
13287 failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
13288 channel_closures.push_back((events::Event::ChannelClosed {
13289 channel_id: channel.context.channel_id(),
13290 user_channel_id: channel.context.get_user_id(),
13291 reason: ClosureReason::OutdatedChannelManager,
13292 counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
13293 channel_capacity_sats: Some(channel.context.get_value_satoshis()),
13294 channel_funding_txo: channel.context.get_funding_txo(),
13295 last_local_balance_msat: Some(channel.context.get_value_to_self_msat()),
13296 }, None));
13297 for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
13298 let mut found_htlc = false;
13299 for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() {
13300 if *channel_htlc_source == monitor_htlc_source { found_htlc = true; break; }
13301 }
13302 if !found_htlc {
13303 let logger = WithChannelContext::from(&args.logger, &channel.context, Some(*payment_hash));
13311 log_info!(logger,
13312 "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
13313 &channel.context.channel_id(), &payment_hash);
13314 failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.context.get_counterparty_node_id(), channel.context.channel_id()));
13315 }
13316 }
13317 } else {
13318 channel.on_startup_drop_completed_blocked_mon_updates_through(&logger, monitor.get_latest_update_id());
13319 log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {} with {} blocked updates",
13320 &channel.context.channel_id(), channel.context.get_latest_monitor_update_id(),
13321 monitor.get_latest_update_id(), channel.blocked_monitor_updates_pending());
13322 if let Some(short_channel_id) = channel.context.get_short_channel_id() {
13323 short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
13324 }
13325 if let Some(funding_txo) = channel.context.get_funding_txo() {
13326 outpoint_to_peer.insert(funding_txo, channel.context.get_counterparty_node_id());
13327 }
13328 per_peer_state.entry(channel.context.get_counterparty_node_id())
13329 .or_insert_with(|| Mutex::new(empty_peer_state()))
13330 .get_mut().unwrap()
13331 .channel_by_id.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
13332 }
13333 } else if channel.is_awaiting_initial_mon_persist() {
13334 let _ = channel.context.force_shutdown(false, ClosureReason::DisconnectedPeer);
13338 channel_closures.push_back((events::Event::ChannelClosed {
13339 channel_id: channel.context.channel_id(),
13340 user_channel_id: channel.context.get_user_id(),
13341 reason: ClosureReason::DisconnectedPeer,
13342 counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
13343 channel_capacity_sats: Some(channel.context.get_value_satoshis()),
13344 channel_funding_txo: channel.context.get_funding_txo(),
13345 last_local_balance_msat: Some(channel.context.get_value_to_self_msat()),
13346 }, None));
13347 } else {
13348 log_error!(logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id());
13349 log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
13350 log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
13351 log_error!(logger, " Without the ChannelMonitor we cannot continue without risking funds.");
13352 log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
13353 return Err(DecodeError::InvalidValue);
13354 }
13355 }
13356
13357 for (funding_txo, monitor) in args.channel_monitors.iter() {
13358 if !funding_txo_set.contains(funding_txo) {
13359 let mut should_queue_fc_update = false;
13360 if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
13361 if !monitor.offchain_closed() || monitor.get_latest_update_id() > 1 {
13368 should_queue_fc_update = !monitor.offchain_closed();
13369 let mut latest_update_id = monitor.get_latest_update_id();
13370 if should_queue_fc_update {
13371 latest_update_id += 1;
13372 }
13373 per_peer_state.entry(counterparty_node_id)
13374 .or_insert_with(|| Mutex::new(empty_peer_state()))
13375 .lock().unwrap()
13376 .closed_channel_monitor_update_ids.entry(monitor.channel_id())
13377 .and_modify(|v| *v = cmp::max(latest_update_id, *v))
13378 .or_insert(latest_update_id);
13379 }
13380 }
13381
13382 if !should_queue_fc_update {
13383 continue;
13384 }
13385
13386 let logger = WithChannelMonitor::from(&args.logger, monitor, None);
13387 let channel_id = monitor.channel_id();
13388 log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
13389 &channel_id);
13390 let mut monitor_update = ChannelMonitorUpdate {
13391 update_id: monitor.get_latest_update_id().saturating_add(1),
13392 counterparty_node_id: None,
13393 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
13394 channel_id: Some(monitor.channel_id()),
13395 };
13396 if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
13397 let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13398 counterparty_node_id,
13399 funding_txo: *funding_txo,
13400 channel_id,
13401 update: monitor_update,
13402 };
13403 close_background_events.push(update);
13404 } else {
13405 monitor_update.update_id = u64::MAX;
13412 close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, channel_id, monitor_update)));
13413 }
13414 }
13415 }
13416
13417 const MAX_ALLOC_SIZE: usize = 1024 * 64;
13418 let forward_htlcs_count: u64 = Readable::read(reader)?;
13419 let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128));
13420 for _ in 0..forward_htlcs_count {
13421 let short_channel_id = Readable::read(reader)?;
13422 let pending_forwards_count: u64 = Readable::read(reader)?;
13423 let mut pending_forwards = Vec::with_capacity(cmp::min(pending_forwards_count as usize, MAX_ALLOC_SIZE/mem::size_of::<HTLCForwardInfo>()));
13424 for _ in 0..pending_forwards_count {
13425 pending_forwards.push(Readable::read(reader)?);
13426 }
13427 forward_htlcs.insert(short_channel_id, pending_forwards);
13428 }
13429
13430 let claimable_htlcs_count: u64 = Readable::read(reader)?;
13431 let mut claimable_htlcs_list = Vec::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
13432 for _ in 0..claimable_htlcs_count {
13433 let payment_hash = Readable::read(reader)?;
13434 let previous_hops_len: u64 = Readable::read(reader)?;
13435 let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, MAX_ALLOC_SIZE/mem::size_of::<ClaimableHTLC>()));
13436 for _ in 0..previous_hops_len {
13437 previous_hops.push(<ClaimableHTLC as Readable>::read(reader)?);
13438 }
13439 claimable_htlcs_list.push((payment_hash, previous_hops));
13440 }
13441
13442 let peer_count: u64 = Readable::read(reader)?;
13443 for _ in 0..peer_count {
13444 let peer_pubkey: PublicKey = Readable::read(reader)?;
13445 let latest_features = Readable::read(reader)?;
13446 if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) {
13447 peer_state.get_mut().unwrap().latest_features = latest_features;
13448 }
13449 }
13450
13451 let event_count: u64 = Readable::read(reader)?;
13452 let mut pending_events_read: VecDeque<(events::Event, Option<EventCompletionAction>)> =
13453 VecDeque::with_capacity(cmp::min(event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(events::Event, Option<EventCompletionAction>)>()));
13454 for _ in 0..event_count {
13455 match MaybeReadable::read(reader)? {
13456 Some(event) => pending_events_read.push_back((event, None)),
13457 None => continue,
13458 }
13459 }
13460
13461 let background_event_count: u64 = Readable::read(reader)?;
13462 for _ in 0..background_event_count {
13463 match <u8 as Readable>::read(reader)? {
13464 0 => {
13465 let _: OutPoint = Readable::read(reader)?;
13469 let _: ChannelMonitorUpdate = Readable::read(reader)?;
13470 }
13471 _ => return Err(DecodeError::InvalidValue),
13472 }
13473 }
13474
13475 let _last_node_announcement_serial: u32 = Readable::read(reader)?; let highest_seen_timestamp: u32 = Readable::read(reader)?;
13477
13478 let pending_inbound_payment_count: u64 = Readable::read(reader)?;
13480 for _ in 0..pending_inbound_payment_count {
13481 let payment_hash: PaymentHash = Readable::read(reader)?;
13482 let logger = WithContext::from(&args.logger, None, None, Some(payment_hash));
13483 let inbound: PendingInboundPayment = Readable::read(reader)?;
13484 log_warn!(logger, "Ignoring deprecated pending inbound payment with payment hash {}: {:?}", payment_hash, inbound);
13485 }
13486
13487 let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?;
13488 let mut pending_outbound_payments_compat: HashMap<PaymentId, PendingOutboundPayment> =
13489 hash_map_with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
13490 for _ in 0..pending_outbound_payments_count_compat {
13491 let session_priv = Readable::read(reader)?;
13492 let payment = PendingOutboundPayment::Legacy {
13493 session_privs: hash_set_from_iter([session_priv]),
13494 };
13495 if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() {
13496 return Err(DecodeError::InvalidValue)
13497 };
13498 }
13499
13500 let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = None;
13502 let mut pending_outbound_payments = None;
13503 let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = Some(new_hash_map());
13504 let mut received_network_pubkey: Option<PublicKey> = None;
13505 let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
13506 let mut probing_cookie_secret: Option<[u8; 32]> = None;
13507 let mut claimable_htlc_purposes = None;
13508 let mut claimable_htlc_onion_fields = None;
13509 let mut pending_claiming_payments = Some(new_hash_map());
13510 let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> = Some(Vec::new());
13511 let mut events_override = None;
13512 let mut in_flight_monitor_updates: Option<HashMap<(PublicKey, OutPoint), Vec<ChannelMonitorUpdate>>> = None;
13513 let mut decode_update_add_htlcs: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> = None;
13514 let mut inbound_payment_id_secret = None;
13515 read_tlv_fields!(reader, {
13516 (1, pending_outbound_payments_no_retry, option),
13517 (2, pending_intercepted_htlcs, option),
13518 (3, pending_outbound_payments, option),
13519 (4, pending_claiming_payments, option),
13520 (5, received_network_pubkey, option),
13521 (6, monitor_update_blocked_actions_per_peer, option),
13522 (7, fake_scid_rand_bytes, option),
13523 (8, events_override, option),
13524 (9, claimable_htlc_purposes, optional_vec),
13525 (10, in_flight_monitor_updates, option),
13526 (11, probing_cookie_secret, option),
13527 (13, claimable_htlc_onion_fields, optional_vec),
13528 (14, decode_update_add_htlcs, option),
13529 (15, inbound_payment_id_secret, option),
13530 });
13531 let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map());
13532 if fake_scid_rand_bytes.is_none() {
13533 fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
13534 }
13535
13536 if probing_cookie_secret.is_none() {
13537 probing_cookie_secret = Some(args.entropy_source.get_secure_random_bytes());
13538 }
13539
13540 if inbound_payment_id_secret.is_none() {
13541 inbound_payment_id_secret = Some(args.entropy_source.get_secure_random_bytes());
13542 }
13543
13544 if let Some(events) = events_override {
13545 pending_events_read = events;
13546 }
13547
13548 if !channel_closures.is_empty() {
13549 pending_events_read.append(&mut channel_closures);
13550 }
13551
13552 if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
13553 pending_outbound_payments = Some(pending_outbound_payments_compat);
13554 } else if pending_outbound_payments.is_none() {
13555 let mut outbounds = new_hash_map();
13556 for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() {
13557 outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
13558 }
13559 pending_outbound_payments = Some(outbounds);
13560 }
13561 let pending_outbounds = OutboundPayments::new(pending_outbound_payments.unwrap());
13562
13563 let mut pending_background_events = Vec::new();
13575 macro_rules! handle_in_flight_updates {
13576 ($counterparty_node_id: expr, $chan_in_flight_upds: expr, $funding_txo: expr,
13577 $monitor: expr, $peer_state: expr, $logger: expr, $channel_info_log: expr
13578 ) => { {
13579 let mut max_in_flight_update_id = 0;
13580 $chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
13581 for update in $chan_in_flight_upds.iter() {
13582 log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
13583 update.update_id, $channel_info_log, &$monitor.channel_id());
13584 max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
13585 pending_background_events.push(
13586 BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13587 counterparty_node_id: $counterparty_node_id,
13588 funding_txo: $funding_txo,
13589 channel_id: $monitor.channel_id(),
13590 update: update.clone(),
13591 });
13592 }
13593 if $chan_in_flight_upds.is_empty() {
13594 pending_background_events.push(
13598 BackgroundEvent::MonitorUpdatesComplete {
13599 counterparty_node_id: $counterparty_node_id,
13600 channel_id: $monitor.channel_id(),
13601 });
13602 } else {
13603 $peer_state.closed_channel_monitor_update_ids.entry($monitor.channel_id())
13604 .and_modify(|v| *v = cmp::max(max_in_flight_update_id, *v))
13605 .or_insert(max_in_flight_update_id);
13606 }
13607 if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() {
13608 log_error!($logger, "Duplicate in-flight monitor update set for the same channel!");
13609 return Err(DecodeError::InvalidValue);
13610 }
13611 max_in_flight_update_id
13612 } }
13613 }
13614
13615 for (counterparty_id, peer_state_mtx) in per_peer_state.iter_mut() {
13616 let mut peer_state_lock = peer_state_mtx.lock().unwrap();
13617 let peer_state = &mut *peer_state_lock;
13618 for phase in peer_state.channel_by_id.values() {
13619 if let ChannelPhase::Funded(chan) = phase {
13620 let logger = WithChannelContext::from(&args.logger, &chan.context, None);
13621
13622 let funding_txo = chan.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
13625 let monitor = args.channel_monitors.get(&funding_txo)
13626 .expect("We already checked for monitor presence when loading channels");
13627 let mut max_in_flight_update_id = monitor.get_latest_update_id();
13628 if let Some(in_flight_upds) = &mut in_flight_monitor_updates {
13629 if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, funding_txo)) {
13630 max_in_flight_update_id = cmp::max(max_in_flight_update_id,
13631 handle_in_flight_updates!(*counterparty_id, chan_in_flight_upds,
13632 funding_txo, monitor, peer_state, logger, ""));
13633 }
13634 }
13635 if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id {
13636 log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
13638 log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
13639 chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id);
13640 log_error!(logger, " but the ChannelManager is at update_id {}.", chan.get_latest_unblocked_monitor_update_id());
13641 log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
13642 log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
13643 log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
13644 log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
13645 return Err(DecodeError::DangerousValue);
13646 }
13647 } else {
13648 debug_assert!(false);
13651 return Err(DecodeError::InvalidValue);
13652 }
13653 }
13654 }
13655
13656 if let Some(in_flight_upds) = in_flight_monitor_updates {
13657 for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds {
13658 let channel_id = funding_txo_to_channel_id.get(&funding_txo).copied();
13659 let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id, None);
13660 if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
13661 let peer_state_mutex = per_peer_state.entry(counterparty_id).or_insert_with(|| {
13665 Mutex::new(empty_peer_state())
13666 });
13667 let mut peer_state = peer_state_mutex.lock().unwrap();
13668 handle_in_flight_updates!(counterparty_id, chan_in_flight_updates,
13669 funding_txo, monitor, peer_state, logger, "closed ");
13670 } else {
13671 log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
13672 log_error!(logger, " The ChannelMonitor for channel {} is missing.", if let Some(channel_id) =
13673 channel_id { channel_id.to_string() } else { format!("with outpoint {}", funding_txo) } );
13674 log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
13675 log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
13676 log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
13677 log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
13678 log_error!(logger, " Pending in-flight updates are: {:?}", chan_in_flight_updates);
13679 return Err(DecodeError::InvalidValue);
13680 }
13681 }
13682 }
13683
13684 pending_background_events.reserve(close_background_events.len());
13687 'each_bg_event: for mut new_event in close_background_events {
13688 if let BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13689 counterparty_node_id, funding_txo, channel_id, update,
13690 } = &mut new_event {
13691 debug_assert_eq!(update.updates.len(), 1);
13692 debug_assert!(matches!(update.updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. }));
13693 let mut updated_id = false;
13694 for pending_event in pending_background_events.iter() {
13695 if let BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13696 counterparty_node_id: pending_cp, funding_txo: pending_funding,
13697 channel_id: pending_chan_id, update: pending_update,
13698 } = pending_event {
13699 let for_same_channel = counterparty_node_id == pending_cp
13700 && funding_txo == pending_funding
13701 && channel_id == pending_chan_id;
13702 if for_same_channel {
13703 debug_assert!(update.update_id >= pending_update.update_id);
13704 if pending_update.updates.iter().any(|upd| matches!(upd, ChannelMonitorUpdateStep::ChannelForceClosed { .. })) {
13705 continue 'each_bg_event;
13709 }
13710 update.update_id = pending_update.update_id.saturating_add(1);
13711 updated_id = true;
13712 }
13713 }
13714 }
13715 let mut per_peer_state = per_peer_state.get(counterparty_node_id)
13716 .expect("If we have pending updates for a channel it must have an entry")
13717 .lock().unwrap();
13718 if updated_id {
13719 per_peer_state
13720 .closed_channel_monitor_update_ids.entry(*channel_id)
13721 .and_modify(|v| *v = cmp::max(update.update_id, *v))
13722 .or_insert(update.update_id);
13723 }
13724 let in_flight_updates = per_peer_state.in_flight_monitor_updates
13725 .entry(*funding_txo)
13726 .or_insert_with(Vec::new);
13727 debug_assert!(!in_flight_updates.iter().any(|upd| upd == update));
13728 in_flight_updates.push(update.clone());
13729 }
13730 pending_background_events.push(new_event);
13731 }
13732
13733 let mut pending_claims_to_replay = Vec::new();
13737
13738 {
13739 for (_, monitor) in args.channel_monitors.iter() {
13748 let counterparty_opt = outpoint_to_peer.get(&monitor.get_funding_txo().0);
13749 if counterparty_opt.is_none() {
13750 for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
13751 let logger = WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash));
13752 if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source {
13753 if path.hops.is_empty() {
13754 log_error!(logger, "Got an empty path for a pending payment");
13755 return Err(DecodeError::InvalidValue);
13756 }
13757
13758 let mut session_priv_bytes = [0; 32];
13759 session_priv_bytes[..].copy_from_slice(&session_priv[..]);
13760 pending_outbounds.insert_from_monitor_on_startup(
13761 payment_id, htlc.payment_hash, session_priv_bytes, &path, best_block_height, logger
13762 );
13763 }
13764 }
13765 for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() {
13766 let logger = WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash));
13767 match htlc_source {
13768 HTLCSource::PreviousHopData(prev_hop_data) => {
13769 let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
13770 info.prev_funding_outpoint == prev_hop_data.outpoint &&
13771 info.prev_htlc_id == prev_hop_data.htlc_id
13772 };
13773 decode_update_add_htlcs.retain(|scid, update_add_htlcs| {
13779 update_add_htlcs.retain(|update_add_htlc| {
13780 let matches = *scid == prev_hop_data.short_channel_id &&
13781 update_add_htlc.htlc_id == prev_hop_data.htlc_id;
13782 if matches {
13783 log_info!(logger, "Removing pending to-decode HTLC with hash {} as it was forwarded to the closed channel {}",
13784 &htlc.payment_hash, &monitor.channel_id());
13785 }
13786 !matches
13787 });
13788 !update_add_htlcs.is_empty()
13789 });
13790 forward_htlcs.retain(|_, forwards| {
13791 forwards.retain(|forward| {
13792 if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
13793 if pending_forward_matches_htlc(&htlc_info) {
13794 log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
13795 &htlc.payment_hash, &monitor.channel_id());
13796 false
13797 } else { true }
13798 } else { true }
13799 });
13800 !forwards.is_empty()
13801 });
13802 pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
13803 if pending_forward_matches_htlc(&htlc_info) {
13804 log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
13805 &htlc.payment_hash, &monitor.channel_id());
13806 pending_events_read.retain(|(event, _)| {
13807 if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
13808 intercepted_id != ev_id
13809 } else { true }
13810 });
13811 false
13812 } else { true }
13813 });
13814 },
13815 HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } => {
13816 if let Some(preimage) = preimage_opt {
13817 let pending_events = Mutex::new(pending_events_read);
13818 let compl_action =
13827 EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
13828 channel_funding_outpoint: monitor.get_funding_txo().0,
13829 channel_id: monitor.channel_id(),
13830 counterparty_node_id: path.hops[0].pubkey,
13831 };
13832 pending_outbounds.claim_htlc(payment_id, preimage, session_priv,
13833 path, false, compl_action, &pending_events, &&logger);
13834 pending_events_read = pending_events.into_inner().unwrap();
13835 }
13836 },
13837 }
13838 }
13839 }
13840
13841 let mut fail_read = false;
13845 let outbound_claimed_htlcs_iter = monitor.get_all_current_outbound_htlcs()
13846 .into_iter()
13847 .filter_map(|(htlc_source, (htlc, preimage_opt))| {
13848 if let HTLCSource::PreviousHopData(prev_hop) = &htlc_source {
13849 if let Some(payment_preimage) = preimage_opt {
13850 let inbound_edge_monitor = args.channel_monitors.get(&prev_hop.outpoint);
13851 let inbound_edge_monitor = if let Some(monitor) = inbound_edge_monitor {
13863 monitor
13864 } else {
13865 return None;
13866 };
13867 let inbound_edge_balances = inbound_edge_monitor.get_claimable_balances();
13872 if inbound_edge_balances.is_empty() {
13873 return None;
13874 }
13875
13876 if prev_hop.counterparty_node_id.is_none() {
13877 let htlc_payment_hash: PaymentHash = payment_preimage.into();
13888 let balance_could_incl_htlc = |bal| match bal {
13889 &Balance::ClaimableOnChannelClose { .. } => {
13890 true
13893 },
13894 &Balance::MaybePreimageClaimableHTLC { payment_hash, .. } => {
13895 payment_hash == htlc_payment_hash
13896 },
13897 _ => false,
13898 };
13899 let htlc_may_be_in_balances =
13900 inbound_edge_balances.iter().any(balance_could_incl_htlc);
13901 if !htlc_may_be_in_balances {
13902 return None;
13903 }
13904
13905 if short_to_chan_info.get(&prev_hop.short_channel_id).is_none() {
13910 log_error!(args.logger,
13911 "We need to replay the HTLC claim for payment_hash {} (preimage {}) but cannot do so as the HTLC was forwarded prior to LDK 0.0.124.\
13912 All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1",
13913 htlc_payment_hash,
13914 payment_preimage,
13915 );
13916 fail_read = true;
13917 }
13918
13919 log_error!(args.logger,
13926 "We need to replay the HTLC claim for payment_hash {} (preimage {}) but don't have all the required information to do so reliably.\
13927 As long as the channel for the inbound edge of the forward remains open, this may work okay, but we may panic at runtime!\
13928 All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1\
13929 Continuing anyway, though panics may occur!",
13930 htlc_payment_hash,
13931 payment_preimage,
13932 );
13933 }
13934
13935 Some((htlc_source, payment_preimage, htlc.amount_msat,
13936 counterparty_opt.is_none(),
13940 counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
13941 monitor.get_funding_txo().0, monitor.channel_id()))
13942 } else { None }
13943 } else {
13944 None
13949 }
13950 });
13951 for tuple in outbound_claimed_htlcs_iter {
13952 pending_claims_to_replay.push(tuple);
13953 }
13954 if fail_read {
13955 return Err(DecodeError::InvalidValue);
13956 }
13957 }
13958 }
13959
13960 if !forward_htlcs.is_empty() || !decode_update_add_htlcs.is_empty() || pending_outbounds.needs_abandon() {
13961 pending_events_read.push_back((events::Event::PendingHTLCsForwardable {
13967 time_forwardable: Duration::from_secs(2),
13968 }, None));
13969 }
13970
13971 let expanded_inbound_key = args.node_signer.get_inbound_payment_key();
13972
13973 let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len());
13974 if let Some(purposes) = claimable_htlc_purposes {
13975 if purposes.len() != claimable_htlcs_list.len() {
13976 return Err(DecodeError::InvalidValue);
13977 }
13978 if let Some(onion_fields) = claimable_htlc_onion_fields {
13979 if onion_fields.len() != claimable_htlcs_list.len() {
13980 return Err(DecodeError::InvalidValue);
13981 }
13982 for (purpose, (onion, (payment_hash, htlcs))) in
13983 purposes.into_iter().zip(onion_fields.into_iter().zip(claimable_htlcs_list.into_iter()))
13984 {
13985 let existing_payment = claimable_payments.insert(payment_hash, ClaimablePayment {
13986 purpose, htlcs, onion_fields: onion,
13987 });
13988 if existing_payment.is_some() { return Err(DecodeError::InvalidValue); }
13989 }
13990 } else {
13991 for (purpose, (payment_hash, htlcs)) in purposes.into_iter().zip(claimable_htlcs_list.into_iter()) {
13992 let existing_payment = claimable_payments.insert(payment_hash, ClaimablePayment {
13993 purpose, htlcs, onion_fields: None,
13994 });
13995 if existing_payment.is_some() { return Err(DecodeError::InvalidValue); }
13996 }
13997 }
13998 } else {
13999 for (payment_hash, htlcs) in claimable_htlcs_list.drain(..) {
14002 if htlcs.is_empty() {
14003 return Err(DecodeError::InvalidValue);
14004 }
14005 let purpose = match &htlcs[0].onion_payload {
14006 OnionPayload::Invoice { _legacy_hop_data } => {
14007 if let Some(hop_data) = _legacy_hop_data {
14008 events::PaymentPurpose::Bolt11InvoicePayment {
14009 payment_preimage:
14010 match inbound_payment::verify(
14011 payment_hash, &hop_data, 0, &expanded_inbound_key, &args.logger
14012 ) {
14013 Ok((payment_preimage, _)) => payment_preimage,
14014 Err(()) => {
14015 log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", &payment_hash);
14016 return Err(DecodeError::InvalidValue);
14017 }
14018 },
14019 payment_secret: hop_data.payment_secret,
14020 }
14021 } else { return Err(DecodeError::InvalidValue); }
14022 },
14023 OnionPayload::Spontaneous(payment_preimage) =>
14024 events::PaymentPurpose::SpontaneousPayment(*payment_preimage),
14025 };
14026 claimable_payments.insert(payment_hash, ClaimablePayment {
14027 purpose, htlcs, onion_fields: None,
14028 });
14029 }
14030 }
14031
14032 for (payment_hash, payment) in claimable_payments.iter() {
14036 for htlc in payment.htlcs.iter() {
14037 if htlc.prev_hop.counterparty_node_id.is_some() {
14038 continue;
14039 }
14040 if short_to_chan_info.get(&htlc.prev_hop.short_channel_id).is_some() {
14041 log_error!(args.logger,
14042 "We do not have the required information to claim a pending payment with payment hash {} reliably.\
14043 As long as the channel for the inbound edge of the forward remains open, this may work okay, but we may panic at runtime!\
14044 All HTLCs that were received by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1\
14045 Continuing anyway, though panics may occur!",
14046 payment_hash,
14047 );
14048 } else {
14049 log_error!(args.logger,
14050 "We do not have the required information to claim a pending payment with payment hash {}.\
14051 All HTLCs that were received by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1",
14052 payment_hash,
14053 );
14054 return Err(DecodeError::InvalidValue);
14055 }
14056 }
14057 }
14058
14059 let mut secp_ctx = Secp256k1::new();
14060 secp_ctx.seeded_randomize(&args.entropy_source.get_secure_random_bytes());
14061
14062 let our_network_pubkey = match args.node_signer.get_node_id(Recipient::Node) {
14063 Ok(key) => key,
14064 Err(()) => return Err(DecodeError::InvalidValue)
14065 };
14066 if let Some(network_pubkey) = received_network_pubkey {
14067 if network_pubkey != our_network_pubkey {
14068 log_error!(args.logger, "Key that was generated does not match the existing key.");
14069 return Err(DecodeError::InvalidValue);
14070 }
14071 }
14072
14073 let mut outbound_scid_aliases = new_hash_set();
14074 for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() {
14075 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
14076 let peer_state = &mut *peer_state_lock;
14077 for (chan_id, phase) in peer_state.channel_by_id.iter_mut() {
14078 if let ChannelPhase::Funded(chan) = phase {
14079 let logger = WithChannelContext::from(&args.logger, &chan.context, None);
14080 if chan.context.outbound_scid_alias() == 0 {
14081 let mut outbound_scid_alias;
14082 loop {
14083 outbound_scid_alias = fake_scid::Namespace::OutboundAlias
14084 .get_fake_scid(best_block_height, &chain_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
14085 if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
14086 }
14087 chan.context.set_outbound_scid_alias(outbound_scid_alias);
14088 } else if !outbound_scid_aliases.insert(chan.context.outbound_scid_alias()) {
14089 log_error!(logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
14092 return Err(DecodeError::InvalidValue);
14093 }
14094 if chan.context.is_usable() {
14095 if short_to_chan_info.insert(chan.context.outbound_scid_alias(), (chan.context.get_counterparty_node_id(), *chan_id)).is_some() {
14096 log_error!(logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
14099 return Err(DecodeError::InvalidValue);
14100 }
14101 }
14102 } else {
14103 debug_assert!(false);
14106 return Err(DecodeError::InvalidValue);
14107 }
14108 }
14109 }
14110
14111 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(args.fee_estimator);
14112
14113 for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
14114 if let Some(peer_state) = per_peer_state.get(&node_id) {
14115 for (channel_id, actions) in monitor_update_blocked_actions.iter() {
14116 let logger = WithContext::from(&args.logger, Some(node_id), Some(*channel_id), None);
14117 for action in actions.iter() {
14118 if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
14119 downstream_counterparty_and_funding_outpoint:
14120 Some(EventUnblockedChannel {
14121 counterparty_node_id: blocked_node_id,
14122 funding_txo: _,
14123 channel_id: blocked_channel_id,
14124 blocking_action,
14125 }), ..
14126 } = action {
14127 if let Some(blocked_peer_state) = per_peer_state.get(blocked_node_id) {
14128 log_trace!(logger,
14129 "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
14130 blocked_channel_id);
14131 blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
14132 .entry(*blocked_channel_id)
14133 .or_insert_with(Vec::new).push(blocking_action.clone());
14134 } else {
14135 }
14141 }
14142 if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately { .. } = action {
14143 debug_assert!(false, "Non-event-generating channel freeing should not appear in our queue");
14144 }
14145 }
14146 }
14147 peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
14148 } else {
14149 log_error!(WithContext::from(&args.logger, Some(node_id), None, None), "Got blocked actions without a per-peer-state for {}", node_id);
14150 return Err(DecodeError::InvalidValue);
14151 }
14152 }
14153
14154 let channel_manager = ChannelManager {
14155 chain_hash,
14156 fee_estimator: bounded_fee_estimator,
14157 chain_monitor: args.chain_monitor,
14158 tx_broadcaster: args.tx_broadcaster,
14159 router: args.router,
14160 message_router: args.message_router,
14161
14162 best_block: RwLock::new(BestBlock::new(best_block_hash, best_block_height)),
14163
14164 inbound_payment_key: expanded_inbound_key,
14165 pending_outbound_payments: pending_outbounds,
14166 pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
14167
14168 forward_htlcs: Mutex::new(forward_htlcs),
14169 decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs),
14170 claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap() }),
14171 outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
14172 outpoint_to_peer: Mutex::new(outpoint_to_peer),
14173 short_to_chan_info: FairRwLock::new(short_to_chan_info),
14174 fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(),
14175
14176 probing_cookie_secret: probing_cookie_secret.unwrap(),
14177 inbound_payment_id_secret: inbound_payment_id_secret.unwrap(),
14178
14179 our_network_pubkey,
14180 secp_ctx,
14181
14182 highest_seen_timestamp: AtomicUsize::new(highest_seen_timestamp as usize),
14183
14184 per_peer_state: FairRwLock::new(per_peer_state),
14185
14186 pending_events: Mutex::new(pending_events_read),
14187 pending_events_processor: AtomicBool::new(false),
14188 pending_background_events: Mutex::new(pending_background_events),
14189 total_consistency_lock: RwLock::new(()),
14190 background_events_processed_since_startup: AtomicBool::new(false),
14191
14192 event_persist_notifier: Notifier::new(),
14193 needs_persist_flag: AtomicBool::new(false),
14194
14195 funding_batch_states: Mutex::new(BTreeMap::new()),
14196
14197 pending_offers_messages: Mutex::new(Vec::new()),
14198 pending_async_payments_messages: Mutex::new(Vec::new()),
14199
14200 pending_broadcast_messages: Mutex::new(Vec::new()),
14201
14202 entropy_source: args.entropy_source,
14203 node_signer: args.node_signer,
14204 signer_provider: args.signer_provider,
14205
14206 last_days_feerates: Mutex::new(VecDeque::new()),
14207
14208 logger: args.logger,
14209 default_configuration: args.default_config,
14210
14211 #[cfg(feature = "dnssec")]
14212 hrn_resolver: OMNameResolver::new(highest_seen_timestamp, best_block_height),
14213 #[cfg(feature = "dnssec")]
14214 pending_dns_onion_messages: Mutex::new(Vec::new()),
14215
14216 #[cfg(feature = "_test_utils")]
14217 testing_dnssec_proof_offer_resolution_override: Mutex::new(new_hash_map()),
14218 };
14219
14220 let mut processed_claims: HashSet<Vec<MPPClaimHTLCSource>> = new_hash_set();
14221 for (_, monitor) in args.channel_monitors.iter() {
14222 for (payment_hash, (payment_preimage, payment_claims)) in monitor.get_stored_preimages() {
14223 if !payment_claims.is_empty() {
14224 for payment_claim in payment_claims {
14225 if processed_claims.contains(&payment_claim.mpp_parts) {
14226 continue;
14231 }
14232 if payment_claim.mpp_parts.is_empty() {
14233 return Err(DecodeError::InvalidValue);
14234 }
14235 let pending_claims = PendingMPPClaim {
14236 channels_without_preimage: payment_claim.mpp_parts.clone(),
14237 channels_with_preimage: Vec::new(),
14238 };
14239 let pending_claim_ptr_opt = Some(Arc::new(Mutex::new(pending_claims)));
14240
14241 let claim_found =
14250 channel_manager.claimable_payments.lock().unwrap().begin_claiming_payment(
14251 payment_hash, &channel_manager.node_signer, &channel_manager.logger,
14252 &channel_manager.inbound_payment_id_secret, true,
14253 );
14254 if claim_found.is_err() {
14255 let mut claimable_payments = channel_manager.claimable_payments.lock().unwrap();
14256 match claimable_payments.pending_claiming_payments.entry(payment_hash) {
14257 hash_map::Entry::Occupied(_) => {
14258 debug_assert!(false, "Entry was added in begin_claiming_payment");
14259 return Err(DecodeError::InvalidValue);
14260 },
14261 hash_map::Entry::Vacant(entry) => {
14262 entry.insert(payment_claim.claiming_payment);
14263 },
14264 }
14265 }
14266
14267 for part in payment_claim.mpp_parts.iter() {
14268 let pending_mpp_claim = pending_claim_ptr_opt.as_ref().map(|ptr| (
14269 part.counterparty_node_id, part.channel_id, part.htlc_id,
14270 PendingMPPClaimPointer(Arc::clone(&ptr))
14271 ));
14272 let pending_claim_ptr = pending_claim_ptr_opt.as_ref().map(|ptr|
14273 RAAMonitorUpdateBlockingAction::ClaimedMPPPayment {
14274 pending_claim: PendingMPPClaimPointer(Arc::clone(&ptr)),
14275 }
14276 );
14277 channel_manager.claim_mpp_part(
14281 part.into(), payment_preimage, None,
14282 |_, _|
14283 (Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim }), pending_claim_ptr)
14284 );
14285 }
14286 processed_claims.insert(payment_claim.mpp_parts);
14287 }
14288 } else {
14289 let per_peer_state = channel_manager.per_peer_state.read().unwrap();
14290 let mut claimable_payments = channel_manager.claimable_payments.lock().unwrap();
14291 let payment = claimable_payments.claimable_payments.remove(&payment_hash);
14292 mem::drop(claimable_payments);
14293 if let Some(payment) = payment {
14294 log_info!(channel_manager.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", &payment_hash);
14295 let mut claimable_amt_msat = 0;
14296 let mut receiver_node_id = Some(our_network_pubkey);
14297 let phantom_shared_secret = payment.htlcs[0].prev_hop.phantom_shared_secret;
14298 if phantom_shared_secret.is_some() {
14299 let phantom_pubkey = channel_manager.node_signer.get_node_id(Recipient::PhantomNode)
14300 .expect("Failed to get node_id for phantom node recipient");
14301 receiver_node_id = Some(phantom_pubkey)
14302 }
14303 for claimable_htlc in &payment.htlcs {
14304 claimable_amt_msat += claimable_htlc.value;
14305
14306 let previous_channel_id = claimable_htlc.prev_hop.channel_id;
14322 let peer_node_id_opt = channel_manager.outpoint_to_peer.lock().unwrap()
14323 .get(&claimable_htlc.prev_hop.outpoint).cloned();
14324 if let Some(peer_node_id) = peer_node_id_opt {
14325 let peer_state_mutex = per_peer_state.get(&peer_node_id).unwrap();
14326 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
14327 let peer_state = &mut *peer_state_lock;
14328 if let Some(ChannelPhase::Funded(channel)) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
14329 let logger = WithChannelContext::from(&channel_manager.logger, &channel.context, Some(payment_hash));
14330 channel.claim_htlc_while_disconnected_dropping_mon_update_legacy(
14331 claimable_htlc.prev_hop.htlc_id, payment_preimage, &&logger
14332 );
14333 }
14334 }
14335 if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
14336 previous_hop_monitor.provide_payment_preimage_unsafe_legacy(
14349 &payment_hash, &payment_preimage, &channel_manager.tx_broadcaster,
14350 &channel_manager.fee_estimator, &channel_manager.logger
14351 );
14352 }
14353 }
14354 let mut pending_events = channel_manager.pending_events.lock().unwrap();
14355 let payment_id = payment.inbound_payment_id(&inbound_payment_id_secret.unwrap());
14356 pending_events.push_back((events::Event::PaymentClaimed {
14357 receiver_node_id,
14358 payment_hash,
14359 purpose: payment.purpose,
14360 amount_msat: claimable_amt_msat,
14361 htlcs: payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(),
14362 sender_intended_total_msat: payment.htlcs.first().map(|htlc| htlc.total_msat),
14363 onion_fields: payment.onion_fields,
14364 payment_id: Some(payment_id),
14365 }, None));
14366 }
14367 }
14368 }
14369 }
14370
14371 for htlc_source in failed_htlcs.drain(..) {
14372 let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
14373 let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
14374 let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
14375 channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
14376 }
14377
14378 for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding, downstream_channel_id) in pending_claims_to_replay {
14379 channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None,
14383 downstream_closed, true, downstream_node_id, downstream_funding,
14384 downstream_channel_id, None
14385 );
14386 }
14387
14388 Ok((best_block_hash.clone(), channel_manager))
14392 }
14393}
14394
14395#[cfg(test)]
14396mod tests {
14397 use bitcoin::hashes::Hash;
14398 use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
14399 use core::sync::atomic::Ordering;
14400 use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
14401 use crate::ln::types::ChannelId;
14402 use crate::types::payment::{PaymentPreimage, PaymentHash, PaymentSecret};
14403 use crate::ln::channelmanager::{create_recv_pending_htlc_info, HTLCForwardInfo, inbound_payment, PaymentId, RecipientOnionFields, InterceptId};
14404 use crate::ln::functional_test_utils::*;
14405 use crate::ln::msgs::{self, ErrorAction};
14406 use crate::ln::msgs::ChannelMessageHandler;
14407 use crate::ln::outbound_payment::Retry;
14408 use crate::prelude::*;
14409 use crate::routing::router::{PaymentParameters, RouteParameters, find_route};
14410 use crate::util::errors::APIError;
14411 use crate::util::ser::Writeable;
14412 use crate::util::test_utils;
14413 use crate::util::config::{ChannelConfig, ChannelConfigUpdate};
14414 use crate::sign::EntropySource;
14415
	#[test]
	fn test_notify_limits() {
		// Check that we only signal the event/persistence-needed future when something actually
		// changed that requires a `ChannelManager` re-persist, and that redundant or irrelevant
		// `channel_update` messages do *not* wake the future.
		let chanmon_cfgs = create_chanmon_cfgs(3);
		let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
		let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
		let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

		// All nodes start with a persistable update pending: a freshly-constructed
		// `ChannelManager` always wants an initial persistence.
		assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
		assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
		assert!(nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());

		let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1);

		// Double the fee in both directional updates so that, when applied later, they represent
		// a real change to the channel's forwarding parameters (and thus require a persist).
		chan.0.contents.fee_base_msat *= 2;
		chan.1.contents.fee_base_msat *= 2;
		let node_a_chan_info = nodes[0].node.list_channels_with_counterparty(
			&nodes[1].node.get_our_node_id()).pop().unwrap();
		let node_b_chan_info = nodes[1].node.list_channels_with_counterparty(
			&nodes[0].node.get_our_node_id()).pop().unwrap();

		// Opening the channel made nodes 0 and 1 persistable (node 2 is not a party to it).
		// Polling consumes the notification, so an immediate re-poll must come back incomplete.
		assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
		assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
		assert!(!nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
		assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
		assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());

		// Node 2 receives updates for a channel it has no part in; that must not trigger a
		// persist notification on node 2.
		nodes[2].node.handle_channel_update(nodes[1].node.get_our_node_id(), &chan.0);
		nodes[2].node.handle_channel_update(nodes[1].node.get_our_node_id(), &chan.1);
		assert!(!nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());

		// Updates delivered as if from the wrong peer (node 2, who is not our counterparty on
		// this channel) are ignored: no notification, and the stored channel info is unchanged.
		nodes[0].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.0);
		nodes[0].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.1);
		nodes[1].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.0);
		nodes[1].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.1);
		assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
		assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());

		assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
		assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);

		// Work out which directional update belongs to which node: bit 0 of `channel_flags`
		// marks the direction, and `as_node_one` tells us whether node 0 is `node_one` in the
		// announcement ordering.
		let as_node_one = nodes[0].node.get_our_node_id().serialize()[..] < nodes[1].node.get_our_node_id().serialize()[..];
		let as_update = if as_node_one == (chan.0.contents.channel_flags & 1 == 0 ) { &chan.0 } else { &chan.1 };
		let bs_update = if as_node_one == (chan.0.contents.channel_flags & 1 == 0 ) { &chan.1 } else { &chan.0 };

		// Each node handling an update for its *own* direction changes nothing it stores about
		// the counterparty, so no notification and no change to the channel list.
		nodes[0].node.handle_channel_update(nodes[1].node.get_our_node_id(), &as_update);
		nodes[1].node.handle_channel_update(nodes[0].node.get_our_node_id(), &bs_update);
		assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
		assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
		assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
		assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);

		// Handling the counterparty's update (with the doubled fee from above) is a real change:
		// the future completes and the stored channel info now differs from the snapshot.
		nodes[0].node.handle_channel_update(nodes[1].node.get_our_node_id(), &bs_update);
		nodes[1].node.handle_channel_update(nodes[0].node.get_our_node_id(), &as_update);
		assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
		assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
		assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
		assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
	}
14495
	#[test]
	fn test_keysend_dup_hash_partial_mpp() {
		// Test that a keysend (spontaneous) payment whose hash collides with a pending partial
		// MPP payment is rejected by the recipient, and that the MPP payment can still complete
		// and be claimed afterwards.
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
		create_announced_chan_between_nodes(&nodes, 0, 1);

		// Build a two-path MPP route by duplicating the single path of a normal route.
		let (route, our_payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
		let mut mpp_route = route.clone();
		mpp_route.paths.push(mpp_route.paths[0].clone());

		let payment_id = PaymentId([42; 32]);
		// Send the first of two MPP parts; the recipient will hold it waiting for the rest.
		let cur_height = CHAN_CONFIRM_DEPTH + 1; let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
			RecipientOnionFields::secret_only(payment_secret), payment_id, &mpp_route).unwrap();
		nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &our_payment_hash,
			RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
		check_added_monitors!(nodes[0], 1);
		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);

		// Next, attempt a keysend payment carrying the preimage of the pending MPP payment
		// (so its payment hash collides with the partially-received payment above).
		nodes[0].node.send_spontaneous_payment(
			Some(payment_preimage), RecipientOnionFields::spontaneous_empty(),
			PaymentId(payment_preimage.0), route.route_params.clone().unwrap(), Retry::Attempts(0)
		).unwrap();
		check_added_monitors!(nodes[0], 1);
		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		let ev = events.drain(..).next().unwrap();
		let payment_event = SendEvent::from_event(ev);
		nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
		check_added_monitors!(nodes[1], 0);
		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
		// The recipient rejects the colliding keysend HTLC and fails it back to the sender.
		expect_pending_htlcs_forwardable!(nodes[1]);
		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
		check_added_monitors!(nodes[1], 1);
		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
		assert!(updates.update_add_htlcs.is_empty());
		assert!(updates.update_fulfill_htlcs.is_empty());
		assert_eq!(updates.update_fail_htlcs.len(), 1);
		assert!(updates.update_fail_malformed_htlcs.is_empty());
		assert!(updates.update_fee.is_none());
		nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
		expect_payment_failed!(nodes[0], our_payment_hash, true);

		// Now send the second MPP part, completing the original payment despite the collision.
		nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &our_payment_hash,
			RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
		check_added_monitors!(nodes[0], 1);
		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), true, None);

		// Claim the MPP payment; two monitor updates, one per claimed HTLC.
		nodes[1].node.claim_funds(payment_preimage);
		expect_payment_claimed!(nodes[1], our_payment_hash, 200_000);
		check_added_monitors!(nodes[1], 2);

		// Manually walk both fulfills through the full commitment_signed/RAA dance. The two
		// fulfills arrive in separate update batches, so the exchange below interleaves both
		// channels' state updates in the exact order the protocol requires.
		let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
		nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]);
		expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
		nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed);
		check_added_monitors!(nodes[0], 1);
		let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
		nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa);
		check_added_monitors!(nodes[1], 1);
		let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
		nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_first_cs);
		check_added_monitors!(nodes[1], 1);
		let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
		nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
		nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
		check_added_monitors!(nodes[0], 1);
		let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
		nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa);
		let as_second_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
		check_added_monitors!(nodes[0], 1);
		nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa);
		check_added_monitors!(nodes[1], 1);
		nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_second_updates.commitment_signed);
		check_added_monitors!(nodes[1], 1);
		let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
		nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_third_raa);
		check_added_monitors!(nodes[0], 1);

		// With both parts resolved, the sender sees one PaymentPathSuccessful per MPP part.
		// (Both parts used a clone of the same path, hence both compare against paths[0].)
		let events = nodes[0].node.get_and_clear_pending_events();
		assert_eq!(events.len(), 2);
		match events[0] {
			Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
				assert_eq!(payment_id, *actual_payment_id);
				assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
				assert_eq!(route.paths[0], *path);
			},
			_ => panic!("Unexpected event"),
		}
		match events[1] {
			Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
				assert_eq!(payment_id, *actual_payment_id);
				assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
				assert_eq!(route.paths[0], *path);
			},
			_ => panic!("Unexpected event"),
		}
	}
14614
14615 #[test]
14616 fn test_keysend_dup_payment_hash() {
14617 let chanmon_cfgs = create_chanmon_cfgs(2);
14625 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
14626 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
14627 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
14628 create_announced_chan_between_nodes(&nodes, 0, 1);
14629 let scorer = test_utils::TestScorer::new();
14630 let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
14631
14632 let expected_route = [&nodes[1]];
14634 let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &expected_route, 100_000);
14635
14636 let route_params = RouteParameters::from_payment_params_and_value(
14638 PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(),
14639 TEST_FINAL_CLTV, false), 100_000);
14640 nodes[0].node.send_spontaneous_payment(
14641 Some(payment_preimage), RecipientOnionFields::spontaneous_empty(),
14642 PaymentId(payment_preimage.0), route_params.clone(), Retry::Attempts(0)
14643 ).unwrap();
14644 check_added_monitors!(nodes[0], 1);
14645 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
14646 assert_eq!(events.len(), 1);
14647 let ev = events.drain(..).next().unwrap();
14648 let payment_event = SendEvent::from_event(ev);
14649 nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
14650 check_added_monitors!(nodes[1], 0);
14651 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
14652 expect_pending_htlcs_forwardable!(nodes[1]);
14655 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
14656 check_added_monitors!(nodes[1], 1);
14657 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
14658 assert!(updates.update_add_htlcs.is_empty());
14659 assert!(updates.update_fulfill_htlcs.is_empty());
14660 assert_eq!(updates.update_fail_htlcs.len(), 1);
14661 assert!(updates.update_fail_malformed_htlcs.is_empty());
14662 assert!(updates.update_fee.is_none());
14663 nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
14664 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
14665 expect_payment_failed!(nodes[0], payment_hash, true);
14666
14667 claim_payment(&nodes[0], &expected_route, payment_preimage);
14669
14670 let payment_preimage = PaymentPreimage([42; 32]);
14672 let route = find_route(
14673 &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
14674 None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
14675 ).unwrap();
14676 let payment_hash = nodes[0].node.send_spontaneous_payment(
14677 Some(payment_preimage), RecipientOnionFields::spontaneous_empty(),
14678 PaymentId(payment_preimage.0), route.route_params.clone().unwrap(), Retry::Attempts(0)
14679 ).unwrap();
14680 check_added_monitors!(nodes[0], 1);
14681 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
14682 assert_eq!(events.len(), 1);
14683 let event = events.pop().unwrap();
14684 let path = vec![&nodes[1]];
14685 pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
14686
14687 let payment_secret = PaymentSecret([43; 32]);
14689 nodes[0].node.send_payment_with_route(route.clone(), payment_hash,
14690 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
14691 check_added_monitors!(nodes[0], 1);
14692 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
14693 assert_eq!(events.len(), 1);
14694 let ev = events.drain(..).next().unwrap();
14695 let payment_event = SendEvent::from_event(ev);
14696 nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
14697 check_added_monitors!(nodes[1], 0);
14698 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
14699 expect_pending_htlcs_forwardable!(nodes[1]);
14700 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
14701 check_added_monitors!(nodes[1], 1);
14702 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
14703 assert!(updates.update_add_htlcs.is_empty());
14704 assert!(updates.update_fulfill_htlcs.is_empty());
14705 assert_eq!(updates.update_fail_htlcs.len(), 1);
14706 assert!(updates.update_fail_malformed_htlcs.is_empty());
14707 assert!(updates.update_fee.is_none());
14708 nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
14709 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
14710 expect_payment_failed!(nodes[0], payment_hash, true);
14711
14712 claim_payment(&nodes[0], &expected_route, payment_preimage);
14714
14715 let payment_id_1 = PaymentId([44; 32]);
14717 let payment_hash = nodes[0].node.send_spontaneous_payment(
14718 Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_1,
14719 route.route_params.clone().unwrap(), Retry::Attempts(0)
14720 ).unwrap();
14721 check_added_monitors!(nodes[0], 1);
14722 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
14723 assert_eq!(events.len(), 1);
14724 let event = events.pop().unwrap();
14725 let path = vec![&nodes[1]];
14726 pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
14727
14728 let route_params = RouteParameters::from_payment_params_and_value(
14730 PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV, false),
14731 100_000
14732 );
14733 let payment_id_2 = PaymentId([45; 32]);
14734 nodes[0].node.send_spontaneous_payment(
14735 Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_2, route_params,
14736 Retry::Attempts(0)
14737 ).unwrap();
14738 check_added_monitors!(nodes[0], 1);
14739 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
14740 assert_eq!(events.len(), 1);
14741 let ev = events.drain(..).next().unwrap();
14742 let payment_event = SendEvent::from_event(ev);
14743 nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
14744 check_added_monitors!(nodes[1], 0);
14745 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
14746 expect_pending_htlcs_forwardable!(nodes[1]);
14747 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
14748 check_added_monitors!(nodes[1], 1);
14749 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
14750 assert!(updates.update_add_htlcs.is_empty());
14751 assert!(updates.update_fulfill_htlcs.is_empty());
14752 assert_eq!(updates.update_fail_htlcs.len(), 1);
14753 assert!(updates.update_fail_malformed_htlcs.is_empty());
14754 assert!(updates.update_fee.is_none());
14755 nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
14756 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
14757 expect_payment_failed!(nodes[0], payment_hash, true);
14758
14759 claim_payment(&nodes[0], &expected_route, payment_preimage);
14761 }
14762
#[test]
fn test_keysend_hash_mismatch() {
	// Test that a keysend HTLC whose payment hash is not the SHA256 of the payment
	// preimage carried in the onion is rejected by the receiver (with a log entry),
	// rather than being claimed.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let payer_pubkey = nodes[0].node.get_our_node_id();
	let payee_pubkey = nodes[1].node.get_our_node_id();

	let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
	let route_params = RouteParameters::from_payment_params_and_value(
		PaymentParameters::for_keysend(payee_pubkey, 40, false), 10_000);
	let network_graph = nodes[0].network_graph;
	let first_hops = nodes[0].node.list_usable_channels();
	let scorer = test_utils::TestScorer::new();
	let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
	let route = find_route(
		&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
		nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
	).unwrap();

	// Register a pending payment under a hash that deliberately does NOT match the
	// preimage we will place in the onion.
	let test_preimage = PaymentPreimage([42; 32]);
	let mismatch_payment_hash = PaymentHash([43; 32]);
	let session_privs = nodes[0].node.test_add_new_pending_payment(mismatch_payment_hash,
		RecipientOnionFields::spontaneous_empty(), PaymentId(mismatch_payment_hash.0), &route).unwrap();
	nodes[0].node.test_send_payment_internal(&route, mismatch_payment_hash,
		RecipientOnionFields::spontaneous_empty(), Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap();
	check_added_monitors!(nodes[0], 1);

	// The sender still commits a single HTLC add for the bogus payment.
	let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	assert_eq!(updates.update_add_htlcs.len(), 1);
	assert!(updates.update_fulfill_htlcs.is_empty());
	assert!(updates.update_fail_htlcs.is_empty());
	assert!(updates.update_fail_malformed_htlcs.is_empty());
	assert!(updates.update_fee.is_none());
	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);

	// The recipient must detect the preimage/hash mismatch and log the rejection.
	nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Payment preimage didn't match payment hash", 1);
}
14805
#[test]
fn test_multi_hop_missing_secret() {
	// Sending a multi-path payment without a payment secret must fail up front with an
	// `UnexpectedError` reason and leave no payment state behind.
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);

	// Build a diamond topology: 0 -> 1 -> 3 and 0 -> 2 -> 3.
	let scid_0_1 = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
	let scid_0_2 = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
	let scid_1_3 = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
	let scid_2_3 = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;

	// Duplicate the single found path, then steer each copy down a different side of
	// the diamond to make this a two-path payment.
	let (mut route, payment_hash, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
	let duplicated_path = route.paths[0].clone();
	route.paths.push(duplicated_path);
	route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
	route.paths[0].hops[0].short_channel_id = scid_0_1;
	route.paths[0].hops[1].short_channel_id = scid_1_3;
	route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
	route.paths[1].hops[0].short_channel_id = scid_0_2;
	route.paths[1].hops[1].short_channel_id = scid_2_3;

	// No payment secret provided: the send must surface a PaymentFailed event.
	nodes[0].node.send_payment_with_route(route, payment_hash,
		RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)).unwrap();
	let pending_events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(pending_events.len(), 1);
	if let Event::PaymentFailed { reason, .. } = pending_events[0] {
		assert_eq!(reason.unwrap(), crate::events::PaymentFailureReason::UnexpectedError);
	} else {
		panic!()
	}
	nodes[0].logger.assert_log_contains("lightning::ln::outbound_payment", "Payment secret is required for multi-path payments", 2);
	assert!(nodes[0].node.list_recent_payments().is_empty());
}
14842
#[test]
fn test_channel_update_cached() {
	// Test that the ChannelUpdate generated on force-close is cached in
	// `pending_broadcast_messages` while no peers are connected, and is only released
	// (and cleared) once we reconnect to some peer.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

	nodes[0].node.force_close_channel_with_peer(&chan.2, &nodes[1].node.get_our_node_id(), None, true).unwrap();
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);

	// The counterparty sees nothing directly — the update is not pushed to nodes[1].
	let node_1_events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(node_1_events.len(), 0);

	{
		// The ChannelUpdate was instead cached on nodes[0] for later broadcast.
		let pending_broadcast_messages= nodes[0].node.pending_broadcast_messages.lock().unwrap();
		assert_eq!(pending_broadcast_messages.len(), 1);
	}

	// Disconnect every peer: with nobody connected, the cached message must not be
	// handed out via get_and_clear_pending_msg_events.
	nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());

	nodes[0].node.peer_disconnected(nodes[2].node.get_our_node_id());
	nodes[2].node.peer_disconnected(nodes[0].node.get_our_node_id());

	let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(node_0_events.len(), 0);

	// Reconnect to one peer (nodes[2]).
	nodes[0].node.peer_connected(nodes[2].node.get_our_node_id(), &msgs::Init {
		features: nodes[2].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap();
	nodes[2].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, false).unwrap();

	// Now the cached broadcast is surfaced as a BroadcastChannelUpdate event...
	let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(node_0_events.len(), 1);
	match &node_0_events[0] {
		MessageSendEvent::BroadcastChannelUpdate { .. } => (),
		_ => panic!("Unexpected event"),
	}
	{
		// ...and removed from the cache so it is not re-delivered.
		let pending_broadcast_messages= nodes[0].node.pending_broadcast_messages.lock().unwrap();
		assert_eq!(pending_broadcast_messages.len(), 0);
	}
}
14897
#[test]
fn test_drop_disconnected_peers_when_removing_channels() {
	// Test that a disconnected peer's `per_peer_state` entry is pruned on the timer tick
	// after its last channel is force-closed — not immediately at close time.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);

	// Disconnect first, then force-close the only channel with the (offline) peer.
	nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
	let chan_id = nodes[0].node.list_channels()[0].channel_id;
	let error_message = "Channel force-closed";
	nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 1_000_000);

	{
		// Immediately after the close the peer entry still exists — removal is deferred
		// to the timer.
		let nodes_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		assert_eq!(nodes_0_per_peer_state.len(), 1);
		assert!(nodes_0_per_peer_state.get(&nodes[1].node.get_our_node_id()).is_some());
	}

	nodes[0].node.timer_tick_occurred();

	{
		// The tick drops the disconnected, channel-less peer.
		assert_eq!(nodes[0].node.per_peer_state.read().unwrap().len(), 0);
	}
}
14931
#[test]
fn test_drop_peers_when_removing_unfunded_channels() {
	// When a peer disconnects before its channel is funded, the pending channel is
	// aborted and the now-channel-less peer entry is dropped immediately on both ends
	// (no timer tick required).
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Exchange open_channel/accept_channel, stopping before funding.
	exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
	let pending_events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(pending_events.len(), 1, "Unexpected events {:?}", pending_events);
	if !matches!(pending_events[0], Event::FundingGenerationReady { .. }) {
		panic!("Unexpected event {:?}", pending_events);
	}

	// Disconnecting mid-open closes the unfunded channel on both sides...
	nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
	check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer, [nodes[1].node.get_our_node_id()], 1_000_000);
	check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer, [nodes[0].node.get_our_node_id()], 1_000_000);

	// ...and both peer-state maps are emptied straight away.
	assert_eq!(nodes[0].node.per_peer_state.read().unwrap().len(), 0);
	assert_eq!(nodes[1].node.per_peer_state.read().unwrap().len(), 0);
}
14956
#[test]
fn bad_inbound_payment_hash() {
	// A corrupted payment hash must fail `inbound_payment::verify` (logging the failure),
	// while the original, untouched hash continues to verify successfully.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[0]);
	let payment_data = msgs::FinalOnionHopData { payment_secret, total_msat: 100_000 };

	// Flip a byte so the hash no longer matches anything the node handed out.
	let mut corrupted_hash = payment_hash.clone();
	corrupted_hash.0[0] += 1;
	let now = nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64;
	let bad_result = inbound_payment::verify(corrupted_hash, &payment_data, now,
		&nodes[0].node.inbound_payment_key, &nodes[0].logger);
	assert!(bad_result.is_err(), "Unexpected ok");
	nodes[0].logger.assert_log_contains("lightning::ln::inbound_payment", "Failing HTLC with user-generated payment_hash", 1);

	// The unmodified hash still verifies.
	let now = nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64;
	assert!(inbound_payment::verify(payment_hash, &payment_data, now,
		&nodes[0].node.inbound_payment_key, &nodes[0].logger).is_ok());
}
14985
#[test]
fn test_outpoint_to_peer_coverage() {
	// Test when the `outpoint_to_peer` map entries are created and removed: the funder
	// inserts on funding-tx generation, the fundee on handling funding_created, and each
	// side drops its entry as the cooperative close's closing_signed exchange completes.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
	let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel);
	let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel);

	let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
	let channel_id = ChannelId::from_bytes(tx.compute_txid().to_byte_array());
	{
		// Before the funding transaction is handed to the manager, neither side tracks
		// the outpoint.
		assert_eq!(nodes[0].node.outpoint_to_peer.lock().unwrap().len(), 0);
		assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
	}

	nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
	{
		// The funder records the outpoint as soon as the funding tx is generated.
		let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
		assert_eq!(nodes_0_lock.len(), 1);
		assert!(nodes_0_lock.contains_key(&funding_output));
	}

	// The fundee has not seen the funding tx yet.
	assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);

	let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg);
	{
		// The funder's entry is unchanged by the fundee's handling.
		let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
		assert_eq!(nodes_0_lock.len(), 1);
		assert!(nodes_0_lock.contains_key(&funding_output));
	}
	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

	{
		// The fundee records the outpoint upon handling funding_created.
		let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
		assert_eq!(nodes_1_lock.len(), 1);
		assert!(nodes_1_lock.contains_key(&funding_output));
	}
	check_added_monitors!(nodes[1], 1);
	let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed);
	check_added_monitors!(nodes[0], 1);
	expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
	let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
	let (announcement, nodes_0_update, nodes_1_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
	update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &nodes_0_update, &nodes_1_update);

	// Start a cooperative close and walk the shutdown/closing_signed exchange, checking
	// at each step when the map entries are released.
	nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
	nodes[1].node.handle_shutdown(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
	let nodes_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_shutdown(nodes[1].node.get_our_node_id(), &nodes_1_shutdown);

	let closing_signed_node_0 = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_closing_signed(nodes[0].node.get_our_node_id(), &closing_signed_node_0);
	{
		// Fee negotiation is still in progress, so both sides keep their entries.
		let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
		assert_eq!(nodes_0_lock.len(), 1);
		assert!(nodes_0_lock.contains_key(&funding_output));
	}

	{
		let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
		assert_eq!(nodes_1_lock.len(), 1);
		assert!(nodes_1_lock.contains_key(&funding_output));
	}

	nodes[0].node.handle_closing_signed(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
	{
		// nodes[0] has now received a matching closing_signed and drops its entry;
		// nodes[1] is still waiting for the final closing_signed back.
		assert_eq!(nodes[0].node.outpoint_to_peer.lock().unwrap().len(), 0);

		let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
		assert_eq!(nodes_1_lock.len(), 1);
		assert!(nodes_1_lock.contains_key(&funding_output));
	}

	let (_nodes_0_update, closing_signed_node_0) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_closing_signed(nodes[0].node.get_our_node_id(), &closing_signed_node_0.unwrap());
	{
		// Once nodes[1] handles the final closing_signed, its entry is removed as well.
		assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
	}
	let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());

	check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
	check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
}
15104
15105 fn check_not_connected_to_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
15106 let expected_message = format!("Not connected to node: {}", expected_public_key);
15107 check_api_error_message(expected_message, res_err)
15108 }
15109
15110 fn check_unkown_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
15111 let expected_message = format!("Can't find a peer matching the passed counterparty node_id {}", expected_public_key);
15112 check_api_error_message(expected_message, res_err)
15113 }
15114
15115 fn check_channel_unavailable_error<T>(res_err: Result<T, APIError>, expected_channel_id: ChannelId, peer_node_id: PublicKey) {
15116 let expected_message = format!("Channel with id {} not found for the passed counterparty node_id {}", expected_channel_id, peer_node_id);
15117 check_api_error_message(expected_message, res_err)
15118 }
15119
15120 fn check_api_misuse_error<T>(res_err: Result<T, APIError>) {
15121 let expected_message = "No such channel awaiting to be accepted.".to_string();
15122 check_api_error_message(expected_message, res_err)
15123 }
15124
15125 fn check_api_error_message<T>(expected_err_message: String, res_err: Result<T, APIError>) {
15126 match res_err {
15127 Err(APIError::APIMisuseError { err }) => {
15128 assert_eq!(err, expected_err_message);
15129 },
15130 Err(APIError::ChannelUnavailable { err }) => {
15131 assert_eq!(err, expected_err_message);
15132 },
15133 Ok(_) => panic!("Unexpected Ok"),
15134 Err(_) => panic!("Unexpected Error"),
15135 }
15136 }
15137
#[test]
fn test_api_calls_with_unkown_counterparty_node() {
	// Test that channel-related API calls targeting a node_id we have never seen return
	// the appropriate error ("not connected" or "unknown peer") instead of panicking.
	let chanmon_cfg = create_chanmon_cfgs(2);
	let node_cfg = create_node_cfgs(2, &chanmon_cfg);
	let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
	let nodes = create_network(2, &node_cfg, &node_chanmgr);

	// Dummy identifiers — no channel, intercept, or peer with these values exists.
	let channel_id = ChannelId::from_bytes([4; 32]);
	let unkown_public_key = PublicKey::from_secret_key(&Secp256k1::signing_only(), &SecretKey::from_slice(&[42; 32]).unwrap());
	let intercept_id = InterceptId([0; 32]);
	let error_message = "Channel force-closed";

	// `create_channel` fails with a "not connected" error for the unknown peer.
	check_not_connected_to_peer_error(nodes[0].node.create_channel(unkown_public_key, 1_000_000, 500_000_000, 42, None, None), unkown_public_key);

	// All remaining calls fail with the generic unknown-peer error.
	check_unkown_peer_error(nodes[0].node.accept_inbound_channel(&channel_id, &unkown_public_key, 42), unkown_public_key);

	check_unkown_peer_error(nodes[0].node.close_channel(&channel_id, &unkown_public_key), unkown_public_key);

	check_unkown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unkown_public_key, error_message.to_string()), unkown_public_key);

	check_unkown_peer_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &unkown_public_key, error_message.to_string()), unkown_public_key);

	check_unkown_peer_error(nodes[0].node.forward_intercepted_htlc(intercept_id, &channel_id, unkown_public_key, 1_000_000), unkown_public_key);

	check_unkown_peer_error(nodes[0].node.update_channel_config(&unkown_public_key, &[channel_id], &ChannelConfig::default()), unkown_public_key);
}
15169
#[test]
fn test_api_calls_with_unavailable_channel() {
	// Test that channel-related API calls for a known, connected peer but a channel id
	// that does not exist return an error instead of panicking.
	let chanmon_cfg = create_chanmon_cfgs(2);
	let node_cfg = create_node_cfgs(2, &chanmon_cfg);
	let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
	let nodes = create_network(2, &node_cfg, &node_chanmgr);

	let counterparty_node_id = nodes[1].node.get_our_node_id();

	// A channel id that was never created.
	let channel_id = ChannelId::from_bytes([4; 32]);
	let error_message = "Channel force-closed";

	// `accept_inbound_channel` reports API misuse — no channel is awaiting acceptance.
	check_api_misuse_error(nodes[0].node.accept_inbound_channel(&channel_id, &counterparty_node_id, 42));

	// The remaining calls report that the channel was not found for this counterparty.
	check_channel_unavailable_error(nodes[0].node.close_channel(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);

	check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id, error_message.to_string()), channel_id, counterparty_node_id);

	check_channel_unavailable_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id, error_message.to_string()), channel_id, counterparty_node_id);

	check_channel_unavailable_error(nodes[0].node.forward_intercepted_htlc(InterceptId([0; 32]), &channel_id, counterparty_node_id, 1_000_000), channel_id, counterparty_node_id);

	check_channel_unavailable_error(nodes[0].node.update_channel_config(&counterparty_node_id, &[channel_id], &ChannelConfig::default()), channel_id, counterparty_node_id);
}
15200
#[test]
fn test_connection_limiting() {
	// Test the limits on inbound unfunded channels per peer (MAX_UNFUNDED_CHANS_PER_PEER),
	// connected channel-less peers (MAX_NO_CHANNEL_PEERS), and peers holding unfunded
	// channels (MAX_UNFUNDED_CHANNEL_PEERS).
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
	let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	// Open exactly the per-peer unfunded-channel limit, funding only the first channel
	// so that nodes[0] ends up with one funded and many unfunded channels.
	let mut funding_tx = None;
	for idx in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
		nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
		let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());

		if idx == 0 {
			// Fund only the first channel (up through funding_signed).
			nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel);
			let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
			funding_tx = Some(tx.clone());
			nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx).unwrap();
			let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());

			nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg);
			check_added_monitors!(nodes[1], 1);
			expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

			let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());

			nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed);
			check_added_monitors!(nodes[0], 1);
			expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
		}
		// Give each replayed open_channel a fresh temporary id.
		open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
	}

	// One open_channel over the per-peer limit must be rejected with an error for that
	// specific temporary channel id.
	open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(
		&nodes[0].keys_manager);
	nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
	assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
		open_channel_msg.common_fields.temporary_channel_id);

	// Connect channel-less peers until MAX_NO_CHANNEL_PEERS is reached (nodes[0] counts
	// as the first such peer); one further connection attempt is refused.
	let mut peer_pks = Vec::with_capacity(super::MAX_NO_CHANNEL_PEERS);
	for _ in 1..super::MAX_NO_CHANNEL_PEERS {
		let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
			&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
		peer_pks.push(random_pk);
		nodes[1].node.peer_connected(random_pk, &msgs::Init {
			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
		}, true).unwrap();
	}
	let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
		&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
	nodes[1].node.peer_connected(last_random_pk, &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap_err();

	// Disconnecting nodes[0] closes its unfunded channels (all but the one funded
	// channel, per the event count), which frees a peer slot for `last_random_pk`.
	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
	let chan_closed_events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(chan_closed_events.len(), super::MAX_UNFUNDED_CHANS_PER_PEER - 1);
	for ev in chan_closed_events {
		if let Event::ChannelClosed { .. } = ev { } else { panic!(); }
	}
	nodes[1].node.peer_connected(last_random_pk, &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap();
	// An outbound (`true`) reconnection of nodes[0] is refused while peer slots are full...
	nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap_err();

	// ...but an inbound (`false`) one succeeds; then disconnect again.
	nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, false).unwrap();
	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());

	// Only up to MAX_UNFUNDED_CHANNEL_PEERS peers may hold unfunded channels; the peer
	// past that line gets its open_channel rejected.
	assert!(peer_pks.len() > super::MAX_UNFUNDED_CHANNEL_PEERS - 1);
	for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
		nodes[1].node.handle_open_channel(peer_pks[i], &open_channel_msg);
		get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]);
		open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
	}
	nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
	assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
		open_channel_msg.common_fields.temporary_channel_id);

	// Outbound channels we initiate ourselves are not limited.
	nodes[1].node.create_channel(last_random_pk, 100_000, 0, 42, None, None).unwrap();
	get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, last_random_pk);

	// After the funding tx is mined, nodes[0]'s channel counts as funded: it can
	// reconnect (getting a channel_reestablish), and a new inbound open_channel from
	// `last_random_pk` is accepted again.
	mine_transaction(&nodes[1], funding_tx.as_ref().unwrap());
	nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap();
	get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());

	nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
	get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
}
15315
#[test]
fn test_outbound_chans_unlimited() {
	// Test that the unfunded-channel limit applies only to inbound channels — our own
	// outbound opens to the same peer remain allowed even once the inbound quota is full.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
	let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	// Fill nodes[1]'s inbound unfunded-channel quota from nodes[0], giving each replayed
	// open_channel a fresh temporary id.
	for _ in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
		nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
		get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
		open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
	}

	// One past the limit is rejected with an error naming the offending temporary id.
	nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
	assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
		open_channel_msg.common_fields.temporary_channel_id);

	// nodes[1] can still open an outbound channel to the same peer...
	nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
	get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());

	// ...while further inbound opens remain rejected.
	nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
	assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
		open_channel_msg.common_fields.temporary_channel_id);
}
15350
	#[test]
	fn test_0conf_limiting() {
		// Tests that the limit on the number of peers with pending unfunded channels
		// (MAX_UNFUNDED_CHANNEL_PEERS) is enforced for manually-accepted channels, and that
		// accepting a channel as a trusted 0conf channel bypasses that limit.
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let mut settings = test_default_channel_config();
		settings.manually_accept_inbound_channels = true;
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(settings)]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
		let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

		// Connect synthetic peers and give each one pending unfunded channel, filling the quota.
		// The `- 1` presumably accounts for nodes[0] already being a connected peer — the secp
		// context / entropy source used for the fake keys is arbitrary.
		for _ in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
			let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
				&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
			nodes[1].node.peer_connected(random_pk, &msgs::Init {
				features: nodes[0].node.init_features(), networks: None, remote_network_address: None
			}, true).unwrap();

			nodes[1].node.handle_open_channel(random_pk, &open_channel_msg);
			let events = nodes[1].node.get_and_clear_pending_events();
			match events[0] {
				Event::OpenChannelRequest { temporary_channel_id, .. } => {
					nodes[1].node.accept_inbound_channel(&temporary_channel_id, &random_pk, 23).unwrap();
				}
				_ => panic!("Unexpected event"),
			}
			get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, random_pk);
			open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
		}

		// One further peer is over the limit: a plain accept_inbound_channel must be refused...
		let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
			&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
		nodes[1].node.peer_connected(last_random_pk, &msgs::Init {
			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
		}, true).unwrap();
		nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
		let events = nodes[1].node.get_and_clear_pending_events();
		match events[0] {
			Event::OpenChannelRequest { temporary_channel_id, .. } => {
				match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &last_random_pk, 23) {
					Err(APIError::APIMisuseError { err }) =>
						assert_eq!(err, "Too many peers with unfunded channels, refusing to accept new ones"),
					_ => panic!(),
				}
			}
			_ => panic!("Unexpected event"),
		}
		// ...and an error naming the rejected channel is sent back to the opening peer.
		assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
			open_channel_msg.common_fields.temporary_channel_id);

		// Accepting the same open as a trusted 0conf channel, however, succeeds despite the
		// limit being hit.
		nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
		let events = nodes[1].node.get_and_clear_pending_events();
		match events[0] {
			Event::OpenChannelRequest { temporary_channel_id, .. } => {
				nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &last_random_pk, 23).unwrap();
			}
			_ => panic!("Unexpected event"),
		}
		get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
	}
15419
15420 #[test]
15421 fn reject_excessively_underpaying_htlcs() {
15422 let chanmon_cfg = create_chanmon_cfgs(1);
15423 let node_cfg = create_node_cfgs(1, &chanmon_cfg);
15424 let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
15425 let node = create_network(1, &node_cfg, &node_chanmgr);
15426 let sender_intended_amt_msat = 100;
15427 let extra_fee_msat = 10;
15428 let hop_data = msgs::InboundOnionPayload::Receive {
15429 sender_intended_htlc_amt_msat: 100,
15430 cltv_expiry_height: 42,
15431 payment_metadata: None,
15432 keysend_preimage: None,
15433 payment_data: Some(msgs::FinalOnionHopData {
15434 payment_secret: PaymentSecret([0; 32]), total_msat: sender_intended_amt_msat,
15435 }),
15436 custom_tlvs: Vec::new(),
15437 };
15438 let current_height: u32 = node[0].node.best_block.read().unwrap().height;
15441 if let Err(crate::ln::channelmanager::InboundHTLCErr { err_code, .. }) =
15442 create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
15443 sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat),
15444 current_height)
15445 {
15446 assert_eq!(err_code, 19);
15447 } else { panic!(); }
15448
15449 let hop_data = msgs::InboundOnionPayload::Receive { sender_intended_htlc_amt_msat: 100,
15452 cltv_expiry_height: 42,
15453 payment_metadata: None,
15454 keysend_preimage: None,
15455 payment_data: Some(msgs::FinalOnionHopData {
15456 payment_secret: PaymentSecret([0; 32]), total_msat: sender_intended_amt_msat,
15457 }),
15458 custom_tlvs: Vec::new(),
15459 };
15460 let current_height: u32 = node[0].node.best_block.read().unwrap().height;
15461 assert!(create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
15462 sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat),
15463 current_height).is_ok());
15464 }
15465
	#[test]
	fn test_final_incorrect_cltv(){
		// Builds a final-hop onion payload whose cltv_expiry_height (22) does not match the
		// HTLC's actual cltv_expiry (23) and checks the HTLC is still accepted.
		let chanmon_cfg = create_chanmon_cfgs(1);
		let node_cfg = create_node_cfgs(1, &chanmon_cfg);
		let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
		let node = create_network(1, &node_cfg, &node_chanmgr);

		let current_height: u32 = node[0].node.best_block.read().unwrap().height;
		let result = create_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive {
			sender_intended_htlc_amt_msat: 100,
			cltv_expiry_height: 22,
			payment_metadata: None,
			keysend_preimage: None,
			payment_data: Some(msgs::FinalOnionHopData {
				payment_secret: PaymentSecret([0; 32]), total_msat: 100,
			}),
			custom_tlvs: Vec::new(),
		}, [0; 32], PaymentHash([0; 32]), 100, 23, None, true, None, current_height);

		// NOTE(review): asserting Ok implies an HTLC whose cltv_expiry exceeds the onion's
		// outgoing value is deliberately tolerated at the final hop — confirm against BOLT 4's
		// final-node requirements.
		assert!(result.is_ok());
	}
15490
	#[test]
	fn test_inbound_anchors_manual_acceptance() {
		// Tests that an inbound open_channel proposing anchors_zero_fee_htlc_tx is rejected with
		// an error unless the recipient has manually_accept_inbound_channels set, in which case
		// it is surfaced as an OpenChannelRequest for the user to decide on.
		let mut anchors_cfg = test_default_channel_config();
		anchors_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;

		let mut anchors_manual_accept_cfg = anchors_cfg.clone();
		anchors_manual_accept_cfg.manually_accept_inbound_channels = true;

		let chanmon_cfgs = create_chanmon_cfgs(3);
		let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
		let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs,
			&[Some(anchors_cfg.clone()), Some(anchors_cfg.clone()), Some(anchors_manual_accept_cfg.clone())]);
		let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
		let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

		// nodes[1] auto-accepts channels, so the anchors channel is rejected outright rather
		// than surfaced to the user.
		nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
		let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
		match &msg_events[0] {
			MessageSendEvent::HandleError { node_id, action } => {
				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
				match action {
					ErrorAction::SendErrorMessage { msg } =>
						assert_eq!(msg.data, "No channels with anchor outputs accepted".to_owned()),
					_ => panic!("Unexpected error action"),
				}
			}
			_ => panic!("Unexpected event"),
		}

		// nodes[2] manually accepts channels, so the same open_channel produces an
		// OpenChannelRequest which the user may accept, yielding an accept_channel message.
		nodes[2].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
		let events = nodes[2].node.get_and_clear_pending_events();
		match events[0] {
			Event::OpenChannelRequest { temporary_channel_id, .. } =>
				nodes[2].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23).unwrap(),
			_ => panic!("Unexpected event"),
		}
		get_event_msg!(nodes[2], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
	}
15534
	#[test]
	fn test_anchors_zero_fee_htlc_tx_fallback() {
		// Tests that if a channel proposing the anchors_zero_fee_htlc_tx type is rejected by the
		// counterparty, the initiator retries the open with the anchors type dropped.
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let mut anchors_config = test_default_channel_config();
		anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
		anchors_config.manually_accept_inbound_channels = true;
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config.clone())]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
		let error_message = "Channel force-closed";

		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 0, None, None).unwrap();
		let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
		assert!(open_channel_msg.common_fields.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx());

		// nodes[1] "rejects" the channel by force-closing it while it awaits manual acceptance,
		// producing an error message for nodes[0].
		nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
		let events = nodes[1].node.get_and_clear_pending_events();
		match events[0] {
			Event::OpenChannelRequest { temporary_channel_id, .. } => {
				nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
			}
			_ => panic!("Unexpected event"),
		}

		let error_msg = get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id());
		nodes[0].node.handle_error(nodes[1].node.get_our_node_id(), &error_msg);

		// On seeing the rejection, nodes[0] re-sends open_channel without the anchors type.
		let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
		assert!(!open_channel_msg.common_fields.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());

		// nodes[1] surfaces no further events for the rejected attempt.
		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
	}
15572
	#[test]
	fn test_update_channel_config() {
		// Tests update_channel_config/update_partial_channel_config: a no-op update must not
		// broadcast a channel_update, a real change must, and an update naming an unknown
		// channel id must error without applying or broadcasting anything.
		let chanmon_cfg = create_chanmon_cfgs(2);
		let node_cfg = create_node_cfgs(2, &chanmon_cfg);
		let mut user_config = test_default_channel_config();
		let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
		let nodes = create_network(2, &node_cfg, &node_chanmgr);
		let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
		let channel = &nodes[0].node.list_channels()[0];

		// Re-applying the current config is a no-op and emits no messages.
		nodes[0].node.update_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &user_config.channel_config).unwrap();
		let events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 0);

		// Changing the base forwarding fee applies and triggers a BroadcastChannelUpdate.
		user_config.channel_config.forwarding_fee_base_msat += 10;
		nodes[0].node.update_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &user_config.channel_config).unwrap();
		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_base_msat, user_config.channel_config.forwarding_fee_base_msat);
		let events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		match &events[0] {
			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
			_ => panic!("expected BroadcastChannelUpdate event"),
		}

		// An empty partial update changes nothing and emits nothing.
		nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate::default()).unwrap();
		let events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 0);

		// A partial update of a single field applies it and rebroadcasts.
		let new_cltv_expiry_delta = user_config.channel_config.cltv_expiry_delta + 6;
		nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate {
			cltv_expiry_delta: Some(new_cltv_expiry_delta),
			..Default::default()
		}).unwrap();
		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().cltv_expiry_delta, new_cltv_expiry_delta);
		let events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		match &events[0] {
			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
			_ => panic!("expected BroadcastChannelUpdate event"),
		}

		// A second partial update leaves fields it does not mention intact.
		let new_fee = user_config.channel_config.forwarding_fee_proportional_millionths + 100;
		nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate {
			forwarding_fee_proportional_millionths: Some(new_fee),
			..Default::default()
		}).unwrap();
		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().cltv_expiry_delta, new_cltv_expiry_delta);
		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, new_fee);
		let events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		match &events[0] {
			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
			_ => panic!("expected BroadcastChannelUpdate event"),
		}

		// Including an unknown channel id in the batch errors with ChannelUnavailable, and (per
		// the asserts below) neither applies the new fee nor broadcasts an update.
		let bad_channel_id = ChannelId::v1_from_funding_txid(&[10; 32], 10);
		let current_fee = nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths;
		let new_fee = current_fee + 100;
		assert!(
			matches!(
				nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id, bad_channel_id], &ChannelConfigUpdate {
					forwarding_fee_proportional_millionths: Some(new_fee),
					..Default::default()
				}),
				Err(APIError::ChannelUnavailable { err: _ }),
			)
		);
		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, current_fee);
		let events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 0);
	}
15647
15648 #[test]
15649 fn test_payment_display() {
15650 let payment_id = PaymentId([42; 32]);
15651 assert_eq!(format!("{}", &payment_id), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a");
15652 let payment_hash = PaymentHash([42; 32]);
15653 assert_eq!(format!("{}", &payment_hash), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a");
15654 let payment_preimage = PaymentPreimage([42; 32]);
15655 assert_eq!(format!("{}", &payment_preimage), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a");
15656 }
15657
	#[test]
	fn test_trigger_lnd_force_close() {
		// Tests the non-standard force-close signal (used by lnd, per the test name): after we
		// force-close while disconnected, our channel_reestablish on reconnect carries both
		// commitment numbers as 0, which the counterparty treats as a request to broadcast its
		// own latest commitment transaction.
		let chanmon_cfg = create_chanmon_cfgs(2);
		let node_cfg = create_node_cfgs(2, &chanmon_cfg);
		let user_config = test_default_channel_config();
		let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
		let nodes = create_network(2, &node_cfg, &node_chanmgr);
		let error_message = "Channel force-closed";

		// Open a channel, disconnect the peers, then have nodes[0] force-close locally; it
		// immediately broadcasts its commitment transaction (spending the funding tx).
		let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
		nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
		nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
		nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
		check_closed_broadcast(&nodes[0], 1, true);
		check_added_monitors(&nodes[0], 1);
		check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
		{
			let txn = nodes[0].tx_broadcaster.txn_broadcast();
			assert_eq!(txn.len(), 1);
			check_spends!(txn[0], funding_tx);
		}

		// Reconnect. nodes[1] (which still considers the channel open) sends
		// channel_reestablish; nodes[0] emits nothing until it handles that message.
		nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init {
			features: nodes[1].node.init_features(), networks: None, remote_network_address: None
		}, true).unwrap();
		nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
		}, false).unwrap();
		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
		let channel_reestablish = get_event_msg!(
			nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()
		);
		nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &channel_reestablish);

		// nodes[0] replies with a channel_reestablish whose commitment numbers are both 0 — the
		// signal that the peer should unilaterally close.
		let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(msg_events.len(), 2);
		if let MessageSendEvent::SendChannelReestablish { node_id, msg } = &msg_events[0] {
			assert_eq!(*node_id, nodes[1].node.get_our_node_id());
			assert_eq!(msg.next_local_commitment_number, 0);
			assert_eq!(msg.next_remote_commitment_number, 0);
			nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &msg);
		} else { panic!() };
		// nodes[1] reacts by force-closing and broadcasting its own latest commitment tx.
		check_closed_broadcast(&nodes[1], 1, true);
		check_added_monitors(&nodes[1], 1);
		let expected_close_reason = ClosureReason::ProcessingError {
			err: "Peer sent an invalid channel_reestablish to force close in a non-standard way".to_string()
		};
		check_closed_event!(nodes[1], 1, expected_close_reason, [nodes[0].node.get_our_node_id()], 100000);
		{
			let txn = nodes[1].tx_broadcaster.txn_broadcast();
			assert_eq!(txn.len(), 1);
			check_spends!(txn[0], funding_tx);
		}
	}
15719
	#[test]
	fn test_malformed_forward_htlcs_ser() {
		// Tests that pending forward failures (both HTLCForwardInfo::FailHTLC and
		// FailMalformedHTLC) stored in the forward_htlcs map survive a ChannelManager
		// serialization round-trip.
		let chanmon_cfg = create_chanmon_cfgs(1);
		let node_cfg = create_node_cfgs(1, &chanmon_cfg);
		let persister;
		let chain_monitor;
		let chanmgrs = create_node_chanmgrs(1, &node_cfg, &[None]);
		let deserialized_chanmgr;
		let mut nodes = create_network(1, &node_cfg, &chanmgrs);

		let dummy_failed_htlc = |htlc_id| {
			HTLCForwardInfo::FailHTLC { htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }, }
		};
		let dummy_malformed_htlc = |htlc_id| {
			HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code: 0x4000, sha256_of_onion: [0; 32] }
		};

		// Interleave the two variants in both phase orders so each ordering is exercised.
		let dummy_htlcs_1: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| {
			if htlc_id % 2 == 0 {
				dummy_failed_htlc(htlc_id)
			} else {
				dummy_malformed_htlc(htlc_id)
			}
		}).collect();

		let dummy_htlcs_2: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| {
			if htlc_id % 2 == 1 {
				dummy_failed_htlc(htlc_id)
			} else {
				dummy_malformed_htlc(htlc_id)
			}
		}).collect();


		// Inject the fake forwards directly into the manager's forward_htlcs map before
		// serializing it.
		let (scid_1, scid_2) = (42, 43);
		let mut forward_htlcs = new_hash_map();
		forward_htlcs.insert(scid_1, dummy_htlcs_1.clone());
		forward_htlcs.insert(scid_2, dummy_htlcs_2.clone());

		let mut chanmgr_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
		*chanmgr_fwd_htlcs = forward_htlcs.clone();
		core::mem::drop(chanmgr_fwd_htlcs);

		reload_node!(nodes[0], nodes[0].node.encode(), &[], persister, chain_monitor, deserialized_chanmgr);

		// After the reload, the deserialized map must contain exactly the entries we injected.
		let mut deserialized_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
		for scid in [scid_1, scid_2].iter() {
			let deserialized_htlcs = deserialized_fwd_htlcs.remove(scid).unwrap();
			assert_eq!(forward_htlcs.remove(scid).unwrap(), deserialized_htlcs);
		}
		assert!(deserialized_fwd_htlcs.is_empty());
		core::mem::drop(deserialized_fwd_htlcs);

		expect_pending_htlcs_forwardable!(nodes[0]);
	}
15776}
15777
15778#[cfg(ldk_bench)]
15779pub mod bench {
15780 use crate::chain::Listen;
15781 use crate::chain::chainmonitor::{ChainMonitor, Persist};
15782 use crate::sign::{KeysManager, InMemorySigner};
15783 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider};
15784 use crate::ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId, RecipientOnionFields, Retry};
15785 use crate::ln::functional_test_utils::*;
15786 use crate::ln::msgs::{ChannelMessageHandler, Init};
15787 use crate::routing::gossip::NetworkGraph;
15788 use crate::routing::router::{PaymentParameters, RouteParameters};
15789 use crate::util::test_utils;
15790 use crate::util::config::{UserConfig, MaxDustHTLCExposure};
15791
15792 use bitcoin::amount::Amount;
15793 use bitcoin::locktime::absolute::LockTime;
15794 use bitcoin::hashes::Hash;
15795 use bitcoin::hashes::sha256::Hash as Sha256;
15796 use bitcoin::{Transaction, TxOut};
15797 use bitcoin::transaction::Version;
15798
15799 use crate::sync::{Arc, Mutex, RwLock};
15800
15801 use criterion::Criterion;
15802
	// Concrete ChannelManager type used by the benchmarks, wiring the test implementations
	// (broadcaster, fee estimator, routers, logger) together over a generic persister P.
	type Manager<'a, P> = ChannelManager<
		&'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource,
			&'a test_utils::TestBroadcaster, &'a test_utils::TestFeeEstimator,
			&'a test_utils::TestLogger, &'a P>,
		&'a test_utils::TestBroadcaster, &'a KeysManager, &'a KeysManager, &'a KeysManager,
		&'a test_utils::TestFeeEstimator, &'a test_utils::TestRouter<'a>,
		&'a test_utils::TestMessageRouter<'a>, &'a test_utils::TestLogger>;

	// Minimal NodeHolder wrapper so the functional-test macros (get_event_msg!, get_event!,
	// etc.) can be driven with a bare ChannelManager reference from the benchmarks.
	struct ANodeHolder<'node_cfg, 'chan_mon_cfg: 'node_cfg, P: Persist<InMemorySigner>> {
		node: &'node_cfg Manager<'chan_mon_cfg, P>,
	}
	impl<'node_cfg, 'chan_mon_cfg: 'node_cfg, P: Persist<InMemorySigner>> NodeHolder for ANodeHolder<'node_cfg, 'chan_mon_cfg, P> {
		type CM = Manager<'chan_mon_cfg, P>;
		#[inline]
		fn node(&self) -> &Manager<'chan_mon_cfg, P> { self.node }
		#[inline]
		// The benchmarks use a real ChainMonitor directly, so no TestChainMonitor is exposed.
		fn chain_monitor(&self) -> Option<&test_utils::TestChainMonitor> { None }
	}
15821
15822 pub fn bench_sends(bench: &mut Criterion) {
15823 bench_two_sends(bench, "bench_sends", test_utils::TestPersister::new(), test_utils::TestPersister::new());
15824 }
15825
15826 pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Criterion, bench_name: &str, persister_a: P, persister_b: P) {
15827 let network = bitcoin::Network::Testnet;
15831 let genesis_block = bitcoin::constants::genesis_block(network);
15832
15833 let tx_broadcaster = test_utils::TestBroadcaster::new(network);
15834 let fee_estimator = test_utils::TestFeeEstimator::new(253);
15835 let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
15836 let scorer = RwLock::new(test_utils::TestScorer::new());
15837 let entropy = test_utils::TestKeysInterface::new(&[0u8; 32], network);
15838 let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &logger_a, &scorer);
15839 let message_router = test_utils::TestMessageRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &entropy);
15840
15841 let mut config: UserConfig = Default::default();
15842 config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253);
15843 config.channel_handshake_config.minimum_depth = 1;
15844
15845 let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a);
15846 let seed_a = [1u8; 32];
15847 let keys_manager_a = KeysManager::new(&seed_a, 42, 42);
15848 let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &router, &message_router, &logger_a, &keys_manager_a, &keys_manager_a, &keys_manager_a, config.clone(), ChainParameters {
15849 network,
15850 best_block: BestBlock::from_network(network),
15851 }, genesis_block.header.time);
15852 let node_a_holder = ANodeHolder { node: &node_a };
15853
15854 let logger_b = test_utils::TestLogger::with_id("node a".to_owned());
15855 let chain_monitor_b = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_b);
15856 let seed_b = [2u8; 32];
15857 let keys_manager_b = KeysManager::new(&seed_b, 42, 42);
15858 let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &router, &message_router, &logger_b, &keys_manager_b, &keys_manager_b, &keys_manager_b, config.clone(), ChainParameters {
15859 network,
15860 best_block: BestBlock::from_network(network),
15861 }, genesis_block.header.time);
15862 let node_b_holder = ANodeHolder { node: &node_b };
15863
15864 node_a.peer_connected(node_b.get_our_node_id(), &Init {
15865 features: node_b.init_features(), networks: None, remote_network_address: None
15866 }, true).unwrap();
15867 node_b.peer_connected(node_a.get_our_node_id(), &Init {
15868 features: node_a.init_features(), networks: None, remote_network_address: None
15869 }, false).unwrap();
15870 node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None, None).unwrap();
15871 node_b.handle_open_channel(node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
15872 node_a.handle_accept_channel(node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
15873
15874 let tx;
15875 if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) {
15876 tx = Transaction { version: Version::TWO, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
15877 value: Amount::from_sat(8_000_000), script_pubkey: output_script,
15878 }]};
15879 node_a.funding_transaction_generated(temporary_channel_id, node_b.get_our_node_id(), tx.clone()).unwrap();
15880 } else { panic!(); }
15881
15882 node_b.handle_funding_created(node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingCreated, node_b.get_our_node_id()));
15883 let events_b = node_b.get_and_clear_pending_events();
15884 assert_eq!(events_b.len(), 1);
15885 match events_b[0] {
15886 Event::ChannelPending{ ref counterparty_node_id, .. } => {
15887 assert_eq!(*counterparty_node_id, node_a.get_our_node_id());
15888 },
15889 _ => panic!("Unexpected event"),
15890 }
15891
15892 node_a.handle_funding_signed(node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingSigned, node_a.get_our_node_id()));
15893 let events_a = node_a.get_and_clear_pending_events();
15894 assert_eq!(events_a.len(), 1);
15895 match events_a[0] {
15896 Event::ChannelPending{ ref counterparty_node_id, .. } => {
15897 assert_eq!(*counterparty_node_id, node_b.get_our_node_id());
15898 },
15899 _ => panic!("Unexpected event"),
15900 }
15901
15902 assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
15903
15904 let block = create_dummy_block(BestBlock::from_network(network).block_hash, 42, vec![tx]);
15905 Listen::block_connected(&node_a, &block, 1);
15906 Listen::block_connected(&node_b, &block, 1);
15907
15908 node_a.handle_channel_ready(node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendChannelReady, node_a.get_our_node_id()));
15909 let msg_events = node_a.get_and_clear_pending_msg_events();
15910 assert_eq!(msg_events.len(), 2);
15911 match msg_events[0] {
15912 MessageSendEvent::SendChannelReady { ref msg, .. } => {
15913 node_b.handle_channel_ready(node_a.get_our_node_id(), msg);
15914 get_event_msg!(node_b_holder, MessageSendEvent::SendChannelUpdate, node_a.get_our_node_id());
15915 },
15916 _ => panic!(),
15917 }
15918 match msg_events[1] {
15919 MessageSendEvent::SendChannelUpdate { .. } => {},
15920 _ => panic!(),
15921 }
15922
15923 let events_a = node_a.get_and_clear_pending_events();
15924 assert_eq!(events_a.len(), 1);
15925 match events_a[0] {
15926 Event::ChannelReady{ ref counterparty_node_id, .. } => {
15927 assert_eq!(*counterparty_node_id, node_b.get_our_node_id());
15928 },
15929 _ => panic!("Unexpected event"),
15930 }
15931
15932 let events_b = node_b.get_and_clear_pending_events();
15933 assert_eq!(events_b.len(), 1);
15934 match events_b[0] {
15935 Event::ChannelReady{ ref counterparty_node_id, .. } => {
15936 assert_eq!(*counterparty_node_id, node_a.get_our_node_id());
15937 },
15938 _ => panic!("Unexpected event"),
15939 }
15940
15941 let mut payment_count: u64 = 0;
15942 macro_rules! send_payment {
15943 ($node_a: expr, $node_b: expr) => {
15944 let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id(), TEST_FINAL_CLTV)
15945 .with_bolt11_features($node_b.bolt11_invoice_features()).unwrap();
15946 let mut payment_preimage = PaymentPreimage([0; 32]);
15947 payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes());
15948 payment_count += 1;
15949 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
15950 let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
15951
15952 $node_a.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
15953 PaymentId(payment_hash.0),
15954 RouteParameters::from_payment_params_and_value(payment_params, 10_000),
15955 Retry::Attempts(0)).unwrap();
15956 let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
15957 $node_b.handle_update_add_htlc($node_a.get_our_node_id(), &payment_event.msgs[0]);
15958 $node_b.handle_commitment_signed($node_a.get_our_node_id(), &payment_event.commitment_msg);
15959 let (raa, cs) = get_revoke_commit_msgs(&ANodeHolder { node: &$node_b }, &$node_a.get_our_node_id());
15960 $node_a.handle_revoke_and_ack($node_b.get_our_node_id(), &raa);
15961 $node_a.handle_commitment_signed($node_b.get_our_node_id(), &cs);
15962 $node_b.handle_revoke_and_ack($node_a.get_our_node_id(), &get_event_msg!(ANodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));
15963
15964 expect_pending_htlcs_forwardable!(ANodeHolder { node: &$node_b });
15965 expect_payment_claimable!(ANodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
15966 $node_b.claim_funds(payment_preimage);
15967 expect_payment_claimed!(ANodeHolder { node: &$node_b }, payment_hash, 10_000);
15968
15969 match $node_b.get_and_clear_pending_msg_events().pop().unwrap() {
15970 MessageSendEvent::UpdateHTLCs { node_id, updates } => {
15971 assert_eq!(node_id, $node_a.get_our_node_id());
15972 $node_a.handle_update_fulfill_htlc($node_b.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
15973 $node_a.handle_commitment_signed($node_b.get_our_node_id(), &updates.commitment_signed);
15974 },
15975 _ => panic!("Failed to generate claim event"),
15976 }
15977
15978 let (raa, cs) = get_revoke_commit_msgs(&ANodeHolder { node: &$node_a }, &$node_b.get_our_node_id());
15979 $node_b.handle_revoke_and_ack($node_a.get_our_node_id(), &raa);
15980 $node_b.handle_commitment_signed($node_a.get_our_node_id(), &cs);
15981 $node_a.handle_revoke_and_ack($node_b.get_our_node_id(), &get_event_msg!(ANodeHolder { node: &$node_b }, MessageSendEvent::SendRevokeAndACK, $node_a.get_our_node_id()));
15982
15983 expect_payment_sent!(ANodeHolder { node: &$node_a }, payment_preimage);
15984 }
15985 }
15986
15987 bench.bench_function(bench_name, |b| b.iter(|| {
15988 send_payment!(node_a, node_b);
15989 send_payment!(node_b, node_a);
15990 }));
15991 }
15992}