lightning/chain/onchaintx.rs

1// This file is Copyright its original authors, visible in version control
2// history.
3//
4// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7// You may not use this file except in accordance with one or both of these
8// licenses.
9
10//! The logic to build claims and bump in-flight transactions until they confirm.
11//!
12//! OnchainTxHandler objects are fully part of ChannelMonitor and encapsulate all
13//! building, tracking, bumping and notification functions.
14
15use bitcoin::amount::Amount;
16use bitcoin::locktime::absolute::LockTime;
17use bitcoin::transaction::Transaction;
18use bitcoin::transaction::OutPoint as BitcoinOutPoint;
19use bitcoin::script::{Script, ScriptBuf};
20use bitcoin::hashes::{Hash, HashEngine};
21use bitcoin::hashes::sha256::Hash as Sha256;
22use bitcoin::hash_types::{Txid, BlockHash};
23use bitcoin::secp256k1::PublicKey;
24use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
25use bitcoin::secp256k1;
26
27use crate::chain::chaininterface::{ConfirmationTarget, compute_feerate_sat_per_1000_weight};
28use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ChannelSigner, EntropySource, SignerProvider, ecdsa::EcdsaChannelSigner};
29use crate::ln::msgs::DecodeError;
30use crate::types::payment::PaymentPreimage;
31use crate::ln::chan_utils::{self, ChannelTransactionParameters, HTLCOutputInCommitment, HolderCommitmentTransaction};
32use crate::chain::ClaimId;
33use crate::chain::chaininterface::{FeeEstimator, BroadcasterInterface, LowerBoundedFeeEstimator};
34use crate::chain::channelmonitor::ANTI_REORG_DELAY;
35use crate::chain::package::{PackageSolvingData, PackageTemplate};
36use crate::chain::transaction::MaybeSignedTransaction;
37use crate::util::logger::Logger;
38use crate::util::ser::{Readable, ReadableArgs, MaybeReadable, UpgradableRequired, Writer, Writeable};
39
40use crate::io;
41use crate::prelude::*;
42use alloc::collections::BTreeMap;
43use core::cmp;
44use core::ops::Deref;
45use core::mem::replace;
46use core::mem::swap;
47use crate::types::features::ChannelTypeFeatures;
48
49const MAX_ALLOC_SIZE: usize = 64*1024;
50
51/// An entry for an [`OnchainEvent`], stating the block height when the event was observed and the
52/// transaction causing it.
53///
54/// Used to determine when the on-chain event can be considered safe from a chain reorganization.
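///
/// For example, with [`ANTI_REORG_DELAY`] at its value of 6, an entry first observed at height
/// 100 has a confirmation threshold of 100 + 6 - 1 = 105, i.e. it is considered reorg-safe once
/// the observing block has six confirmations in total.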
55#[derive(Clone, PartialEq, Eq)]
56struct OnchainEventEntry {
57	txid: Txid,
58	height: u32,
59	block_hash: Option<BlockHash>, // Added as optional, will be filled in for any entry generated on 0.0.113 or after
60	event: OnchainEvent,
61}
62
63impl OnchainEventEntry {
64	fn confirmation_threshold(&self) -> u32 {
65		self.height + ANTI_REORG_DELAY - 1
66	}
67
68	fn has_reached_confirmation_threshold(&self, height: u32) -> bool {
69		height >= self.confirmation_threshold()
70	}
71}
72
73/// Events for claims the [`OnchainTxHandler`] has generated. Once the events are considered safe
74/// from a chain reorg, the [`OnchainTxHandler`] will act accordingly.
75#[derive(Clone, PartialEq, Eq)]
76enum OnchainEvent {
77	/// A pending request has been claimed by a transaction spending the exact same set of outpoints
78	/// as the request. This claim can either be ours or from the counterparty. Once the claiming
79	/// transaction has met [`ANTI_REORG_DELAY`] confirmations, we consider it final and remove the
80	/// pending request.
81	Claim {
82		claim_id: ClaimId,
83	},
84	/// The counterparty has claimed an outpoint from one of our pending requests through a
85	/// different transaction than ours. If our transaction was attempting to claim multiple
86	/// outputs, we need to drop the outpoint claimed by the counterparty and regenerate a new claim
87	/// transaction for ourselves. We keep tracking, separately, the outpoint claimed by the
88	/// counterparty up to [`ANTI_REORG_DELAY`] confirmations to ensure we attempt to re-claim it
89	/// if the counterparty's claim is reorged from the chain.
90	ContentiousOutpoint {
91		package: PackageTemplate,
92	}
93}
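// As a concrete example: if our pending claim spends outpoints {A, B} and the counterparty
// confirms a transaction spending only B, we record a `ContentiousOutpoint` event for B (tracked
// for ANTI_REORG_DELAY blocks in case that spend is reorged out) and regenerate a claim covering
// A alone.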
94
95impl Writeable for OnchainEventEntry {
96	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
97		write_tlv_fields!(writer, {
98			(0, self.txid, required),
99			(1, self.block_hash, option),
100			(2, self.height, required),
101			(4, self.event, required),
102		});
103		Ok(())
104	}
105}
106
107impl MaybeReadable for OnchainEventEntry {
108	fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
109		let mut txid = Txid::all_zeros();
110		let mut height = 0;
111		let mut block_hash = None;
112		let mut event = UpgradableRequired(None);
113		read_tlv_fields!(reader, {
114			(0, txid, required),
115			(1, block_hash, option),
116			(2, height, required),
117			(4, event, upgradable_required),
118		});
119		Ok(Some(Self { txid, height, block_hash, event: _init_tlv_based_struct_field!(event, upgradable_required) }))
120	}
121}
122
123impl_writeable_tlv_based_enum_upgradable!(OnchainEvent,
124	(0, Claim) => {
125		(0, claim_id, required),
126	},
127	(1, ContentiousOutpoint) => {
128		(0, package, required),
129	},
130);
131
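// Legacy per-commitment holder HTLC signature lists, still written as `None` (and read back and
// discarded) for backwards compatibility. The encoding is an Option tag byte (0 = None, 1 = Some),
// then a u64 element count, then per element another Option tag byte followed by the HTLC output
// index as a u64 and the ECDSA signature.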
132impl Readable for Option<Vec<Option<(usize, Signature)>>> {
133	fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
134		match Readable::read(reader)? {
135			0u8 => Ok(None),
136			1u8 => {
137				let vlen: u64 = Readable::read(reader)?;
138				let mut ret = Vec::with_capacity(cmp::min(vlen as usize, MAX_ALLOC_SIZE / ::core::mem::size_of::<Option<(usize, Signature)>>()));
139				for _ in 0..vlen {
140					ret.push(match Readable::read(reader)? {
141						0u8 => None,
142						1u8 => Some((<u64 as Readable>::read(reader)? as usize, Readable::read(reader)?)),
143						_ => return Err(DecodeError::InvalidValue)
144					});
145				}
146				Ok(Some(ret))
147			},
148			_ => Err(DecodeError::InvalidValue),
149		}
150	}
151}
152
153impl Writeable for Option<Vec<Option<(usize, Signature)>>> {
154	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
155		match self {
156			&Some(ref vec) => {
157				1u8.write(writer)?;
158				(vec.len() as u64).write(writer)?;
159				for opt in vec.iter() {
160					match opt {
161						&Some((ref idx, ref sig)) => {
162							1u8.write(writer)?;
163							(*idx as u64).write(writer)?;
164							sig.write(writer)?;
165						},
166						&None => 0u8.write(writer)?,
167					}
168				}
169			},
170			&None => 0u8.write(writer)?,
171		}
172		Ok(())
173	}
174}
175
176/// The claim commonly referred to as the pre-signed second-stage HTLC transaction.
177#[derive(Clone, PartialEq, Eq)]
178pub(crate) struct ExternalHTLCClaim {
179	pub(crate) commitment_txid: Txid,
180	pub(crate) per_commitment_number: u64,
181	pub(crate) htlc: HTLCOutputInCommitment,
182	pub(crate) preimage: Option<PaymentPreimage>,
183	pub(crate) counterparty_sig: Signature,
184	pub(crate) per_commitment_point: PublicKey,
185}
186
187// Represents the different types of claims for which events are yielded externally to satisfy said
188// claims.
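// Consumers typically observe these as `BumpTransactionEvent`s (`ChannelClose`/`HTLCResolution`)
// and satisfy them via a `BumpTransactionEventHandler` or their own fee-bumping logic.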
189#[derive(Clone, PartialEq, Eq)]
190pub(crate) enum ClaimEvent {
191	/// Event yielded to signal that the commitment transaction fee must be bumped to claim any
192	/// encumbered funds and proceed to HTLC resolution, if any HTLCs exist.
193	BumpCommitment {
194		package_target_feerate_sat_per_1000_weight: u32,
195		commitment_tx: Transaction,
196		anchor_output_idx: u32,
197	},
198	/// Event yielded to signal that the commitment transaction has confirmed and its HTLCs must be
199	/// resolved by broadcasting a transaction with sufficient fee to claim them.
200	BumpHTLC {
201		target_feerate_sat_per_1000_weight: u32,
202		htlcs: Vec<ExternalHTLCClaim>,
203		tx_lock_time: LockTime,
204	},
205}
206
207/// Represents the different ways an output can be claimed (i.e., spent to an address under our
208/// control) onchain.
209pub(crate) enum OnchainClaim {
210	/// A finalized transaction pending confirmation spending the output to claim.
211	Tx(MaybeSignedTransaction),
212	/// An event yielded externally to signal additional inputs must be added to a transaction
213	/// pending confirmation spending the output to claim.
214	Event(ClaimEvent),
215}
216
217/// Represents the different feerate strategies a pending request can use when generating a claim.
218#[derive(Debug)]
219pub(crate) enum FeerateStrategy {
220	/// We must reuse the most recently used feerate, if any.
221	RetryPrevious,
222	/// We must pick the highest between the most recently used and the current feerate estimate.
223	HighestOfPreviousOrNew,
224	/// We must force a bump of the most recently used feerate, either by using the current feerate
225	/// estimate if it's higher, or manually bumping.
226	ForceBump,
227}
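// For instance, if the previous claim attempt used 1,000 sat/kW and the current estimate is
// 800 sat/kW: `RetryPrevious` reuses 1,000, `HighestOfPreviousOrNew` also picks 1,000 (the higher
// of the two), and `ForceBump` still forces a feerate above 1,000, only adopting the estimate when
// it exceeds the previous rate.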
228
229/// OnchainTxHandler receives claiming requests, aggregates them when it is sound to do so,
230/// broadcasts them, and performs RBF bumping if possible.
231#[derive(Clone)]
232pub struct OnchainTxHandler<ChannelSigner: EcdsaChannelSigner> {
233	channel_value_satoshis: u64,
234	channel_keys_id: [u8; 32],
235	destination_script: ScriptBuf,
236	holder_commitment: HolderCommitmentTransaction,
237	prev_holder_commitment: Option<HolderCommitmentTransaction>,
238
239	pub(super) signer: ChannelSigner,
240	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
241
242	// Used to track claiming requests. If a claim tx doesn't confirm before its height timer expires, we need to
243	// bump it (RBF or CPFP). If an input was part of an aggregate tx on the first claim attempt, we need to keep it
244	// within another bumped aggregate tx to comply with RBF rules. We may have multiple claiming txn in flight for
245	// the same set of outpoints, and one of the outpoints may be spent by a transaction not issued by us. That's why,
246	// at block connection, we scan all inputs and, if any of them belongs to a claiming request, we check whether
247	// all of the request's outpoints were spent by that transaction. If so, the transaction was equivalent to our
248	// claim and, after a security delay of 6 blocks, we remove the pending claim request. If not, we need to
249	// regenerate a new claim request with the reduced set of still-claimable outpoints.
250	// The key is the identifier of the pending claim request, i.e. the txid of the initial claiming transaction
251	// generated by us, and is immutable until all outpoints of the claimable set are resolved post-anti-reorg-delay.
252	// The entry caches the elements needed to generate a bumped claiming transaction (see ClaimTxBumpMaterial).
253	#[cfg(test)] // Used in functional_test to verify sanitization
254	pub(crate) pending_claim_requests: HashMap<ClaimId, PackageTemplate>,
255	#[cfg(not(test))]
256	pending_claim_requests: HashMap<ClaimId, PackageTemplate>,
257
258	// Used to track external events that need to be forwarded to the `ChainMonitor`. This `Vec`
259	// essentially acts as an insertion-ordered `HashMap` – there should only ever be one occurrence
260	// of a `ClaimId`, which tracks its latest `ClaimEvent`, i.e., if a pending claim exists, and
261	// a new block has been connected, resulting in a new claim, the previous will be replaced with
262	// the new.
263	//
264	// These external events may be generated in the following cases:
265	//	- A channel has been force closed by broadcasting the holder's latest commitment transaction
266	//	- A block being connected/disconnected
267	//	- Learning the preimage for an HTLC we can claim onchain
268	pending_claim_events: Vec<(ClaimId, ClaimEvent)>,
269
270	// Used to link outpoints claimed in a connected block to a pending claim request. The keys
271	// represent the outpoints that our `ChannelMonitor` has detected we have keys/scripts to
272	// claim. The values track the pending claim request identifier and the initial confirmation
273	// block height, and are immutable until the outpoint has enough confirmations to meet our
274	// [`ANTI_REORG_DELAY`]. The initial confirmation block height is used to remove the entry if
275	// the block gets disconnected.
276	#[cfg(test)] // Used in functional_test to verify sanitization
277	pub claimable_outpoints: HashMap<BitcoinOutPoint, (ClaimId, u32)>,
278	#[cfg(not(test))]
279	claimable_outpoints: HashMap<BitcoinOutPoint, (ClaimId, u32)>,
280
281	#[cfg(any(test, feature = "_test_utils"))]
282	pub(crate) locktimed_packages: BTreeMap<u32, Vec<PackageTemplate>>,
283	#[cfg(not(any(test, feature = "_test_utils")))]
284	locktimed_packages: BTreeMap<u32, Vec<PackageTemplate>>,
285
286	onchain_events_awaiting_threshold_conf: Vec<OnchainEventEntry>,
287
288	pub(super) secp_ctx: Secp256k1<secp256k1::All>,
289}
290
291impl<ChannelSigner: EcdsaChannelSigner> PartialEq for OnchainTxHandler<ChannelSigner> {
292	fn eq(&self, other: &Self) -> bool {
293		// `signer`, `secp_ctx`, and `pending_claim_events` are excluded on purpose.
294		self.channel_value_satoshis == other.channel_value_satoshis &&
295			self.channel_keys_id == other.channel_keys_id &&
296			self.destination_script == other.destination_script &&
297			self.holder_commitment == other.holder_commitment &&
298			self.prev_holder_commitment == other.prev_holder_commitment &&
299			self.channel_transaction_parameters == other.channel_transaction_parameters &&
300			self.pending_claim_requests == other.pending_claim_requests &&
301			self.claimable_outpoints == other.claimable_outpoints &&
302			self.locktimed_packages == other.locktimed_packages &&
303			self.onchain_events_awaiting_threshold_conf == other.onchain_events_awaiting_threshold_conf
304	}
305}
306
307const SERIALIZATION_VERSION: u8 = 1;
308const MIN_SERIALIZATION_VERSION: u8 = 1;
309
310impl<ChannelSigner: EcdsaChannelSigner> OnchainTxHandler<ChannelSigner> {
311	pub(crate) fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
312		write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
313
314		self.destination_script.write(writer)?;
315		self.holder_commitment.write(writer)?;
316		None::<Option<Vec<Option<(usize, Signature)>>>>.write(writer)?; // holder_htlc_sigs
317		self.prev_holder_commitment.write(writer)?;
318		None::<Option<Vec<Option<(usize, Signature)>>>>.write(writer)?; // prev_holder_htlc_sigs
319
320		self.channel_transaction_parameters.write(writer)?;
321
322		// Write a zero-length signer. The data is no longer deserialized as of version 0.0.113 and
323		// downgrades before version 0.0.113 are no longer supported as of version 0.0.119.
324		0u32.write(writer)?;
325
326		writer.write_all(&(self.pending_claim_requests.len() as u64).to_be_bytes())?;
327		for (ref ancestor_claim_txid, request) in self.pending_claim_requests.iter() {
328			ancestor_claim_txid.write(writer)?;
329			request.write(writer)?;
330		}
331
332		writer.write_all(&(self.claimable_outpoints.len() as u64).to_be_bytes())?;
333		for (ref outp, ref claim_and_height) in self.claimable_outpoints.iter() {
334			outp.write(writer)?;
335			claim_and_height.0.write(writer)?;
336			claim_and_height.1.write(writer)?;
337		}
338
339		writer.write_all(&(self.locktimed_packages.len() as u64).to_be_bytes())?;
340		for (ref locktime, ref packages) in self.locktimed_packages.iter() {
341			locktime.write(writer)?;
342			writer.write_all(&(packages.len() as u64).to_be_bytes())?;
343			for ref package in packages.iter() {
344				package.write(writer)?;
345			}
346		}
347
348		writer.write_all(&(self.onchain_events_awaiting_threshold_conf.len() as u64).to_be_bytes())?;
349		for ref entry in self.onchain_events_awaiting_threshold_conf.iter() {
350			entry.write(writer)?;
351		}
352
353		write_tlv_fields!(writer, {});
354		Ok(())
355	}
356}
357
358impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP, u64, [u8; 32])> for OnchainTxHandler<SP::EcdsaSigner> {
359	fn read<R: io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u64, [u8; 32])) -> Result<Self, DecodeError> {
360		let entropy_source = args.0;
361		let signer_provider = args.1;
362		let channel_value_satoshis = args.2;
363		let channel_keys_id = args.3;
364
365		let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
366
367		let destination_script = Readable::read(reader)?;
368
369		let holder_commitment = Readable::read(reader)?;
370		let _holder_htlc_sigs: Option<Vec<Option<(usize, Signature)>>> = Readable::read(reader)?;
371		let prev_holder_commitment = Readable::read(reader)?;
372		let _prev_holder_htlc_sigs: Option<Vec<Option<(usize, Signature)>>> = Readable::read(reader)?;
373
374		let channel_parameters = Readable::read(reader)?;
375
376		// Read the serialized signer bytes, but don't deserialize them, as we'll obtain our signer
377		// by re-deriving the private key material.
378		let keys_len: u32 = Readable::read(reader)?;
379		let mut bytes_read = 0;
380		while bytes_read != keys_len as usize {
381			// Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
382			let mut data = [0; 1024];
383			let bytes_to_read = cmp::min(1024, keys_len as usize - bytes_read);
384			let read_slice = &mut data[0..bytes_to_read];
385			reader.read_exact(read_slice)?;
386			bytes_read += bytes_to_read;
387		}
388
389		let mut signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
390		signer.provide_channel_parameters(&channel_parameters);
391
392		let pending_claim_requests_len: u64 = Readable::read(reader)?;
393		let mut pending_claim_requests = hash_map_with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
394		for _ in 0..pending_claim_requests_len {
395			pending_claim_requests.insert(Readable::read(reader)?, Readable::read(reader)?);
396		}
397
398		let claimable_outpoints_len: u64 = Readable::read(reader)?;
399		let mut claimable_outpoints = hash_map_with_capacity(cmp::min(claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 128));
400		for _ in 0..claimable_outpoints_len {
401			let outpoint = Readable::read(reader)?;
402			let ancestor_claim_txid = Readable::read(reader)?;
403			let height = Readable::read(reader)?;
404			claimable_outpoints.insert(outpoint, (ancestor_claim_txid, height));
405		}
406
407		let locktimed_packages_len: u64 = Readable::read(reader)?;
408		let mut locktimed_packages = BTreeMap::new();
409		for _ in 0..locktimed_packages_len {
410			let locktime = Readable::read(reader)?;
411			let packages_len: u64 = Readable::read(reader)?;
412			let mut packages = Vec::with_capacity(cmp::min(packages_len as usize, MAX_ALLOC_SIZE / core::mem::size_of::<PackageTemplate>()));
413			for _ in 0..packages_len {
414				packages.push(Readable::read(reader)?);
415			}
416			locktimed_packages.insert(locktime, packages);
417		}
418
419		let waiting_threshold_conf_len: u64 = Readable::read(reader)?;
420		let mut onchain_events_awaiting_threshold_conf = Vec::with_capacity(cmp::min(waiting_threshold_conf_len as usize, MAX_ALLOC_SIZE / 128));
421		for _ in 0..waiting_threshold_conf_len {
422			if let Some(val) = MaybeReadable::read(reader)? {
423				onchain_events_awaiting_threshold_conf.push(val);
424			}
425		}
426
427		read_tlv_fields!(reader, {});
428
429		let mut secp_ctx = Secp256k1::new();
430		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
431
432		Ok(OnchainTxHandler {
433			channel_value_satoshis,
434			channel_keys_id,
435			destination_script,
436			holder_commitment,
437			prev_holder_commitment,
438			signer,
439			channel_transaction_parameters: channel_parameters,
440			claimable_outpoints,
441			locktimed_packages,
442			pending_claim_requests,
443			onchain_events_awaiting_threshold_conf,
444			pending_claim_events: Vec::new(),
445			secp_ctx,
446		})
447	}
448}
449
450impl<ChannelSigner: EcdsaChannelSigner> OnchainTxHandler<ChannelSigner> {
451	pub(crate) fn new(
452		channel_value_satoshis: u64, channel_keys_id: [u8; 32], destination_script: ScriptBuf,
453		signer: ChannelSigner, channel_parameters: ChannelTransactionParameters,
454		holder_commitment: HolderCommitmentTransaction, secp_ctx: Secp256k1<secp256k1::All>
455	) -> Self {
456		OnchainTxHandler {
457			channel_value_satoshis,
458			channel_keys_id,
459			destination_script,
460			holder_commitment,
461			prev_holder_commitment: None,
462			signer,
463			channel_transaction_parameters: channel_parameters,
464			pending_claim_requests: new_hash_map(),
465			claimable_outpoints: new_hash_map(),
466			locktimed_packages: BTreeMap::new(),
467			onchain_events_awaiting_threshold_conf: Vec::new(),
468			pending_claim_events: Vec::new(),
469			secp_ctx,
470		}
471	}
472
473	pub(crate) fn get_prev_holder_commitment_to_self_value(&self) -> Option<u64> {
474		self.prev_holder_commitment.as_ref().map(|commitment| commitment.to_broadcaster_value_sat())
475	}
476
477	pub(crate) fn get_cur_holder_commitment_to_self_value(&self) -> u64 {
478		self.holder_commitment.to_broadcaster_value_sat()
479	}
480
481	pub(crate) fn get_and_clear_pending_claim_events(&mut self) -> Vec<(ClaimId, ClaimEvent)> {
482		let mut events = Vec::new();
483		swap(&mut events, &mut self.pending_claim_events);
484		events
485	}
486
487	/// Triggers rebroadcasts/fee-bumps of pending claims from a force-closed channel. This is
488	/// crucial in preventing certain classes of pinning attacks, detecting substantial mempool
489	/// feerate changes between blocks, and ensuring reliability if broadcasting fails. We recommend
490	/// invoking this every 30 seconds, or lower if running in an environment with spotty
491	/// connections, like on mobile.
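	///
	/// A minimal sketch of the intended cadence, assuming the pending claims are reachable through
	/// a `ChainMonitor` (the `lightning-background-processor` crate normally drives this for you):
	///
	/// ```ignore
	/// loop {
	///     // Re-sign/rebroadcast any still-pending claim transactions and re-yield bump events.
	///     chain_monitor.rebroadcast_pending_claims();
	///     std::thread::sleep(std::time::Duration::from_secs(30));
	/// }
	/// ```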
492	pub(super) fn rebroadcast_pending_claims<B: Deref, F: Deref, L: Logger>(
493		&mut self, current_height: u32, feerate_strategy: FeerateStrategy, broadcaster: &B,
494		conf_target: ConfirmationTarget, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L,
495	)
496	where
497		B::Target: BroadcasterInterface,
498		F::Target: FeeEstimator,
499	{
500		let mut bump_requests = Vec::with_capacity(self.pending_claim_requests.len());
501		for (claim_id, request) in self.pending_claim_requests.iter() {
502			let inputs = request.outpoints();
503			log_info!(logger, "Triggering rebroadcast/fee-bump for request with inputs {:?}", inputs);
504			bump_requests.push((*claim_id, request.clone()));
505		}
506		for (claim_id, request) in bump_requests {
507			self.generate_claim(current_height, &request, &feerate_strategy, conf_target, fee_estimator, logger)
508				.map(|(_, new_feerate, claim)| {
509					let mut feerate_was_bumped = false;
510					if let Some(mut_request) = self.pending_claim_requests.get_mut(&claim_id) {
511						feerate_was_bumped = new_feerate > request.previous_feerate();
512						mut_request.set_feerate(new_feerate);
513					}
514					match claim {
515						OnchainClaim::Tx(tx) => {
516							if tx.is_fully_signed() {
517								let log_start = if feerate_was_bumped { "Broadcasting RBF-bumped" } else { "Rebroadcasting" };
518								log_info!(logger, "{} onchain {}", log_start, log_tx!(tx.0));
519								broadcaster.broadcast_transactions(&[&tx.0]);
520							} else {
521								log_info!(logger, "Waiting for signature of unsigned onchain transaction {}", tx.0.compute_txid());
522							}
523						},
524						OnchainClaim::Event(event) => {
525							let log_start = if feerate_was_bumped { "Yielding fee-bumped" } else { "Replaying" };
526							log_info!(logger, "{} onchain event to spend inputs {:?}", log_start,
527								request.outpoints());
528							#[cfg(debug_assertions)] {
529								debug_assert!(request.requires_external_funding());
530								let num_existing = self.pending_claim_events.iter()
531									.filter(|entry| entry.0 == claim_id).count();
532								assert!(num_existing == 0 || num_existing == 1);
533							}
534							self.pending_claim_events.retain(|event| event.0 != claim_id);
535							self.pending_claim_events.push((claim_id, event));
536						}
537					}
538				});
539		}
540	}
541
542	/// Returns true if we are currently tracking any pending claim requests that are not fully
543	/// confirmed yet.
544	pub(super) fn has_pending_claims(&self) -> bool
545	{
546		self.pending_claim_requests.len() != 0
547	}
548
549	/// Lightning's security model (i.e., being able to redeem/timeout HTLCs or penalize a
550	/// counterparty onchain) relies on the assumption that claim transactions confirm before
551	/// timelock expiration (CSV or CLTV, depending on the case). During high-fee spikes, a claim tx
552	/// may get stuck in the mempool, so its feerate needs to be bumped quickly using Replace-By-Fee
553	/// or Child-Pays-For-Parent.
554	///
555	/// Panics if there are signing errors, because signing operations in reaction to on-chain
556	/// events are not expected to fail, and if they do, we may lose funds.
557	fn generate_claim<F: Deref, L: Logger>(
558		&mut self, cur_height: u32, cached_request: &PackageTemplate, feerate_strategy: &FeerateStrategy,
559		conf_target: ConfirmationTarget, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L,
560	) -> Option<(u32, u64, OnchainClaim)>
561	where F::Target: FeeEstimator,
562	{
563		let request_outpoints = cached_request.outpoints();
564		if request_outpoints.is_empty() {
565			// Don't prune the pending claim request yet, we may have to resurrect HTLCs. Untractable
566			// packages cannot be aggregated and will never be split, so we cannot end up with an
567			// empty claim.
568			debug_assert!(cached_request.is_malleable());
569			return None;
570		}
571		// If we've seen transaction inclusion in the chain for all outpoints in our request, we
572		// don't need to continue generating more claims. We'll keep tracking the request to fully
573		// remove it once it reaches the confirmation threshold, or to generate a new claim if the
574		// transaction is reorged out.
575		let mut all_inputs_have_confirmed_spend = true;
576		for outpoint in request_outpoints.iter() {
577			if let Some((request_claim_id, _)) = self.claimable_outpoints.get(*outpoint) {
578				// We check for outpoint spends within claims individually rather than as a set
579				// since requests can have outpoints split off.
580				if !self.onchain_events_awaiting_threshold_conf.iter()
581					.any(|event_entry| if let OnchainEvent::Claim { claim_id } = event_entry.event {
582						*request_claim_id == claim_id
583					} else {
584						// The onchain event is not a claim, keep seeking until we find one.
585						false
586					})
587				{
588					// Either we had no `OnchainEvent::Claim`, or we did but none matched the
589					// outpoint's registered spend.
590					all_inputs_have_confirmed_spend = false;
591				}
592			} else {
593				// The request's outpoint spend does not exist yet.
594				all_inputs_have_confirmed_spend = false;
595			}
596		}
597		if all_inputs_have_confirmed_spend {
598			return None;
599		}
600
601		// Compute the new height timer to decide when we need to regenerate a bumped version of the claim tx (if we
602		// haven't seen it confirm before then, or it doesn't have enough reorg-safe depth on top of it).
603		let new_timer = cached_request.get_height_timer(cur_height);
604		if cached_request.is_malleable() {
605			if cached_request.requires_external_funding() {
606				let target_feerate_sat_per_1000_weight = cached_request.compute_package_feerate(
607					fee_estimator, conf_target, feerate_strategy,
608				);
609				if let Some(htlcs) = cached_request.construct_malleable_package_with_external_funding(self) {
610					return Some((
611						new_timer,
612						target_feerate_sat_per_1000_weight as u64,
613						OnchainClaim::Event(ClaimEvent::BumpHTLC {
614							target_feerate_sat_per_1000_weight,
615							htlcs,
616							tx_lock_time: LockTime::from_consensus(cached_request.package_locktime(cur_height)),
617						}),
618					));
619				} else {
620					return None;
621				}
622			}
623
624			let predicted_weight = cached_request.package_weight(&self.destination_script);
625			if let Some((output_value, new_feerate)) = cached_request.compute_package_output(
626				predicted_weight, self.destination_script.minimal_non_dust().to_sat(),
627				feerate_strategy, conf_target, fee_estimator, logger,
628			) {
629				assert!(new_feerate != 0);
630
631				let transaction = cached_request.maybe_finalize_malleable_package(
632					cur_height, self, Amount::from_sat(output_value), self.destination_script.clone(), logger
633				).unwrap();
634				assert!(predicted_weight >= transaction.0.weight().to_wu());
635				return Some((new_timer, new_feerate, OnchainClaim::Tx(transaction)));
636			}
637		} else {
638			// Untractable packages cannot have their fees bumped through Replace-By-Fee. Some
639			// packages may support fee bumping through Child-Pays-For-Parent, indicated by those
640			// which require external funding.
641			let mut inputs = cached_request.inputs();
642			debug_assert_eq!(inputs.len(), 1);
643			let tx = match cached_request.maybe_finalize_untractable_package(self, logger) {
644				Some(tx) => tx,
645				None => return None,
646			};
647			if !cached_request.requires_external_funding() {
648				return Some((new_timer, 0, OnchainClaim::Tx(tx)));
649			}
650			return inputs.find_map(|input| match input {
651				// Commitment inputs with anchors support are the only untractable inputs supported
652				// thus far that require external funding.
653				PackageSolvingData::HolderFundingOutput(output) => {
654					debug_assert_eq!(tx.0.compute_txid(), self.holder_commitment.trust().txid(),
655						"Holder commitment transaction mismatch");
656
657					let package_target_feerate_sat_per_1000_weight = cached_request
658						.compute_package_feerate(fee_estimator, conf_target, feerate_strategy);
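					// If the pre-signed commitment already pays at least the target feerate, broadcast it
					// as-is rather than yielding an anchor-bump event. For instance, a 1,000,000 sat funding
					// input with 999,000 sat of outputs pays a 1,000 sat fee, which over a 600 WU transaction
					// is 1,000 * 1,000 / 600 ≈ 1,666 sat/kW.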
659					if let Some(input_amount_sat) = output.funding_amount {
660						let fee_sat = input_amount_sat - tx.0.output.iter().map(|output| output.value.to_sat()).sum::<u64>();
661						let commitment_tx_feerate_sat_per_1000_weight =
662							compute_feerate_sat_per_1000_weight(fee_sat, tx.0.weight().to_wu());
663						if commitment_tx_feerate_sat_per_1000_weight >= package_target_feerate_sat_per_1000_weight {
664							log_debug!(logger, "Pre-signed commitment {} already has feerate {} sat/kW above required {} sat/kW",
665								tx.0.compute_txid(), commitment_tx_feerate_sat_per_1000_weight,
666								package_target_feerate_sat_per_1000_weight);
667							return Some((new_timer, 0, OnchainClaim::Tx(tx.clone())));
668						}
669					}
670
671					// We'll locate an anchor output we can spend within the commitment transaction.
672					let funding_pubkey = &self.channel_transaction_parameters.holder_pubkeys.funding_pubkey;
673					match chan_utils::get_anchor_output(&tx.0, funding_pubkey) {
674						// An anchor output was found, so we should yield a funding event externally.
675						Some((idx, _)) => {
676							// TODO: Use a lower confirmation target when both our and the
677							// counterparty's latest commitment don't have any HTLCs present.
678							Some((
679								new_timer,
680								package_target_feerate_sat_per_1000_weight as u64,
681								OnchainClaim::Event(ClaimEvent::BumpCommitment {
682									package_target_feerate_sat_per_1000_weight,
683									commitment_tx: tx.0.clone(),
684									anchor_output_idx: idx,
685								}),
686							))
687						},
688						// An anchor output was not found. There's nothing we can do other than
689						// attempt to broadcast the transaction with its current fee rate and hope
690						// it confirms. This is essentially the same behavior as a commitment
691						// transaction without anchor outputs.
692						None => Some((new_timer, 0, OnchainClaim::Tx(tx.clone()))),
693					}
694				},
695				_ => {
696					debug_assert!(false, "Only HolderFundingOutput inputs should be untractable and require external funding");
697					None
698				},
699			})
700		}
701		None
702	}
703
704	pub fn abandon_claim(&mut self, outpoint: &BitcoinOutPoint) {
705		let claim_id = self.claimable_outpoints.get(outpoint).map(|(claim_id, _)| *claim_id)
706			.or_else(|| {
707				self.pending_claim_requests.iter()
708					.find(|(_, claim)| claim.outpoints().iter().any(|claim_outpoint| *claim_outpoint == outpoint))
709					.map(|(claim_id, _)| *claim_id)
710			});
711		if let Some(claim_id) = claim_id {
712			if let Some(claim) = self.pending_claim_requests.remove(&claim_id) {
713				for outpoint in claim.outpoints() {
714					self.claimable_outpoints.remove(outpoint);
715				}
716			}
717		} else {
718			self.locktimed_packages.values_mut().for_each(|claims|
719				claims.retain(|claim| !claim.outpoints().iter().any(|claim_outpoint| *claim_outpoint == outpoint)));
720		}
721	}
722
723	/// Upon channelmonitor.block_connected(..) or upon provision of a preimage on the forward link
724	/// for this channel, provide new relevant on-chain transactions and/or new claim requests.
725	/// Together with `update_claims_view_from_matched_txn` this used to be named
726	/// `block_connected`, but it is now also used for claiming an HTLC output if we receive a
727	/// preimage after force-close.
728	///
729	/// `conf_height` represents the height at which the request was generated. This
730	/// does not need to equal the current blockchain tip height, which should be provided via
731	/// `cur_height`, however it must never be higher than `cur_height`.
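	///
	/// For example, a claim generated when its parent transaction confirmed at height 700, but
	/// processed while the chain tip is at height 705, would be passed `conf_height = 700` and
	/// `cur_height = 705`.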
732	pub(super) fn update_claims_view_from_requests<B: Deref, F: Deref, L: Logger>(
733		&mut self, mut requests: Vec<PackageTemplate>, conf_height: u32, cur_height: u32,
734		broadcaster: &B, conf_target: ConfirmationTarget,
735		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
736	) where
737		B::Target: BroadcasterInterface,
738		F::Target: FeeEstimator,
739	{
740		if !requests.is_empty() {
741			log_debug!(logger, "Updating claims view at height {} with {} claim requests", cur_height, requests.len());
742		}
743
744		// First drop any duplicate claims.
745		requests.retain(|req| {
746			debug_assert_eq!(
747				req.outpoints().len(),
748				1,
749				"Claims passed to `update_claims_view_from_requests` should not be aggregated"
750			);
751			let mut all_outpoints_claiming = true;
752			for outpoint in req.outpoints() {
753				if self.claimable_outpoints.get(outpoint).is_none() {
754					all_outpoints_claiming = false;
755				}
756			}
757			if all_outpoints_claiming {
758				log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request",
759					req.outpoints()[0].txid, req.outpoints()[0].vout);
760				false
761			} else {
762				let timelocked_equivalent_package = self.locktimed_packages.iter().map(|v| v.1.iter()).flatten()
763					.find(|locked_package| locked_package.outpoints() == req.outpoints());
764				if let Some(package) = timelocked_equivalent_package {
765					log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.",
766						req.outpoints()[0].txid, req.outpoints()[0].vout, package.package_locktime(cur_height));
767					false
768				} else {
769					true
770				}
771			}
772		});
773
774		// Then try to maximally aggregate `requests`.
775		for i in (1..requests.len()).rev() {
776			for j in 0..i {
777				if requests[i].can_merge_with(&requests[j], cur_height) {
778					let merge = requests.remove(i);
779					if let Err(rejected) = requests[j].merge_package(merge, cur_height) {
780						debug_assert!(false, "Merging package should not be rejected after verifying can_merge_with.");
781						requests.insert(i, rejected);
782					} else {
783						break;
784					}
785				}
786			}
787		}
788
789		// Finally, split requests into timelocked ones and immediately-spendable ones.
790		let mut preprocessed_requests = Vec::with_capacity(requests.len());
791		for req in requests {
792			let package_locktime = req.package_locktime(cur_height);
793			if package_locktime > cur_height {
794				log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", package_locktime, cur_height);
795				for outpoint in req.outpoints() {
796					log_info!(logger, "  Outpoint {}", outpoint);
797				}
798				self.locktimed_packages.entry(package_locktime).or_default().push(req);
799			} else {
800				preprocessed_requests.push(req);
801			}
802		}
803
804		// Claim everything up to and including `cur_height`.
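		// `split_off(&(cur_height + 1))` leaves the entries with locktime <= `cur_height` in
		// `self.locktimed_packages` (drained into `preprocessed_requests` below) and returns the
		// still-locked entries, which we restore afterwards.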
805		let remaining_locked_packages = self.locktimed_packages.split_off(&(cur_height + 1));
806		if !self.locktimed_packages.is_empty() {
807			log_debug!(logger,
808				"Updating claims view at height {} with {} locked packages available for claim",
809				cur_height,
810				self.locktimed_packages.len());
811		}
812		for (pop_height, mut entry) in self.locktimed_packages.iter_mut() {
813			log_trace!(logger, "Restoring delayed claim of package(s) at their timelock at {}.", pop_height);
814			preprocessed_requests.append(&mut entry);
815		}
816		self.locktimed_packages = remaining_locked_packages;
817
818		// Generate claim transactions and track them so they can be bumped if necessary when
819		// their height timer expires (i.e., in however many blocks we're going to take action).
820		for mut req in preprocessed_requests {
821			if let Some((new_timer, new_feerate, claim)) = self.generate_claim(
822				cur_height, &req, &FeerateStrategy::ForceBump, conf_target, &*fee_estimator, &*logger,
823			) {
824				req.set_timer(new_timer);
825				req.set_feerate(new_feerate);
826				// Once a pending claim has an id assigned, it remains fixed until the claim is
827				// satisfied, regardless of whether the claim switches between different variants of
828				// `OnchainClaim`.
829				let claim_id = match claim {
830					OnchainClaim::Tx(tx) => {
831						if tx.is_fully_signed() {
832							log_info!(logger, "Broadcasting onchain {}", log_tx!(tx.0));
833							broadcaster.broadcast_transactions(&[&tx.0]);
834						} else {
835							log_info!(logger, "Waiting for signature of unsigned onchain transaction {}", tx.0.compute_txid());
836						}
837						ClaimId(tx.0.compute_txid().to_byte_array())
838					},
839					OnchainClaim::Event(claim_event) => {
840						log_info!(logger, "Yielding onchain event to spend inputs {:?}", req.outpoints());
841						let claim_id = match claim_event {
842							ClaimEvent::BumpCommitment { ref commitment_tx, .. } =>
843								// For commitment claims, we can just use their txid as it should
844								// already be unique.
845								ClaimId(commitment_tx.compute_txid().to_byte_array()),
846							ClaimEvent::BumpHTLC { ref htlcs, .. } => {
847								// For HTLC claims, commit to the entire set of HTLC outputs to
848								// claim, which will always be unique per request. Once a claim ID
849								// is generated, it is assigned and remains unchanged, even if the
850								// underlying set of HTLCs changes.
851								let mut engine = Sha256::engine();
852								for htlc in htlcs {
853									engine.input(&htlc.commitment_txid.to_byte_array());
854									engine.input(&htlc.htlc.transaction_output_index.unwrap().to_be_bytes());
855								}
856								ClaimId(Sha256::from_engine(engine).to_byte_array())
857							},
858						};
859						debug_assert!(self.pending_claim_requests.get(&claim_id).is_none());
860						debug_assert_eq!(self.pending_claim_events.iter().filter(|entry| entry.0 == claim_id).count(), 0);
861						self.pending_claim_events.push((claim_id, claim_event));
862						claim_id
863					},
864				};
865				// Because fuzzing can cause hash collisions, we can end up with conflicting claim
866				// ids here, so we only assert when not fuzzing.
867				debug_assert!(cfg!(fuzzing) || self.pending_claim_requests.get(&claim_id).is_none());
868				for (k, outpoint_confirmation_height) in req.outpoints_and_creation_heights() {
869					let creation_height = outpoint_confirmation_height.unwrap_or(conf_height);
870					log_info!(logger, "Registering claiming request for {}:{}, which exists as of height {creation_height}", k.txid, k.vout);
871					self.claimable_outpoints.insert(k.clone(), (claim_id, creation_height));
872				}
873				self.pending_claim_requests.insert(claim_id, req);
874			}
875		}
876	}
877
878	/// Upon channelmonitor.block_connected(..) or upon provision of a preimage on the forward link
879	/// for this channel, provide new relevant on-chain transactions and/or new claim requests.
880	/// Together with `update_claims_view_from_requests` this used to be named `block_connected`,
881	/// but it is now also used for claiming an HTLC output if we receive a preimage after force-close.
882	///
883	/// `conf_height` represents the height at which the transactions in `txn_matched` were
884	/// confirmed. This does not need to equal the current blockchain tip height, which should be
885	/// provided via `cur_height`, however it must never be higher than `cur_height`.
886	pub(super) fn update_claims_view_from_matched_txn<B: Deref, F: Deref, L: Logger>(
887		&mut self, txn_matched: &[&Transaction], conf_height: u32, conf_hash: BlockHash,
888		cur_height: u32, broadcaster: &B, conf_target: ConfirmationTarget,
889		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
890	) where
891		B::Target: BroadcasterInterface,
892		F::Target: FeeEstimator,
893	{
894		let mut have_logged_intro = false;
895		let mut maybe_log_intro = || {
896			if !have_logged_intro {
897				log_debug!(logger, "Updating claims view at height {} with {} matched transactions in block {}", cur_height, txn_matched.len(), conf_height);
898				have_logged_intro = true;
899			}
900		};
901		let mut bump_candidates = new_hash_map();
902		if !txn_matched.is_empty() { maybe_log_intro(); }
903		for tx in txn_matched {
904			// Scan all inputs to check whether any of the spent outpoints is of interest to us
905			let mut claimed_outputs_material = Vec::new();
906			for inp in &tx.input {
907				if let Some((claim_id, _)) = self.claimable_outpoints.get(&inp.previous_output) {
908					// If outpoint has claim request pending on it...
909					if let Some(request) = self.pending_claim_requests.get_mut(claim_id) {
910						//... we need to check if the pending claim was for a subset of the outputs
911						// spent by the confirmed transaction. If so, we can drop the pending claim
912						// after ANTI_REORG_DELAY blocks, otherwise we need to split it and retry
913						// claiming the remaining outputs.
914						let mut is_claim_subset_of_tx = true;
915						let mut tx_inputs = tx.input.iter().map(|input| &input.previous_output).collect::<Vec<_>>();
916						tx_inputs.sort_unstable();
917						for request_input in request.outpoints() {
918							if tx_inputs.binary_search(&request_input).is_err() {
919								is_claim_subset_of_tx = false;
920								break;
921							}
922						}
923
924						macro_rules! clean_claim_request_after_safety_delay {
925							() => {
926								let entry = OnchainEventEntry {
927									txid: tx.compute_txid(),
928									height: conf_height,
929									block_hash: Some(conf_hash),
930									event: OnchainEvent::Claim { claim_id: *claim_id }
931								};
932								if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
933									self.onchain_events_awaiting_threshold_conf.push(entry);
934								}
935							}
936						}
937
938						// If this is our transaction (or our counterparty spent all the outputs
939						// before we could anyway, with the same input set as us), wait for
940						// ANTI_REORG_DELAY and clean the RBF tracking map.
941						if is_claim_subset_of_tx {
942							clean_claim_request_after_safety_delay!();
943						} else { // Otherwise, generate a new claim request with the updated outpoint set
944							let mut at_least_one_drop = false;
945							for input in tx.input.iter() {
946								if let Some(package) = request.split_package(&input.previous_output) {
947									claimed_outputs_material.push(package);
948									at_least_one_drop = true;
949								}
950								// If there are no outpoints left to claim in this request, drop it entirely after ANTI_REORG_DELAY.
951								if request.outpoints().is_empty() {
952									clean_claim_request_after_safety_delay!();
953								}
954							}
955							//TODO: recompute soonest_timelock to avoid wasting a bit on fees
956							if at_least_one_drop {
957								bump_candidates.insert(*claim_id, request.clone());
958								// If we have any pending claim events for the request being updated
959								// that have yet to be consumed, we'll remove them since they will
960								// end up producing an invalid transaction by double spending
961								// input(s) that already have a confirmed spend. If such spend is
962								// reorged out of the chain, then we'll attempt to re-spend the
963								// inputs once we see it.
964								#[cfg(debug_assertions)] {
965									let existing = self.pending_claim_events.iter()
966										.filter(|entry| entry.0 == *claim_id).count();
967									assert!(existing == 0 || existing == 1);
968								}
969								self.pending_claim_events.retain(|entry| entry.0 != *claim_id);
970							}
971						}
972					} else {
973						panic!("Inconsistencies between pending_claim_requests map and claimable_outpoints map");
974					}
975				}
976
977				// Also remove/split any locktimed packages whose inputs have been spent by this transaction.
978				self.locktimed_packages.retain(|_locktime, packages|{
979					packages.retain_mut(|package| {
980						if let Some(p) = package.split_package(&inp.previous_output) {
981							claimed_outputs_material.push(p);
982						}
983						!package.outpoints().is_empty()
984					});
985					!packages.is_empty()
986				});
987			}
988			for package in claimed_outputs_material.drain(..) {
989				let entry = OnchainEventEntry {
990					txid: tx.compute_txid(),
991					height: conf_height,
992					block_hash: Some(conf_hash),
993					event: OnchainEvent::ContentiousOutpoint { package },
994				};
995				if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
996					self.onchain_events_awaiting_threshold_conf.push(entry);
997				}
998			}
999		}
1000
1001		// After the security delay, either our claim tx got enough confs or the outpoint is definitely out of reach
1002		let onchain_events_awaiting_threshold_conf =
1003			self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>();
1004		for entry in onchain_events_awaiting_threshold_conf {
1005			if entry.has_reached_confirmation_threshold(cur_height) {
1006				maybe_log_intro();
1007				match entry.event {
1008					OnchainEvent::Claim { claim_id } => {
1009						// We may remove a whole set of claim outpoints here, as they may have
1010						// been aggregated in a single tx and thus claimed atomically.
1011						if let Some(request) = self.pending_claim_requests.remove(&claim_id) {
1012							for outpoint in request.outpoints() {
1013								log_debug!(logger, "Removing claim tracking for {} due to maturation of claim package {}.",
1014									outpoint, log_bytes!(claim_id.0));
1015								self.claimable_outpoints.remove(outpoint);
1016							}
1017							#[cfg(debug_assertions)] {
1018								let num_existing = self.pending_claim_events.iter()
1019									.filter(|entry| entry.0 == claim_id).count();
1020								assert!(num_existing == 0 || num_existing == 1);
1021							}
1022							self.pending_claim_events.retain(|(id, _)| *id != claim_id);
1023						}
1024					},
1025					OnchainEvent::ContentiousOutpoint { package } => {
1026						log_debug!(logger, "Removing claim tracking due to maturation of claim tx for outpoints:");
1027						log_debug!(logger, " {:?}", package.outpoints());
1028						self.claimable_outpoints.remove(package.outpoints()[0]);
1029					}
1030				}
1031			} else {
1032				self.onchain_events_awaiting_threshold_conf.push(entry);
1033			}
1034		}
1035
1036		// Check if any pending claim request must be rescheduled
1037		for (claim_id, request) in self.pending_claim_requests.iter() {
1038			if cur_height >= request.timer() {
1039				bump_candidates.insert(*claim_id, request.clone());
1040			}
1041		}
1042
1043		// Build, bump and rebroadcast tx accordingly
1044		if !bump_candidates.is_empty() {
1045			maybe_log_intro();
1046			log_trace!(logger, "Bumping {} candidates", bump_candidates.len());
1047		}
1048
1049		for (claim_id, request) in bump_candidates.iter() {
1050			if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(
1051				cur_height, &request, &FeerateStrategy::ForceBump, conf_target, &*fee_estimator, &*logger,
1052			) {
1053				match bump_claim {
1054					OnchainClaim::Tx(bump_tx) => {
1055						if bump_tx.is_fully_signed() {
1056							log_info!(logger, "Broadcasting RBF-bumped onchain {}", log_tx!(bump_tx.0));
1057							broadcaster.broadcast_transactions(&[&bump_tx.0]);
1058						} else {
1059							log_info!(logger, "Waiting for signature of RBF-bumped unsigned onchain transaction {}",
1060								bump_tx.0.compute_txid());
1061						}
1062					},
1063					OnchainClaim::Event(claim_event) => {
1064						log_info!(logger, "Yielding RBF-bumped onchain event to spend inputs {:?}", request.outpoints());
1065						#[cfg(debug_assertions)] {
1066							let num_existing = self.pending_claim_events.iter().
1067								filter(|entry| entry.0 == *claim_id).count();
1068							assert!(num_existing == 0 || num_existing == 1);
1069						}
1070						self.pending_claim_events.retain(|event| event.0 != *claim_id);
1071						self.pending_claim_events.push((*claim_id, claim_event));
1072					},
1073				}
1074				if let Some(request) = self.pending_claim_requests.get_mut(claim_id) {
1075					request.set_timer(new_timer);
1076					request.set_feerate(new_feerate);
1077				}
1078			}
1079		}
1080	}
1081
1082	pub(super) fn transaction_unconfirmed<B: Deref, F: Deref, L: Logger>(
1083		&mut self,
1084		txid: &Txid,
1085		broadcaster: B,
1086		conf_target: ConfirmationTarget,
1087		fee_estimator: &LowerBoundedFeeEstimator<F>,
1088		logger: &L,
1089	) where
1090		B::Target: BroadcasterInterface,
1091		F::Target: FeeEstimator,
1092	{
1093		let mut height = None;
1094		for entry in self.onchain_events_awaiting_threshold_conf.iter() {
1095			if entry.txid == *txid {
1096				height = Some(entry.height);
1097				break;
1098			}
1099		}
1100
1101		if let Some(height) = height {
1102			self.block_disconnected(height, broadcaster, conf_target, fee_estimator, logger);
1103		}
1104	}
1105
1106	pub(super) fn block_disconnected<B: Deref, F: Deref, L: Logger>(
1107		&mut self, height: u32, broadcaster: B, conf_target: ConfirmationTarget,
1108		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L,
1109	)
1110		where B::Target: BroadcasterInterface,
1111			F::Target: FeeEstimator,
1112	{
1113		let mut bump_candidates = new_hash_map();
1114		let onchain_events_awaiting_threshold_conf =
1115			self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>();
1116		for entry in onchain_events_awaiting_threshold_conf {
1117			if entry.height >= height {
1118				//- our claim tx on a commitment tx output
1119				//- resurrect the outpoint back into its claimable set and regenerate the tx
1120				match entry.event {
1121					OnchainEvent::ContentiousOutpoint { package } => {
1122						// We pass 0 to `package_locktime` to get the actual required locktime.
1123						let package_locktime = package.package_locktime(0);
1124						if package_locktime >= height {
1125							self.locktimed_packages.entry(package_locktime).or_default().push(package);
1126							continue;
1127						}
1128
1129						if let Some(pending_claim) = self.claimable_outpoints.get(package.outpoints()[0]) {
1130							if let Some(request) = self.pending_claim_requests.get_mut(&pending_claim.0) {
1131								assert!(request.merge_package(package, height).is_ok());
1132								// Using a HashMap guarantees that if we have multiple outpoints getting
1133								// resurrected, only one bumped claim tx will be broadcast
1134								bump_candidates.insert(pending_claim.clone(), request.clone());
1135							}
1136						}
1137					},
1138					_ => {},
1139				}
1140			} else {
1141				self.onchain_events_awaiting_threshold_conf.push(entry);
1142			}
1143		}
1144		for ((_claim_id, _), ref mut request) in bump_candidates.iter_mut() {
1145			// `height` is the height being disconnected, so our `current_height` is 1 lower.
1146			let current_height = height - 1;
1147			if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(
1148				current_height, &request, &FeerateStrategy::ForceBump, conf_target, fee_estimator, logger
1149			) {
1150				request.set_timer(new_timer);
1151				request.set_feerate(new_feerate);
1152				match bump_claim {
1153					OnchainClaim::Tx(bump_tx) => {
1154						if bump_tx.is_fully_signed() {
1155							log_info!(logger, "Broadcasting onchain {}", log_tx!(bump_tx.0));
1156							broadcaster.broadcast_transactions(&[&bump_tx.0]);
1157						} else {
1158							log_info!(logger, "Waiting for signature of unsigned onchain transaction {}", bump_tx.0.compute_txid());
1159						}
1160					},
1161					OnchainClaim::Event(claim_event) => {
1162						log_info!(logger, "Yielding onchain event after reorg to spend inputs {:?}", request.outpoints());
1163						#[cfg(debug_assertions)] {
1164							let num_existing = self.pending_claim_events.iter()
1165								.filter(|entry| entry.0 == *_claim_id).count();
1166							assert!(num_existing == 0 || num_existing == 1);
1167						}
1168						self.pending_claim_events.retain(|event| event.0 != *_claim_id);
1169						self.pending_claim_events.push((*_claim_id, claim_event));
1170					},
1171				}
1172			}
1173		}
1174		for (ancestor_claim_txid, request) in bump_candidates.drain() {
1175			self.pending_claim_requests.insert(ancestor_claim_txid.0, request);
1176		}
1177		//TODO: if we implement cross-block aggregated claim transactions we need to refresh the set of outpoints and regenerate the tx, but
1178		// right now, if one of the outpoints gets disconnected, we just erase the whole pending claim request.
1179		let mut remove_request = Vec::new();
1180		self.claimable_outpoints.retain(|_, ref v|
1181			if v.1 >= height {
1182				remove_request.push(v.0.clone());
1183				false
1184			} else { true });
1185		for req in remove_request {
1186			self.pending_claim_requests.remove(&req);
1187		}
1188	}
1189
1190	pub(crate) fn is_output_spend_pending(&self, outpoint: &BitcoinOutPoint) -> bool {
1191		self.claimable_outpoints.get(outpoint).is_some()
1192	}
1193
1194	pub(crate) fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option<BlockHash>)> {
1195		let mut txids: Vec<(Txid, u32, Option<BlockHash>)> = self.onchain_events_awaiting_threshold_conf
1196			.iter()
1197			.map(|entry| (entry.txid, entry.height, entry.block_hash))
1198			.collect();
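		// Sort by txid (ascending) and then by height (descending) so that deduplicating by txid
		// keeps the entry with the greatest confirmation height for each transaction.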
1199		txids.sort_unstable_by(|a, b| a.0.cmp(&b.0).then(b.1.cmp(&a.1)));
1200		txids.dedup_by_key(|(txid, _, _)| *txid);
1201		txids
1202	}
1203
1204	pub(crate) fn provide_latest_holder_tx(&mut self, tx: HolderCommitmentTransaction) {
1205		self.prev_holder_commitment = Some(replace(&mut self.holder_commitment, tx));
1206	}
1207
1208	pub(crate) fn get_unsigned_holder_commitment_tx(&self) -> &Transaction {
1209		&self.holder_commitment.trust().built_transaction().transaction
1210	}
1211
1212	pub(crate) fn get_maybe_signed_holder_tx(&mut self, funding_redeemscript: &Script) -> MaybeSignedTransaction {
1213		let tx = self.signer.sign_holder_commitment(&self.holder_commitment, &self.secp_ctx)
1214			.map(|sig| self.holder_commitment.add_holder_sig(funding_redeemscript, sig))
1215			.unwrap_or_else(|_| self.get_unsigned_holder_commitment_tx().clone());
1216		MaybeSignedTransaction(tx)
1217	}
1218
1219	#[cfg(any(test, feature="unsafe_revoked_tx_signing"))]
1220	pub(crate) fn get_fully_signed_copy_holder_tx(&mut self, funding_redeemscript: &Script) -> Transaction {
1221		let sig = self.signer.unsafe_sign_holder_commitment(&self.holder_commitment, &self.secp_ctx).expect("sign holder commitment");
1222		self.holder_commitment.add_holder_sig(funding_redeemscript, sig)
1223	}
1224
1225	pub(crate) fn get_maybe_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<MaybeSignedTransaction> {
1226		let get_signed_htlc_tx = |holder_commitment: &HolderCommitmentTransaction| {
1227			let trusted_tx = holder_commitment.trust();
1228			if trusted_tx.txid() != outp.txid {
1229				return None;
1230			}
1231			let (htlc_idx, htlc) = trusted_tx.htlcs().iter().enumerate()
1232				.find(|(_, htlc)| htlc.transaction_output_index.unwrap() == outp.vout)
1233				.unwrap();
1234			let counterparty_htlc_sig = holder_commitment.counterparty_htlc_sigs[htlc_idx];
1235			let mut htlc_tx = trusted_tx.build_unsigned_htlc_tx(
1236				&self.channel_transaction_parameters.as_holder_broadcastable(), htlc_idx, preimage,
1237			);
1238
1239			let htlc_descriptor = HTLCDescriptor {
1240				channel_derivation_parameters: ChannelDerivationParameters {
1241					value_satoshis: self.channel_value_satoshis,
1242					keys_id: self.channel_keys_id,
1243					transaction_parameters: self.channel_transaction_parameters.clone(),
1244				},
1245				commitment_txid: trusted_tx.txid(),
1246				per_commitment_number: trusted_tx.commitment_number(),
1247				per_commitment_point: trusted_tx.per_commitment_point(),
1248				feerate_per_kw: trusted_tx.feerate_per_kw(),
1249				htlc: htlc.clone(),
1250				preimage: preimage.clone(),
1251				counterparty_sig: counterparty_htlc_sig.clone(),
1252			};
1253			if let Ok(htlc_sig) = self.signer.sign_holder_htlc_transaction(&htlc_tx, 0, &htlc_descriptor, &self.secp_ctx) {
1254				htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness(
1255					htlc_idx, &counterparty_htlc_sig, &htlc_sig, preimage,
1256				);
1257			}
1258			Some(MaybeSignedTransaction(htlc_tx))
1259		};
1260
1261		// Check if the HTLC spends from the current holder commitment first, or the previous.
1262		get_signed_htlc_tx(&self.holder_commitment)
1263			.or_else(|| self.prev_holder_commitment.as_ref().and_then(|prev_holder_commitment| get_signed_htlc_tx(prev_holder_commitment)))
1264	}
1265
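	/// Builds an [`ExternalHTLCClaim`] describing how to claim the given HTLC output of either the
	/// current or the previous holder commitment transaction, typically for channels where HTLC
	/// transactions require external funding (e.g. anchor outputs channels). Returns `None` if the
	/// outpoint is not an HTLC output on either holder commitment.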
1266	pub(crate) fn generate_external_htlc_claim(
1267		&self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>
1268	) -> Option<ExternalHTLCClaim> {
1269		let find_htlc = |holder_commitment: &HolderCommitmentTransaction| -> Option<ExternalHTLCClaim> {
1270			let trusted_tx = holder_commitment.trust();
1271			if outp.txid != trusted_tx.txid() {
1272				return None;
1273			}
1274			trusted_tx.htlcs().iter().enumerate()
1275				.find(|(_, htlc)| if let Some(output_index) = htlc.transaction_output_index {
1276					output_index == outp.vout
1277				} else {
1278					false
1279				})
1280				.map(|(htlc_idx, htlc)| {
1281					let counterparty_htlc_sig = holder_commitment.counterparty_htlc_sigs[htlc_idx];
1282					ExternalHTLCClaim {
1283						commitment_txid: trusted_tx.txid(),
1284						per_commitment_number: trusted_tx.commitment_number(),
1285						htlc: htlc.clone(),
1286						preimage: *preimage,
1287						counterparty_sig: counterparty_htlc_sig,
1288						per_commitment_point: trusted_tx.per_commitment_point(),
1289					}
1290				})
1291		};
1292		// Check if the HTLC spends from the current holder commitment first, falling back to the previous one otherwise.
1293		find_htlc(&self.holder_commitment)
1294			.or_else(|| self.prev_holder_commitment.as_ref().and_then(|c| find_htlc(c)))
1295	}
1296
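	/// Returns the [`ChannelTypeFeatures`] negotiated for this channel, as recorded in the channel
	/// transaction parameters.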
1297	pub(crate) fn channel_type_features(&self) -> &ChannelTypeFeatures {
1298		&self.channel_transaction_parameters.channel_type_features
1299	}
1300}
1301
1302#[cfg(test)]
1303mod tests {
1304	use bitcoin::hash_types::Txid;
1305	use bitcoin::hashes::sha256::Hash as Sha256;
1306	use bitcoin::hashes::Hash;
1307	use bitcoin::Network;
1308	use bitcoin::{key::Secp256k1, secp256k1::PublicKey, secp256k1::SecretKey, ScriptBuf};
1309	use crate::types::features::ChannelTypeFeatures;
1310
1311	use crate::chain::chaininterface::{ConfirmationTarget, LowerBoundedFeeEstimator};
1312	use crate::chain::package::{HolderHTLCOutput, PackageSolvingData, PackageTemplate};
1313	use crate::chain::transaction::OutPoint;
1314	use crate::ln::chan_utils::{
1315		ChannelPublicKeys, ChannelTransactionParameters, CounterpartyChannelTransactionParameters,
1316		HTLCOutputInCommitment, HolderCommitmentTransaction,
1317	};
1318	use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
1319	use crate::ln::functional_test_utils::create_dummy_block;
1320	use crate::sign::InMemorySigner;
1321	use crate::types::payment::{PaymentHash, PaymentPreimage};
1322	use crate::util::test_utils::{TestBroadcaster, TestFeeEstimator, TestLogger};
1323
1324	use super::OnchainTxHandler;
1325
1326	// Test that all claims with locktime equal to or less than the current height are broadcast
1327	// immediately while claims with locktime greater than the current height are only broadcast
1328	// once the locktime is reached.
1329	#[test]
1330	fn test_broadcast_height() {
1331		let secp_ctx = Secp256k1::new();
1332		let signer = InMemorySigner::new(
1333			&secp_ctx,
1334			SecretKey::from_slice(&[41; 32]).unwrap(),
1335			SecretKey::from_slice(&[41; 32]).unwrap(),
1336			SecretKey::from_slice(&[41; 32]).unwrap(),
1337			SecretKey::from_slice(&[41; 32]).unwrap(),
1338			SecretKey::from_slice(&[41; 32]).unwrap(),
1339			[41; 32],
1340			0,
1341			[0; 32],
1342			[0; 32],
1343		);
1344		let counterparty_pubkeys = ChannelPublicKeys {
1345			funding_pubkey: PublicKey::from_secret_key(
1346				&secp_ctx,
1347				&SecretKey::from_slice(&[44; 32]).unwrap(),
1348			),
1349			revocation_basepoint: RevocationBasepoint::from(PublicKey::from_secret_key(
1350				&secp_ctx,
1351				&SecretKey::from_slice(&[45; 32]).unwrap(),
1352			)),
1353			payment_point: PublicKey::from_secret_key(
1354				&secp_ctx,
1355				&SecretKey::from_slice(&[46; 32]).unwrap(),
1356			),
1357			delayed_payment_basepoint: DelayedPaymentBasepoint::from(PublicKey::from_secret_key(
1358				&secp_ctx,
1359				&SecretKey::from_slice(&[47; 32]).unwrap(),
1360			)),
1361			htlc_basepoint: HtlcBasepoint::from(PublicKey::from_secret_key(
1362				&secp_ctx,
1363				&SecretKey::from_slice(&[48; 32]).unwrap(),
1364			)),
1365		};
1366		let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::MAX };
1367
1368		// Use non-anchor channels so that HTLC-Timeouts are broadcast immediately instead of being
1369		// sent to the user for external funding.
1370		let chan_params = ChannelTransactionParameters {
1371			holder_pubkeys: signer.holder_channel_pubkeys.clone(),
1372			holder_selected_contest_delay: 66,
1373			is_outbound_from_holder: true,
1374			counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
1375				pubkeys: counterparty_pubkeys,
1376				selected_contest_delay: 67,
1377			}),
1378			funding_outpoint: Some(funding_outpoint),
1379			channel_type_features: ChannelTypeFeatures::only_static_remote_key(),
1380		};
1381
1382		// Create an OnchainTxHandler for a commitment containing HTLCs with CLTV expiries of 0, 1,
1383		// and 2 blocks.
1384		let mut htlcs = Vec::new();
1385		for i in 0..3 {
1386			let preimage = PaymentPreimage([i; 32]);
1387			let hash = PaymentHash(Sha256::hash(&preimage.0[..]).to_byte_array());
1388			htlcs.push((
1389				HTLCOutputInCommitment {
1390					offered: true,
1391					amount_msat: 10000,
1392					cltv_expiry: i as u32,
1393					payment_hash: hash,
1394					transaction_output_index: Some(i as u32),
1395				},
1396				(),
1397			));
1398		}
1399		let holder_commit = HolderCommitmentTransaction::dummy(&mut htlcs);
1400		let mut tx_handler = OnchainTxHandler::new(
1401			1000000,
1402			[0; 32],
1403			ScriptBuf::new(),
1404			signer,
1405			chan_params,
1406			holder_commit,
1407			secp_ctx,
1408		);
1409
1410		// Create a broadcaster with current block height 1.
1411		let broadcaster = TestBroadcaster::new(Network::Testnet);
1412		{
1413			let mut blocks = broadcaster.blocks.lock().unwrap();
1414			let genesis_hash = blocks[0].0.block_hash();
1415			blocks.push((create_dummy_block(genesis_hash, 0, Vec::new()), 1));
1416		}
1417
1418		let fee_estimator = TestFeeEstimator::new(253);
1419		let fee_estimator = LowerBoundedFeeEstimator::new(&fee_estimator);
1420		let logger = TestLogger::new();
1421
1422		// Request claiming of each HTLC on the holder's commitment, with current block height 1.
1423		let holder_commit_txid = tx_handler.get_unsigned_holder_commitment_tx().compute_txid();
1424		let mut requests = Vec::new();
1425		for (htlc, _) in htlcs {
1426			requests.push(PackageTemplate::build_package(
1427				holder_commit_txid,
1428				htlc.transaction_output_index.unwrap(),
1429				PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build_offered(
1430					htlc.amount_msat,
1431					htlc.cltv_expiry,
1432					ChannelTypeFeatures::only_static_remote_key(),
1433					0,
1434				)),
1435				0,
1436			));
1437		}
1438		tx_handler.update_claims_view_from_requests(
1439			requests,
1440			1,
1441			1,
1442			&&broadcaster,
1443			ConfirmationTarget::UrgentOnChainSweep,
1444			&fee_estimator,
1445			&logger,
1446		);
1447
1448		// HTLC-Timeouts should be broadcast for the HTLCs with expiries at heights 0 and 1. The
1449		// HTLC with expiry at height 2 should not be claimed yet.
1450		let txs_broadcasted = broadcaster.txn_broadcast();
1451		assert_eq!(txs_broadcasted.len(), 2);
1452		assert!(txs_broadcasted[0].lock_time.to_consensus_u32() <= 1);
1453		assert!(txs_broadcasted[1].lock_time.to_consensus_u32() <= 1);
1454
1455		// Advance to block height 2, and reprocess pending claims.
1456		{
1457			let mut blocks = broadcaster.blocks.lock().unwrap();
1458			let block1_hash = blocks[1].0.block_hash();
1459			blocks.push((create_dummy_block(block1_hash, 0, Vec::new()), 2));
1460		}
1461		tx_handler.update_claims_view_from_requests(
1462			Vec::new(),
1463			2,
1464			2,
1465			&&broadcaster,
1466			ConfirmationTarget::UrgentOnChainSweep,
1467			&fee_estimator,
1468			&logger,
1469		);
1470
1471		// The final HTLC-Timeout should now be broadcast.
1472		let txs_broadcasted = broadcaster.txn_broadcast();
1473		assert_eq!(txs_broadcasted.len(), 1);
1474		assert_eq!(txs_broadcasted[0].lock_time.to_consensus_u32(), 2);
1475	}
1476}