lightning/chain/
chainmonitor.rs

1// This file is Copyright its original authors, visible in version control
2// history.
3//
4// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7// You may not use this file except in accordance with one or both of these
8// licenses.
9
10//! Logic to connect off-chain channel management with on-chain transaction monitoring.
11//!
12//! [`ChainMonitor`] is an implementation of [`chain::Watch`] used both to process blocks and to
13//! update [`ChannelMonitor`]s accordingly. If any on-chain events need further processing, it will
14//! make those available as [`MonitorEvent`]s to be consumed.
15//!
16//! [`ChainMonitor`] is parameterized by an optional chain source, which must implement the
17//! [`chain::Filter`] trait. This provides a mechanism to signal new relevant outputs back to light
18//! clients, such that transactions spending those outputs are included in block data.
19//!
20//! [`ChainMonitor`] may be used directly to monitor channels locally or as a part of a distributed
21//! setup to monitor channels remotely. In the latter case, a custom [`chain::Watch`] implementation
22//! would be responsible for routing each update to a remote server and for retrieving monitor
23//! events. The remote server would make use of [`ChainMonitor`] for block processing and for
24//! servicing [`ChannelMonitor`] updates from the client.
25
26use bitcoin::block::Header;
27use bitcoin::hash_types::{Txid, BlockHash};
28
29use crate::chain;
30use crate::chain::{ChannelMonitorUpdateStatus, Filter, WatchedOutput};
31use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
32use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, WithChannelMonitor};
33use crate::chain::transaction::{OutPoint, TransactionData};
34use crate::ln::types::ChannelId;
35use crate::sign::ecdsa::EcdsaChannelSigner;
36use crate::events::{self, Event, EventHandler, ReplayEvent};
37use crate::util::logger::{Logger, WithContext};
38use crate::util::errors::APIError;
39use crate::util::wakers::{Future, Notifier};
40use crate::ln::channel_state::ChannelDetails;
41
42use crate::prelude::*;
43use crate::sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard};
44use core::ops::Deref;
45use core::sync::atomic::{AtomicUsize, Ordering};
46use bitcoin::hashes::Hash;
47use bitcoin::secp256k1::PublicKey;
48
49/// `Persist` defines behavior for persisting channel monitors: this could mean
50/// writing once to disk, and/or uploading to one or more backup services.
51///
52/// Persistence can happen in one of two ways: synchronously, completing before the trait method
53/// calls return, or asynchronously in the background.
54///
55/// # For those implementing synchronous persistence
56///
57///  * If persistence completes fully (including any relevant `fsync()` calls), the implementation
58///    should return [`ChannelMonitorUpdateStatus::Completed`], indicating normal channel operation
59///    should continue.
60///
61///  * If persistence fails for some reason, implementations should consider returning
62///    [`ChannelMonitorUpdateStatus::InProgress`] and retry all pending persistence operations in
63///    the background with [`ChainMonitor::list_pending_monitor_updates`] and
64///    [`ChainMonitor::get_monitor`].
65///
66///    Once a full [`ChannelMonitor`] has been persisted, all pending updates for that channel can
67///    be marked as complete via [`ChainMonitor::channel_monitor_updated`].
68///
69///    If at some point no further progress can be made towards persisting the pending updates, the
70///    node should simply shut down.
71///
72///  * If the persistence has failed and cannot be retried further (e.g. because of an outage),
73///    [`ChannelMonitorUpdateStatus::UnrecoverableError`] can be used, though this will result in
74///    an immediate panic and future operations in LDK generally failing.
75///
76/// # For those implementing asynchronous persistence
77///
78///  All calls should generally spawn a background task and immediately return
79///  [`ChannelMonitorUpdateStatus::InProgress`]. Once the update completes,
80///  [`ChainMonitor::channel_monitor_updated`] should be called with the corresponding
81///  [`ChannelMonitor::get_latest_update_id`] or [`ChannelMonitorUpdate::update_id`].
82///
83///  Note that unlike the direct [`chain::Watch`] interface,
84///  [`ChainMonitor::channel_monitor_updated`] must be called once for *each* update which occurs.
85///
86///  If at some point no further progress can be made towards persisting a pending update, the node
87///  should simply shut down. Until then, the background task should either loop indefinitely, or
88///  persistence should be regularly retried with [`ChainMonitor::list_pending_monitor_updates`]
89///  and [`ChainMonitor::get_monitor`] (note that if a full monitor is persisted all pending
90///  monitor updates may be marked completed).
91///
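/// As a minimal sketch of this asynchronous flow (assuming a hypothetical `spawn` helper, a
/// `remote_store` backend, and a `chain_monitor_handle` through which
/// [`ChainMonitor::channel_monitor_updated`] can later be reached; none of these names are part of
/// LDK):
///
/// ```ignore
/// fn update_persisted_channel(
/// 	&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>,
/// 	monitor: &ChannelMonitor<Signer>,
/// ) -> ChannelMonitorUpdateStatus {
/// 	let update_id = monitor.get_latest_update_id();
/// 	let bytes = monitor.encode(); // full monitor serialization via `Writeable`
/// 	let store = self.remote_store.clone();
/// 	let handle = self.chain_monitor_handle.clone();
/// 	spawn(async move {
/// 		// Upload in the background and only signal completion once the write is durable.
/// 		if store.put(funding_txo, bytes).await.is_ok() {
/// 			let _ = handle.channel_monitor_updated(funding_txo, update_id);
/// 		}
/// 		// On failure, retry via `list_pending_monitor_updates`/`get_monitor`, or shut down
/// 		// if no further progress can be made.
/// 	});
/// 	ChannelMonitorUpdateStatus::InProgress
/// }
/// ```
///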
92/// # Using remote watchtowers
93///
94/// Watchtowers may be updated as part of an implementation of this trait, utilizing the async
95/// update process described above while the watchtower is being updated. The following methods are
96/// provided for building transactions for a watchtower:
97/// [`ChannelMonitor::initial_counterparty_commitment_tx`],
98/// [`ChannelMonitor::counterparty_commitment_txs_from_update`],
99/// [`ChannelMonitor::sign_to_local_justice_tx`], [`TrustedCommitmentTransaction::revokeable_output_index`],
100/// [`TrustedCommitmentTransaction::build_to_local_justice_tx`].
101///
102/// [`TrustedCommitmentTransaction::revokeable_output_index`]: crate::ln::chan_utils::TrustedCommitmentTransaction::revokeable_output_index
103/// [`TrustedCommitmentTransaction::build_to_local_justice_tx`]: crate::ln::chan_utils::TrustedCommitmentTransaction::build_to_local_justice_tx
104pub trait Persist<ChannelSigner: EcdsaChannelSigner> {
105	/// Persist a new channel's data in response to a [`chain::Watch::watch_channel`] call. This is
106	/// called by [`ChannelManager`] for new channels, or may be called directly, e.g. on startup.
107	///
108	/// The data can be stored any way you want, but the identifier provided by LDK is the
109	/// channel's outpoint (and it is up to you to maintain a correct mapping between the outpoint
110	/// and the stored channel data). Note that you **must** persist every new monitor to disk.
111	///
112	/// The [`ChannelMonitor::get_latest_update_id`] uniquely links this call to [`ChainMonitor::channel_monitor_updated`].
113	/// For [`Persist::persist_new_channel`], it is only necessary to call [`ChainMonitor::channel_monitor_updated`]
114	/// when you return [`ChannelMonitorUpdateStatus::InProgress`].
115	///
116	/// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`
117	/// and [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
118	///
119	/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
120	/// [`Writeable::write`]: crate::util::ser::Writeable::write
121	fn persist_new_channel(&self, channel_funding_outpoint: OutPoint, monitor: &ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus;
122
123	/// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
124	/// update.
125	///
126	/// Note that on every update, you **must** persist either the [`ChannelMonitorUpdate`] or the
127	/// updated monitor itself to disk/backups. See the [`Persist`] trait documentation for more
128	/// details.
129	///
130	/// During blockchain synchronization operations, and in some rare cases, this may be called with
131	/// no [`ChannelMonitorUpdate`], in which case the full [`ChannelMonitor`] needs to be persisted.
132	/// Note that after the full [`ChannelMonitor`] is persisted any previous
133	/// [`ChannelMonitorUpdate`]s which were persisted should be discarded - they can no longer be
134	/// applied to the persisted [`ChannelMonitor`] as they were already applied.
135	///
136	/// If an implementer chooses to persist the updates only, they need to make
137	/// sure that all the updates are applied to the `ChannelMonitors` *before*
138	/// the set of channel monitors is given to the `ChannelManager`
139	/// deserialization routine. If there are any gaps in the persisted [`ChannelMonitorUpdate`]s,
140	/// the implementer can safely ignore [`ChannelMonitorUpdate`]s after the gap and load without them.
141	/// See [`ChannelMonitor::update_monitor`] for
142	/// applying a monitor update to a monitor. If full `ChannelMonitors` are
143	/// persisted, then there is no need to persist individual updates.
144	///
145	/// Note that there could be a performance tradeoff between persisting complete
146	/// channel monitors on every update vs. persisting only updates and applying
147	/// them in batches. The size of each monitor grows `O(number of state updates)`
148	/// whereas updates are small and `O(1)`.
149	///
150	/// The [`ChannelMonitorUpdate::update_id`] or [`ChannelMonitor::get_latest_update_id`] uniquely
151	/// links this call to [`ChainMonitor::channel_monitor_updated`].
152	/// For [`Persist::update_persisted_channel`], it is only necessary to call [`ChainMonitor::channel_monitor_updated`]
153	/// when a [`ChannelMonitorUpdate`] is provided and when you return [`ChannelMonitorUpdateStatus::InProgress`].
154	///
155	/// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`,
156	/// [`Writeable::write`] on [`ChannelMonitorUpdate`] for writing out an update, and
157	/// [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
158	///
159	/// [`Writeable::write`]: crate::util::ser::Writeable::write
160	fn update_persisted_channel(&self, channel_funding_outpoint: OutPoint, monitor_update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus;
161	/// Prevents the channel monitor from being loaded on startup.
162	///
163	/// Archiving the data in a backup location (rather than deleting it fully) is useful for
164	/// hedging against data loss in case of unexpected failure.
165	///
166	/// Note that if a crash occurs during the archiving process and its implementation is not
167	/// atomic, the archival operation may be left only partially complete. In that scenario, the
168	/// monitor may still be loaded on startup until the archive process completes successfully.
169	/// Additionally, because the archive operation could be retried on restart, this method must
170	/// be idempotent in that case, handling the scenario where the monitor already exists in the
171	/// archive.
172	fn archive_persisted_channel(&self, channel_funding_outpoint: OutPoint);
173}
174
175struct MonitorHolder<ChannelSigner: EcdsaChannelSigner> {
176	monitor: ChannelMonitor<ChannelSigner>,
177	/// The full set of pending monitor updates for this Channel.
178	///
179	/// Note that this lock must be held from [`ChannelMonitor::update_monitor`] through to
180	/// [`Persist::update_persisted_channel`] to prevent a race where we call
181	/// [`Persist::update_persisted_channel`], the user returns a
182	/// [`ChannelMonitorUpdateStatus::InProgress`], and then calls
183	/// [`ChainMonitor::channel_monitor_updated`] immediately, racing our insertion of the pending
184	/// update into the contained Vec.
185	///
186	/// This also avoids a race where we update a [`ChannelMonitor`], then while connecting a block
187	/// persist a full [`ChannelMonitor`] prior to persisting the [`ChannelMonitorUpdate`]. This
188	/// could cause users to have a full [`ChannelMonitor`] on disk as well as a
189	/// [`ChannelMonitorUpdate`] which was already applied. While this isn't an issue for the
190	/// LDK-provided update-based [`Persist`], it is somewhat surprising for users so we avoid it.
191	pending_monitor_updates: Mutex<Vec<u64>>,
192}
193
194impl<ChannelSigner: EcdsaChannelSigner> MonitorHolder<ChannelSigner> {
195	fn has_pending_updates(&self, pending_monitor_updates_lock: &MutexGuard<Vec<u64>>) -> bool {
196		!pending_monitor_updates_lock.is_empty()
197	}
198}
199
200/// A read-only reference to a current ChannelMonitor.
201///
202/// Note that this holds a mutex in [`ChainMonitor`] and may block other events until it is
203/// released.
204pub struct LockedChannelMonitor<'a, ChannelSigner: EcdsaChannelSigner> {
205	lock: RwLockReadGuard<'a, HashMap<OutPoint, MonitorHolder<ChannelSigner>>>,
206	funding_txo: OutPoint,
207}
208
209impl<ChannelSigner: EcdsaChannelSigner> Deref for LockedChannelMonitor<'_, ChannelSigner> {
210	type Target = ChannelMonitor<ChannelSigner>;
211	fn deref(&self) -> &ChannelMonitor<ChannelSigner> {
212		&self.lock.get(&self.funding_txo).expect("Checked at construction").monitor
213	}
214}
215
216/// An implementation of [`chain::Watch`] for monitoring channels.
217///
218/// Connected and disconnected blocks must be provided to `ChainMonitor` as documented by
219/// [`chain::Watch`]. May be used in conjunction with [`ChannelManager`] to monitor channels locally
220/// or used independently to monitor channels remotely. See the [module-level documentation] for
221/// details.
222///
223/// Note that `ChainMonitor` should regularly trigger rebroadcasts/fee bumps of pending claims from
224/// a force-closed channel. This is crucial in preventing certain classes of pinning attacks,
225/// detecting substantial mempool feerate changes between blocks, and ensuring reliability if
226/// broadcasting fails. We recommend invoking [`rebroadcast_pending_claims`] every 30 seconds, or lower if running in an
227/// environment with spotty connections, like on mobile.
228///
229/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
230/// [module-level documentation]: crate::chain::chainmonitor
231/// [`rebroadcast_pending_claims`]: Self::rebroadcast_pending_claims
232pub struct ChainMonitor<ChannelSigner: EcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
233	where C::Target: chain::Filter,
234        T::Target: BroadcasterInterface,
235        F::Target: FeeEstimator,
236        L::Target: Logger,
237        P::Target: Persist<ChannelSigner>,
238{
239	monitors: RwLock<HashMap<OutPoint, MonitorHolder<ChannelSigner>>>,
240	chain_source: Option<C>,
241	broadcaster: T,
242	logger: L,
243	fee_estimator: F,
244	persister: P,
245	/// "User-provided" (ie persistence-completion/-failed) [`MonitorEvent`]s. These came directly
246	/// from the user and not from a [`ChannelMonitor`].
247	pending_monitor_events: Mutex<Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)>>,
248	/// The best block height seen, used as a proxy for the passage of time.
249	highest_chain_height: AtomicUsize,
250
251	/// A [`Notifier`] used to wake up the background processor in case we have any [`Event`]s for
252	/// it to give to users (or [`MonitorEvent`]s for `ChannelManager` to process).
253	event_notifier: Notifier,
254}
255
256impl<ChannelSigner: EcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> ChainMonitor<ChannelSigner, C, T, F, L, P>
257where C::Target: chain::Filter,
258	    T::Target: BroadcasterInterface,
259	    F::Target: FeeEstimator,
260	    L::Target: Logger,
261	    P::Target: Persist<ChannelSigner>,
262{
263	/// Dispatches to per-channel monitors, which are responsible for updating their on-chain view
264	/// of a channel and reacting accordingly based on transactions in the given chain data. See
265	/// [`ChannelMonitor::block_connected`] for details. Any HTLCs that were resolved on chain will
266	/// be returned by [`chain::Watch::release_pending_monitor_events`].
267	///
268	/// Calls back to [`chain::Filter`] if any monitor indicated new outputs to watch. Subsequent
269	/// calls must not exclude any transactions matching the new outputs nor any in-block
270	/// descendants of such transactions. It is not necessary to re-fetch the block to obtain
271	/// updated `txdata`.
272	///
273	/// Calls which represent a new blockchain tip height should set `best_height`.
274	fn process_chain_data<FN>(&self, header: &Header, best_height: Option<u32>, txdata: &TransactionData, process: FN)
275	where
276		FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs>
277	{
278		let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
279		let funding_outpoints = hash_set_from_iter(self.monitors.read().unwrap().keys().cloned());
280		let channel_count = funding_outpoints.len();
281		for funding_outpoint in funding_outpoints.iter() {
282			let monitor_lock = self.monitors.read().unwrap();
283			if let Some(monitor_state) = monitor_lock.get(funding_outpoint) {
284				if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state, channel_count).is_err() {
285					// Take the monitors lock for writing so that we poison it and any future
286					// operations going forward fail immediately.
287					core::mem::drop(monitor_lock);
288					let _poison = self.monitors.write().unwrap();
289					log_error!(self.logger, "{}", err_str);
290					panic!("{}", err_str);
291				}
292			}
293		}
294
295		// Process any monitors added while iterating above, i.e. funding outpoints not in our snapshot.
296		let monitor_states = self.monitors.write().unwrap();
297		for (funding_outpoint, monitor_state) in monitor_states.iter() {
298			if !funding_outpoints.contains(funding_outpoint) {
299				if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state, channel_count).is_err() {
300					log_error!(self.logger, "{}", err_str);
301					panic!("{}", err_str);
302				}
303			}
304		}
305
306		if let Some(height) = best_height {
307			// If the best block height is being updated, update highest_chain_height under the
308			// monitors write lock.
309			let old_height = self.highest_chain_height.load(Ordering::Acquire);
310			let new_height = height as usize;
311			if new_height > old_height {
312				self.highest_chain_height.store(new_height, Ordering::Release);
313			}
314		}
315	}
316
317	fn update_monitor_with_chain_data<FN>(
318		&self, header: &Header, best_height: Option<u32>, txdata: &TransactionData, process: FN, funding_outpoint: &OutPoint,
319		monitor_state: &MonitorHolder<ChannelSigner>, channel_count: usize,
320	) -> Result<(), ()> where FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs> {
321		let monitor = &monitor_state.monitor;
322		let logger = WithChannelMonitor::from(&self.logger, &monitor, None);
323
324		let mut txn_outputs = process(monitor, txdata);
325
326		let get_partition_key = |funding_outpoint: &OutPoint| {
327			let funding_txid_hash = funding_outpoint.txid.to_raw_hash();
328			let funding_txid_hash_bytes = funding_txid_hash.as_byte_array();
329			let funding_txid_u32 = u32::from_be_bytes([funding_txid_hash_bytes[0], funding_txid_hash_bytes[1], funding_txid_hash_bytes[2], funding_txid_hash_bytes[3]]);
330			funding_txid_u32.wrapping_add(best_height.unwrap_or_default())
331		};
332
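		// Without pending claims, a monitor is only persisted for chain data when its partition key
		// (derived from the first four bytes of the funding txid hash plus the current height) lands
		// on a multiple of `partition_factor`, i.e. roughly once every `partition_factor` blocks per
		// channel. This spreads chain-sync persistence across blocks instead of rewriting every
		// monitor on every block.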
333		let partition_factor = if channel_count < 15 {
334			5
335		} else {
336			50 // ~8 hours
337		};
338
339		let has_pending_claims = monitor_state.monitor.has_pending_claims();
340		if has_pending_claims || get_partition_key(funding_outpoint) % partition_factor == 0 {
341			log_trace!(logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
342			// Even though we don't track monitor updates from chain-sync as pending, we still want
343			// updates per-channel to be well-ordered so that users don't see a
344			// `ChannelMonitorUpdate` after a channel persist for a channel with the same
345			// `latest_update_id`.
346			let _pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
347			match self.persister.update_persisted_channel(*funding_outpoint, None, monitor) {
348				ChannelMonitorUpdateStatus::Completed =>
349					log_trace!(logger, "Finished syncing Channel Monitor for channel {} for block-data",
350						log_funding_info!(monitor)
351					),
352				ChannelMonitorUpdateStatus::InProgress => {
353					log_trace!(logger, "Channel Monitor sync for channel {} in progress.", log_funding_info!(monitor));
354				}
355				ChannelMonitorUpdateStatus::UnrecoverableError => {
356					return Err(());
357				}
358			}
359		}
360
361		// Register any new outputs with the chain source for filtering, storing any dependent
362		// transactions from within the block that previously had not been included in txdata.
363		if let Some(ref chain_source) = self.chain_source {
364			let block_hash = header.block_hash();
365			for (txid, mut outputs) in txn_outputs.drain(..) {
366				for (idx, output) in outputs.drain(..) {
367					// Register any new outputs with the chain source for filtering
368					let output = WatchedOutput {
369						block_hash: Some(block_hash),
370						outpoint: OutPoint { txid, index: idx as u16 },
371						script_pubkey: output.script_pubkey,
372					};
373					log_trace!(logger, "Adding monitoring for spends of outpoint {} to the filter", output.outpoint);
374					chain_source.register_output(output);
375				}
376			}
377		}
378		Ok(())
379	}
380
381	/// Creates a new `ChainMonitor` used to watch on-chain activity pertaining to channels.
382	///
383	/// When an optional chain source implementing [`chain::Filter`] is provided, the chain monitor
384	/// will call back to it indicating transactions and outputs of interest. This allows clients to
385	/// pre-filter blocks or only fetch blocks matching a compact filter. Otherwise, clients may
386	/// always need to fetch full blocks absent another means for determining which blocks contain
387	/// transactions relevant to the watched channels.
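	///
	/// For example (a sketch; `filter`, `broadcaster`, `logger`, `fee_estimator` and `persister`
	/// stand in for whatever application-provided values satisfy the corresponding trait bounds):
	///
	/// ```ignore
	/// let chain_monitor = ChainMonitor::new(
	/// 	Some(&filter), // or `None` if full blocks are always fetched
	/// 	&broadcaster,
	/// 	&logger,
	/// 	&fee_estimator,
	/// 	&persister,
	/// );
	/// ```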
388	pub fn new(chain_source: Option<C>, broadcaster: T, logger: L, feeest: F, persister: P) -> Self {
389		Self {
390			monitors: RwLock::new(new_hash_map()),
391			chain_source,
392			broadcaster,
393			logger,
394			fee_estimator: feeest,
395			persister,
396			pending_monitor_events: Mutex::new(Vec::new()),
397			highest_chain_height: AtomicUsize::new(0),
398			event_notifier: Notifier::new(),
399		}
400	}
401
402	/// Gets the balances in the contained [`ChannelMonitor`]s which are claimable on-chain or
403	/// claims which are awaiting confirmation.
404	///
405	/// Includes the balances from each [`ChannelMonitor`] *except* those included in
406	/// `ignored_channels`.
407	///
408	/// See [`ChannelMonitor::get_claimable_balances`] for more details on the exact criteria for
409	/// inclusion in the return value.
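	///
	/// For example (a sketch; `chain_monitor` is assumed to be a [`ChainMonitor`] and the balances
	/// are simply logged):
	///
	/// ```ignore
	/// // Pass an empty slice so that no channels are ignored.
	/// for balance in chain_monitor.get_claimable_balances(&[]) {
	/// 	println!("claimable balance: {:?}", balance);
	/// }
	/// ```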
410	pub fn get_claimable_balances(&self, ignored_channels: &[&ChannelDetails]) -> Vec<Balance> {
411		let mut ret = Vec::new();
412		let monitor_states = self.monitors.read().unwrap();
413		for (_, monitor_state) in monitor_states.iter().filter(|(funding_outpoint, _)| {
414			for chan in ignored_channels {
415				if chan.funding_txo.as_ref() == Some(funding_outpoint) {
416					return false;
417				}
418			}
419			true
420		}) {
421			ret.append(&mut monitor_state.monitor.get_claimable_balances());
422		}
423		ret
424	}
425
426	/// Gets the [`LockedChannelMonitor`] for a given funding outpoint, returning an `Err` if no
427	/// such [`ChannelMonitor`] is currently being tracked.
428	///
429	/// Note that the result holds a mutex over our monitor set, and should not be held
430	/// indefinitely.
431	pub fn get_monitor(&self, funding_txo: OutPoint) -> Result<LockedChannelMonitor<'_, ChannelSigner>, ()> {
432		let lock = self.monitors.read().unwrap();
433		if lock.get(&funding_txo).is_some() {
434			Ok(LockedChannelMonitor { lock, funding_txo })
435		} else {
436			Err(())
437		}
438	}
439
440	/// Lists the funding outpoint and channel ID of each [`ChannelMonitor`] being monitored.
441	///
442	/// Note that [`ChannelMonitor`]s are not removed when a channel is closed as they are always
443	/// monitoring for on-chain state resolutions.
444	pub fn list_monitors(&self) -> Vec<(OutPoint, ChannelId)> {
445		self.monitors.read().unwrap().iter().map(|(outpoint, monitor_holder)| {
446			let channel_id = monitor_holder.monitor.channel_id();
447			(*outpoint, channel_id)
448		}).collect()
449	}
450
451	#[cfg(not(c_bindings))]
452	/// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored).
453	/// Each `Vec<u64>` contains `update_id`s from [`ChannelMonitor::get_latest_update_id`] for updates
454	/// that have not yet been fully persisted. Note that if a full monitor is persisted all the pending
455	/// monitor updates must be individually marked completed by calling [`ChainMonitor::channel_monitor_updated`].
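	///
	/// A retry sketch, assuming persistence previously returned
	/// [`ChannelMonitorUpdateStatus::InProgress`] and that `persist_full_monitor` is an
	/// application-provided helper:
	///
	/// ```ignore
	/// for (funding_txo, update_ids) in chain_monitor.list_pending_monitor_updates() {
	/// 	// Re-persist the full, up-to-date monitor rather than replaying individual updates.
	/// 	let monitor = chain_monitor.get_monitor(funding_txo).expect("monitor is still tracked");
	/// 	persist_full_monitor(funding_txo, &*monitor)?;
	/// 	// Once the full monitor is durable, mark each pending update as completed.
	/// 	for update_id in update_ids {
	/// 		chain_monitor.channel_monitor_updated(funding_txo, update_id)?;
	/// 	}
	/// }
	/// ```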
456	pub fn list_pending_monitor_updates(&self) -> HashMap<OutPoint, Vec<u64>> {
457		hash_map_from_iter(self.monitors.read().unwrap().iter().map(|(outpoint, holder)| {
458			(*outpoint, holder.pending_monitor_updates.lock().unwrap().clone())
459		}))
460	}
461
462	#[cfg(c_bindings)]
463	/// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored).
464	/// Each `Vec<u64>` contains `update_id`s from [`ChannelMonitor::get_latest_update_id`] for updates
465	/// that have not yet been fully persisted. Note that if a full monitor is persisted all the pending
466	/// monitor updates must be individually marked completed by calling [`ChainMonitor::channel_monitor_updated`].
467	pub fn list_pending_monitor_updates(&self) -> Vec<(OutPoint, Vec<u64>)> {
468		self.monitors.read().unwrap().iter().map(|(outpoint, holder)| {
469			(*outpoint, holder.pending_monitor_updates.lock().unwrap().clone())
470		}).collect()
471	}
472
473
474	#[cfg(test)]
475	pub fn remove_monitor(&self, funding_txo: &OutPoint) -> ChannelMonitor<ChannelSigner> {
476		self.monitors.write().unwrap().remove(funding_txo).unwrap().monitor
477	}
478
479	/// Indicates the persistence of a [`ChannelMonitor`] has completed after
480	/// [`ChannelMonitorUpdateStatus::InProgress`] was returned from an update operation.
481	///
482	/// Thus, the anticipated use is, at a high level:
483	///  1) This [`ChainMonitor`] calls [`Persist::update_persisted_channel`] which stores the
484	///     update to disk and begins updating any remote (e.g. watchtower/backup) copies,
485	///     returning [`ChannelMonitorUpdateStatus::InProgress`],
486	///  2) once all remote copies are updated, you call this function with [`ChannelMonitor::get_latest_update_id`]
487	///     or [`ChannelMonitorUpdate::update_id`] as the `completed_update_id`, and once all pending
488	///     updates have completed the channel will be re-enabled.
489	///
490	/// It is only necessary to call [`ChainMonitor::channel_monitor_updated`] when you return [`ChannelMonitorUpdateStatus::InProgress`]
491	/// from [`Persist`] and either:
492	///   1. A new [`ChannelMonitor`] was added in [`Persist::persist_new_channel`], or
493	///   2. A [`ChannelMonitorUpdate`] was provided as part of [`Persist::update_persisted_channel`].
494	/// In other words, calls to [`Persist::update_persisted_channel`] where no
495	/// [`ChannelMonitorUpdate`] was provided do not require a corresponding call to this method.
496	///
497	/// Returns an [`APIError::APIMisuseError`] if `funding_txo` does not match any currently
498	/// registered [`ChannelMonitor`]s.
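	///
	/// For example, once a background persist of a single update has completed (a sketch; error
	/// handling is omitted and `update` is the [`ChannelMonitorUpdate`] that was just made durable):
	///
	/// ```ignore
	/// chain_monitor.channel_monitor_updated(funding_txo, update.update_id)?;
	/// ```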
499	pub fn channel_monitor_updated(&self, funding_txo: OutPoint, completed_update_id: u64) -> Result<(), APIError> {
500		let monitors = self.monitors.read().unwrap();
501		let monitor_data = if let Some(mon) = monitors.get(&funding_txo) { mon } else {
502			return Err(APIError::APIMisuseError { err: format!("No ChannelMonitor matching funding outpoint {:?} found", funding_txo) });
503		};
504		let mut pending_monitor_updates = monitor_data.pending_monitor_updates.lock().unwrap();
505		pending_monitor_updates.retain(|update_id| *update_id != completed_update_id);
506
507		// Note that we only check for pending non-chainsync monitor updates and we don't track monitor
508		// updates resulting from chainsync in `pending_monitor_updates`.
509		let monitor_is_pending_updates = monitor_data.has_pending_updates(&pending_monitor_updates);
510		log_debug!(self.logger, "Completed off-chain monitor update {} for channel with funding outpoint {:?}, {}",
511			completed_update_id,
512			funding_txo,
513			if monitor_is_pending_updates {
514				"still have pending off-chain updates"
515			} else {
516				"all off-chain updates complete, returning a MonitorEvent"
517			});
518		if monitor_is_pending_updates {
519			// If there are still monitor updates pending, we cannot yet construct a
520			// Completed event.
521			return Ok(());
522		}
523		let channel_id = monitor_data.monitor.channel_id();
524		self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed {
525			funding_txo, channel_id,
526			monitor_update_id: monitor_data.monitor.get_latest_update_id(),
527		}], monitor_data.monitor.get_counterparty_node_id()));
528
529		self.event_notifier.notify();
530		Ok(())
531	}
532
533	/// This wrapper avoids having to update some of our tests for now as they assume the direct
534	/// chain::Watch API wherein we mark a monitor fully-updated by just calling
535	/// channel_monitor_updated once with the highest ID.
536	#[cfg(any(test, fuzzing))]
537	pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
538		let monitors = self.monitors.read().unwrap();
539		let (counterparty_node_id, channel_id) = if let Some(m) = monitors.get(&funding_txo) {
540			(m.monitor.get_counterparty_node_id(), m.monitor.channel_id())
541		} else {
542			(None, ChannelId::v1_from_funding_outpoint(funding_txo))
543		};
544		self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed {
545			funding_txo,
546			channel_id,
547			monitor_update_id,
548		}], counterparty_node_id));
549		self.event_notifier.notify();
550	}
551
552	#[cfg(any(test, feature = "_test_utils"))]
553	pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
554		use crate::events::EventsProvider;
555		let events = core::cell::RefCell::new(Vec::new());
556		let event_handler = |event: events::Event| Ok(events.borrow_mut().push(event));
557		self.process_pending_events(&event_handler);
558		events.into_inner()
559	}
560
561	/// Processes any events asynchronously in the order they were generated since the last call
562	/// using the given event handler.
563	///
564	/// See the trait-level documentation of [`EventsProvider`] for requirements.
565	///
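	/// For example, from an async background task (a sketch; `handle_event` stands in for an
	/// application-provided async event handler):
	///
	/// ```ignore
	/// chain_monitor.process_pending_events_async(|event| async {
	/// 	handle_event(event).await;
	/// 	Ok(())
	/// }).await;
	/// ```
	///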
566	/// [`EventsProvider`]: crate::events::EventsProvider
567	pub async fn process_pending_events_async<Future: core::future::Future<Output = Result<(), ReplayEvent>>, H: Fn(Event) -> Future>(
568		&self, handler: H
569	) {
570		// Sadly we can't hold the monitors read lock through an async call. Thus we have to do a
571		// crazy dance to process a monitor's events then only remove them once we've done so.
572		let mons_to_process = self.monitors.read().unwrap().keys().cloned().collect::<Vec<_>>();
573		for funding_txo in mons_to_process {
574			let mut ev;
575			match super::channelmonitor::process_events_body!(
576				self.monitors.read().unwrap().get(&funding_txo).map(|m| &m.monitor), self.logger, ev, handler(ev).await) {
577				Ok(()) => {},
578				Err(ReplayEvent ()) => {
579					self.event_notifier.notify();
580				}
581			}
582		}
583	}
584
585	/// Gets a [`Future`] that completes when an event is available either via
586	/// [`chain::Watch::release_pending_monitor_events`] or
587	/// [`EventsProvider::process_pending_events`].
588	///
589	/// Note that callbacks registered on the [`Future`] MUST NOT call back into this
590	/// [`ChainMonitor`] and should instead register actions to be taken later.
591	///
592	/// [`EventsProvider::process_pending_events`]: crate::events::EventsProvider::process_pending_events
593	pub fn get_update_future(&self) -> Future {
594		self.event_notifier.get_future()
595	}
596
597	/// Triggers rebroadcasts/fee-bumps of pending claims from a force-closed channel. This is
598	/// crucial in preventing certain classes of pinning attacks, detecting substantial mempool
599	/// feerate changes between blocks, and ensuring reliability if broadcasting fails. We recommend
600	/// invoking this every 30 seconds, or lower if running in an environment with spotty
601	/// connections, like on mobile.
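	///
	/// For example, on a simple timer (a sketch; in practice this typically lives in your
	/// background-processing task):
	///
	/// ```ignore
	/// loop {
	/// 	chain_monitor.rebroadcast_pending_claims();
	/// 	std::thread::sleep(std::time::Duration::from_secs(30));
	/// }
	/// ```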
602	pub fn rebroadcast_pending_claims(&self) {
603		let monitors = self.monitors.read().unwrap();
604		for (_, monitor_holder) in &*monitors {
605			monitor_holder.monitor.rebroadcast_pending_claims(
606				&*self.broadcaster, &*self.fee_estimator, &self.logger
607			)
608		}
609	}
610
611	/// Triggers rebroadcasts of pending claims from force-closed channels after a transaction
612	/// signature generation failure.
613	///
614	/// `monitor_opt` can be used as a filter to only trigger them for a specific channel monitor.
615	pub fn signer_unblocked(&self, monitor_opt: Option<OutPoint>) {
616		let monitors = self.monitors.read().unwrap();
617		if let Some(funding_txo) = monitor_opt {
618			if let Some(monitor_holder) = monitors.get(&funding_txo) {
619				monitor_holder.monitor.signer_unblocked(
620					&*self.broadcaster, &*self.fee_estimator, &self.logger
621				)
622			}
623		} else {
624			for (_, monitor_holder) in &*monitors {
625				monitor_holder.monitor.signer_unblocked(
626					&*self.broadcaster, &*self.fee_estimator, &self.logger
627				)
628			}
629		}
630	}
631
632	/// Archives fully resolved channel monitors by calling [`Persist::archive_persisted_channel`].
633	///
634	/// This is useful for pruning fully resolved monitors from the monitor set and primary
635	/// storage so they are not kept in memory and reloaded on restart.
636	///
637	/// Should be called occasionally (once every handful of blocks or on startup).
638	///
639	/// Depending on the implementation of [`Persist::archive_persisted_channel`] the monitor
640	/// data could be moved to an archive location or removed entirely.
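	///
	/// For example (a sketch), invoking it once at startup and then every handful of blocks:
	///
	/// ```ignore
	/// chain_monitor.archive_fully_resolved_channel_monitors();
	/// ```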
641	pub fn archive_fully_resolved_channel_monitors(&self) {
642		let mut have_monitors_to_prune = false;
643		for (funding_txo, monitor_holder) in self.monitors.read().unwrap().iter() {
644			let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor, None);
645			let (is_fully_resolved, needs_persistence) = monitor_holder.monitor.check_and_update_full_resolution_status(&logger);
646			if is_fully_resolved {
647				have_monitors_to_prune = true;
648			}
649			if needs_persistence {
650				self.persister.update_persisted_channel(*funding_txo, None, &monitor_holder.monitor);
651			}
652		}
653		if have_monitors_to_prune {
654			let mut monitors = self.monitors.write().unwrap();
655			monitors.retain(|funding_txo, monitor_holder| {
656				let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor, None);
657				let (is_fully_resolved, _) = monitor_holder.monitor.check_and_update_full_resolution_status(&logger);
658				if is_fully_resolved {
659					log_info!(logger,
660						"Archiving fully resolved ChannelMonitor for funding txo {}",
661						funding_txo
662					);
663					self.persister.archive_persisted_channel(*funding_txo);
664					false
665				} else {
666					true
667				}
668			});
669		}
670	}
671}
672
673impl<ChannelSigner: EcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
674chain::Listen for ChainMonitor<ChannelSigner, C, T, F, L, P>
675where
676	C::Target: chain::Filter,
677	T::Target: BroadcasterInterface,
678	F::Target: FeeEstimator,
679	L::Target: Logger,
680	P::Target: Persist<ChannelSigner>,
681{
682	fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
683		log_debug!(self.logger, "New best block {} at height {} provided via block_connected", header.block_hash(), height);
684		self.process_chain_data(header, Some(height), &txdata, |monitor, txdata| {
685			monitor.block_connected(
686				header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &self.logger)
687		});
688		// Assume we may have some new events and wake the event processor
689		self.event_notifier.notify();
690	}
691
692	fn block_disconnected(&self, header: &Header, height: u32) {
693		let monitor_states = self.monitors.read().unwrap();
694		log_debug!(self.logger, "Latest block {} at height {} removed via block_disconnected", header.block_hash(), height);
695		for monitor_state in monitor_states.values() {
696			monitor_state.monitor.block_disconnected(
697				header, height, &*self.broadcaster, &*self.fee_estimator, &self.logger);
698		}
699	}
700}
701
702impl<ChannelSigner: EcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
703chain::Confirm for ChainMonitor<ChannelSigner, C, T, F, L, P>
704where
705	C::Target: chain::Filter,
706	T::Target: BroadcasterInterface,
707	F::Target: FeeEstimator,
708	L::Target: Logger,
709	P::Target: Persist<ChannelSigner>,
710{
711	fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) {
712		log_debug!(self.logger, "{} provided transactions confirmed at height {} in block {}", txdata.len(), height, header.block_hash());
713		self.process_chain_data(header, None, txdata, |monitor, txdata| {
714			monitor.transactions_confirmed(
715				header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &self.logger)
716		});
717		// Assume we may have some new events and wake the event processor
718		self.event_notifier.notify();
719	}
720
721	fn transaction_unconfirmed(&self, txid: &Txid) {
722		log_debug!(self.logger, "Transaction {} reorganized out of chain", txid);
723		let monitor_states = self.monitors.read().unwrap();
724		for monitor_state in monitor_states.values() {
725			monitor_state.monitor.transaction_unconfirmed(txid, &*self.broadcaster, &*self.fee_estimator, &self.logger);
726		}
727	}
728
729	fn best_block_updated(&self, header: &Header, height: u32) {
730		log_debug!(self.logger, "New best block {} at height {} provided via best_block_updated", header.block_hash(), height);
731		self.process_chain_data(header, Some(height), &[], |monitor, txdata| {
732			// While in practice there shouldn't be any recursive calls when given empty txdata,
733			// it's still possible if a chain::Filter implementation returns a transaction.
734			debug_assert!(txdata.is_empty());
735			monitor.best_block_updated(
736				header, height, &*self.broadcaster, &*self.fee_estimator, &self.logger
737			)
738		});
739		// Assume we may have some new events and wake the event processor
740		self.event_notifier.notify();
741	}
742
743	fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option<BlockHash>)> {
744		let mut txids = Vec::new();
745		let monitor_states = self.monitors.read().unwrap();
746		for monitor_state in monitor_states.values() {
747			txids.append(&mut monitor_state.monitor.get_relevant_txids());
748		}
749
750		txids.sort_unstable_by(|a, b| a.0.cmp(&b.0).then(b.1.cmp(&a.1)));
751		txids.dedup_by_key(|(txid, _, _)| *txid);
752		txids
753	}
754}
755
756impl<ChannelSigner: EcdsaChannelSigner, C: Deref , T: Deref , F: Deref , L: Deref , P: Deref >
757chain::Watch<ChannelSigner> for ChainMonitor<ChannelSigner, C, T, F, L, P>
758where C::Target: chain::Filter,
759	    T::Target: BroadcasterInterface,
760	    F::Target: FeeEstimator,
761	    L::Target: Logger,
762	    P::Target: Persist<ChannelSigner>,
763{
764	fn watch_channel(&self, funding_outpoint: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> Result<ChannelMonitorUpdateStatus, ()> {
765		let logger = WithChannelMonitor::from(&self.logger, &monitor, None);
766		let mut monitors = self.monitors.write().unwrap();
767		let entry = match monitors.entry(funding_outpoint) {
768			hash_map::Entry::Occupied(_) => {
769				log_error!(logger, "Failed to add new channel data: channel monitor for given outpoint is already present");
770				return Err(());
771			},
772			hash_map::Entry::Vacant(e) => e,
773		};
774		log_trace!(logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor));
775		let update_id = monitor.get_latest_update_id();
776		let mut pending_monitor_updates = Vec::new();
777		let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor);
778		match persist_res {
779			ChannelMonitorUpdateStatus::InProgress => {
780				log_info!(logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor));
781				pending_monitor_updates.push(update_id);
782			},
783			ChannelMonitorUpdateStatus::Completed => {
784				log_info!(logger, "Persistence of new ChannelMonitor for channel {} completed", log_funding_info!(monitor));
785			},
786			ChannelMonitorUpdateStatus::UnrecoverableError => {
787				let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
788				log_error!(logger, "{}", err_str);
789				panic!("{}", err_str);
790			},
791		}
792		if let Some(ref chain_source) = self.chain_source {
793			monitor.load_outputs_to_watch(chain_source , &self.logger);
794		}
795		entry.insert(MonitorHolder {
796			monitor,
797			pending_monitor_updates: Mutex::new(pending_monitor_updates),
798		});
799		Ok(persist_res)
800	}
801
802	fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus {
803		// `ChannelMonitorUpdate`'s `channel_id` is `None` prior to 0.0.121 and all channels in those
804		// versions are V1-established. For 0.0.121+ the `channel_id` field is always `Some`.
805		let channel_id = update.channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo));
806		// Update the monitor that watches the channel referred to by the given outpoint.
807		let monitors = self.monitors.read().unwrap();
808		match monitors.get(&funding_txo) {
809			None => {
810				let logger = WithContext::from(&self.logger, update.counterparty_node_id, Some(channel_id), None);
811				log_error!(logger, "Failed to update channel monitor: no such monitor registered");
812
813				// We should never ever trigger this from within ChannelManager. Technically a
814				// user could use this object with some proxying in between which makes this
815				// possible, but in tests and fuzzing, this should be a panic.
816				#[cfg(debug_assertions)]
817				panic!("ChannelManager generated a channel update for a channel that was not yet registered!");
818				#[cfg(not(debug_assertions))]
819				ChannelMonitorUpdateStatus::InProgress
820			},
821			Some(monitor_state) => {
822				let monitor = &monitor_state.monitor;
823				let logger = WithChannelMonitor::from(&self.logger, &monitor, None);
824				log_trace!(logger, "Updating ChannelMonitor to id {} for channel {}", update.update_id, log_funding_info!(monitor));
825
826				// We hold a `pending_monitor_updates` lock through `update_monitor` to ensure we
827				// have well-ordered updates from the users' point of view. See the
828				// `pending_monitor_updates` docs for more.
829				let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
830				let update_res = monitor.update_monitor(update, &self.broadcaster, &self.fee_estimator, &self.logger);
831
832				let update_id = update.update_id;
833				let persist_res = if update_res.is_err() {
834					// Even if updating the monitor returns an error, the monitor's state will
835					// still be changed. Therefore, we should persist the updated monitor despite the error.
836					// We don't want to persist a `monitor_update` which results in a failure to apply later
837					// while reading `channel_monitor` with updates from storage. Instead, we should persist
838					// the entire `channel_monitor` here.
839					log_warn!(logger, "Failed to update ChannelMonitor for channel {}. Going ahead and persisting the entire ChannelMonitor", log_funding_info!(monitor));
840					self.persister.update_persisted_channel(funding_txo, None, monitor)
841				} else {
842					self.persister.update_persisted_channel(funding_txo, Some(update), monitor)
843				};
844				match persist_res {
845					ChannelMonitorUpdateStatus::InProgress => {
846						pending_monitor_updates.push(update_id);
847						log_debug!(logger,
848							"Persistence of ChannelMonitorUpdate id {:?} for channel {} in progress",
849							update_id,
850							log_funding_info!(monitor)
851						);
852					},
853					ChannelMonitorUpdateStatus::Completed => {
854						log_debug!(logger,
855							"Persistence of ChannelMonitorUpdate id {:?} for channel {} completed",
856							update_id,
857							log_funding_info!(monitor)
858						);
859					},
860					ChannelMonitorUpdateStatus::UnrecoverableError => {
861						// Take the monitors lock for writing so that we poison it and any future
862						// operations going forward fail immediately.
863						core::mem::drop(pending_monitor_updates);
864						core::mem::drop(monitors);
865						let _poison = self.monitors.write().unwrap();
866						let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
867						log_error!(logger, "{}", err_str);
868						panic!("{}", err_str);
869					},
870				}
871				if update_res.is_err() {
872					ChannelMonitorUpdateStatus::InProgress
873				} else {
874					persist_res
875				}
876			}
877		}
878	}
879
880	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
881		let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
882		for monitor_state in self.monitors.read().unwrap().values() {
883			let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
884			if monitor_events.len() > 0 {
885				let monitor_outpoint = monitor_state.monitor.get_funding_txo().0;
886				let monitor_channel_id = monitor_state.monitor.channel_id();
887				let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id();
888				pending_monitor_events.push((monitor_outpoint, monitor_channel_id, monitor_events, counterparty_node_id));
889			}
890		}
891		pending_monitor_events
892	}
893}
894
895impl<ChannelSigner: EcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> events::EventsProvider for ChainMonitor<ChannelSigner, C, T, F, L, P>
896	where C::Target: chain::Filter,
897	      T::Target: BroadcasterInterface,
898	      F::Target: FeeEstimator,
899	      L::Target: Logger,
900	      P::Target: Persist<ChannelSigner>,
901{
902	/// Processes [`SpendableOutputs`] events produced from each [`ChannelMonitor`] upon maturity.
903	///
904	/// For channels featuring anchor outputs, this method will also process [`BumpTransaction`]
905	/// events produced from each [`ChannelMonitor`] while there is a balance to claim onchain
906	/// within each channel. As the confirmation of a commitment transaction may be critical to the
907	/// safety of funds, we recommend invoking this every 30 seconds, or lower if running in an
908	/// environment with spotty connections, like on mobile.
909	///
910	/// An [`EventHandler`] may safely call back to the provider, though this shouldn't be needed in
911	/// order to handle these events.
912	///
913	/// [`SpendableOutputs`]: events::Event::SpendableOutputs
914	/// [`BumpTransaction`]: events::Event::BumpTransaction
915	fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
916		for monitor_state in self.monitors.read().unwrap().values() {
917			match monitor_state.monitor.process_pending_events(&handler, &self.logger) {
918				Ok(()) => {},
919				Err(ReplayEvent ()) => {
920					self.event_notifier.notify();
921				}
922			}
923		}
924	}
925}
926
927#[cfg(test)]
928mod tests {
929	use crate::{check_added_monitors, check_closed_event};
930	use crate::{expect_payment_path_successful, get_event_msg};
931	use crate::{get_htlc_update_msgs, get_revoke_commit_msgs};
932	use crate::chain::{ChannelMonitorUpdateStatus, Watch};
933	use crate::chain::channelmonitor::ANTI_REORG_DELAY;
934	use crate::events::{ClosureReason, Event, MessageSendEvent, MessageSendEventsProvider};
935	use crate::ln::functional_test_utils::*;
936	use crate::ln::msgs::ChannelMessageHandler;
937
938	const CHAINSYNC_MONITOR_PARTITION_FACTOR: u32 = 5;
939
940	#[test]
941	fn test_async_ooo_offchain_updates() {
942		// Test that if we have multiple offchain updates being persisted and they complete
943		// out-of-order, the ChainMonitor waits until all have completed before informing the
944		// ChannelManager.
945		let chanmon_cfgs = create_chanmon_cfgs(2);
946		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
947		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
948		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
949		create_announced_chan_between_nodes(&nodes, 0, 1);
950
951		// Route two payments to be claimed at the same time.
952		let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
953		let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
954
955		chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clear();
956		chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
957		chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
958
959		nodes[1].node.claim_funds(payment_preimage_1);
960		check_added_monitors!(nodes[1], 1);
961		nodes[1].node.claim_funds(payment_preimage_2);
962		check_added_monitors!(nodes[1], 1);
963
964		let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone();
965		assert_eq!(persistences.len(), 1);
966		let (funding_txo, updates) = persistences.iter().next().unwrap();
967		assert_eq!(updates.len(), 2);
968
969		// Note that updates is a HashMap so the ordering here is actually random. This shouldn't
970		// fail either way, but if it fails intermittently it likely depends on the ordering of updates.
971		let mut update_iter = updates.iter();
972		let next_update = update_iter.next().unwrap().clone();
973		// Should contain next_update when pending updates listed.
974		#[cfg(not(c_bindings))]
975		assert!(nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(funding_txo)
976			.unwrap().contains(&next_update));
977		#[cfg(c_bindings)]
978		assert!(nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().iter()
979			.find(|(txo, _)| txo == funding_txo).unwrap().1.contains(&next_update));
980		nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, next_update.clone()).unwrap();
981		// Should not contain the previously pending next_update when pending updates listed.
982		#[cfg(not(c_bindings))]
983		assert!(!nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(funding_txo)
984			.unwrap().contains(&next_update));
985		#[cfg(c_bindings)]
986		assert!(!nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().iter()
987			.find(|(txo, _)| txo == funding_txo).unwrap().1.contains(&next_update));
988		assert!(nodes[1].chain_monitor.release_pending_monitor_events().is_empty());
989		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
990		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
991		nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();
992
993		let claim_events = nodes[1].node.get_and_clear_pending_events();
994		assert_eq!(claim_events.len(), 2);
995		match claim_events[0] {
996			Event::PaymentClaimed { ref payment_hash, amount_msat: 1_000_000, .. } => {
997				assert_eq!(payment_hash_1, *payment_hash);
998			},
999			_ => panic!("Unexpected event"),
1000		}
1001		match claim_events[1] {
1002			Event::PaymentClaimed { ref payment_hash, amount_msat: 1_000_000, .. } => {
1003				assert_eq!(payment_hash_2, *payment_hash);
1004			},
1005			_ => panic!("Unexpected event"),
1006		}
1007
1008		// Now manually walk the commitment signed dance - because we claimed two payments
1009		// back-to-back it doesn't fit into the neat walk commitment_signed_dance does.
1010
1011		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1012		nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1013		expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
1014		nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &updates.commitment_signed);
1015		check_added_monitors!(nodes[0], 1);
1016		let (as_first_raa, as_first_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1017
1018		nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa);
1019		check_added_monitors!(nodes[1], 1);
1020		let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1021		nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_first_update);
1022		check_added_monitors!(nodes[1], 1);
1023		let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1024
1025		nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
1026		expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);
1027		nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
1028		check_added_monitors!(nodes[0], 1);
1029		nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa);
1030		expect_payment_path_successful!(nodes[0]);
1031		check_added_monitors!(nodes[0], 1);
1032		let (as_second_raa, as_second_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1033
1034		nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa);
1035		check_added_monitors!(nodes[1], 1);
1036		nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_second_update);
1037		check_added_monitors!(nodes[1], 1);
1038		let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1039
1040		nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_raa);
1041		expect_payment_path_successful!(nodes[0]);
1042		check_added_monitors!(nodes[0], 1);
1043	}
1044
1045	#[test]
1046	fn test_chainsync_triggers_distributed_monitor_persistence() {
1047		let chanmon_cfgs = create_chanmon_cfgs(3);
1048		let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1049		let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1050		let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1051
1052		// Use FullBlockViaListen to avoid duplicate calls to process_chain_data and skips_blocks() in
1053		// case of other connect_styles.
1054		*nodes[0].connect_style.borrow_mut() = ConnectStyle::FullBlockViaListen;
1055		*nodes[1].connect_style.borrow_mut() = ConnectStyle::FullBlockViaListen;
1056		*nodes[2].connect_style.borrow_mut() = ConnectStyle::FullBlockViaListen;
1057
1058		let _channel_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1059		let channel_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1_000_000, 0).2;
1060
1061		chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
1062		chanmon_cfgs[1].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
1063		chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
1064
1065		connect_blocks(&nodes[0], CHAINSYNC_MONITOR_PARTITION_FACTOR * 2);
1066		connect_blocks(&nodes[1], CHAINSYNC_MONITOR_PARTITION_FACTOR * 2);
1067		connect_blocks(&nodes[2], CHAINSYNC_MONITOR_PARTITION_FACTOR * 2);
1068
1069		// Connecting `CHAINSYNC_MONITOR_PARTITION_FACTOR` * 2 blocks should trigger only 2 writes
1070		// per monitor/channel.
1071		assert_eq!(2 * 2, chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().len());
1072		assert_eq!(2, chanmon_cfgs[1].persister.chain_sync_monitor_persistences.lock().unwrap().len());
1073		assert_eq!(2, chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().len());
1074
1075		// Test that monitors with pending_claims are persisted on every block.
1076		// Now close channel_2 (i.e. between node[0] and node[2]) to create a pending claim in node[0].
1077		nodes[0].node.force_close_broadcasting_latest_txn(&channel_2, &nodes[2].node.get_our_node_id(), "Channel force-closed".to_string()).unwrap();
1078		check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false,
1079			[nodes[2].node.get_our_node_id()], 1000000);
1080		check_closed_broadcast(&nodes[0], 1, true);
1081		let close_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
1082		assert_eq!(close_tx.len(), 1);
1083
1084		mine_transaction(&nodes[2], &close_tx[0]);
1085		check_added_monitors(&nodes[2], 1);
1086		check_closed_broadcast(&nodes[2], 1, true);
1087		check_closed_event!(&nodes[2], 1, ClosureReason::CommitmentTxConfirmed, false,
1088			[nodes[0].node.get_our_node_id()], 1000000);
1089
1090		chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
1091		chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
1092
1093		// For channel_2, there should be a monitor write for every block connection.
1094		// We connect `CHAINSYNC_MONITOR_PARTITION_FACTOR` blocks since we don't know exactly when the
1095		// channel_1 monitor persistence will occur; over `CHAINSYNC_MONITOR_PARTITION_FACTOR` blocks
1096		// it will be persisted exactly once.
1097		connect_blocks(&nodes[0], CHAINSYNC_MONITOR_PARTITION_FACTOR);
1098		connect_blocks(&nodes[2], CHAINSYNC_MONITOR_PARTITION_FACTOR);
1099
1100		// CHAINSYNC_MONITOR_PARTITION_FACTOR writes for channel_2 due to its pending claim, plus 1 for
1101		// channel_1.
1102		assert_eq!((CHAINSYNC_MONITOR_PARTITION_FACTOR + 1) as usize, chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().len());
1103		// For node[2], there is no pending_claim
1104		assert_eq!(1, chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().len());
1105
1106		// Confirm claim for node[0] with ANTI_REORG_DELAY and reset monitor write counter.
1107		mine_transaction(&nodes[0], &close_tx[0]);
1108		connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
1109		check_added_monitors(&nodes[0], 1);
1110		chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
1111
1112		// Again connect 1 full cycle of CHAINSYNC_MONITOR_PARTITION_FACTOR blocks; it should only
1113		// result in 1 write per monitor/channel.
1114		connect_blocks(&nodes[0], CHAINSYNC_MONITOR_PARTITION_FACTOR);
1115		assert_eq!(2, chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().len());
1116	}
1117
1118	#[test]
1119	#[cfg(feature = "std")]
1120	fn update_during_chainsync_poisons_channel() {
1121		let chanmon_cfgs = create_chanmon_cfgs(2);
1122		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1123		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1124		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1125		create_announced_chan_between_nodes(&nodes, 0, 1);
1126		*nodes[0].connect_style.borrow_mut() = ConnectStyle::FullBlockViaListen;
1127
1128		chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::UnrecoverableError);
1129
1130		assert!(std::panic::catch_unwind(|| {
1131			// Returning an UnrecoverableError should always panic immediately
1132			// Connecting `CHAINSYNC_MONITOR_PARTITION_FACTOR` blocks so that we trigger some persistence
1133			// after accounting for block-height based partitioning/distribution.
1134			connect_blocks(&nodes[0], CHAINSYNC_MONITOR_PARTITION_FACTOR);
1135		}).is_err());
1136		assert!(std::panic::catch_unwind(|| {
1137			// ...and also poison our locks causing later use to panic as well
1138			core::mem::drop(nodes);
1139		}).is_err());
1140	}
1141}
1142