use core::cmp;
use core::convert::{TryFrom, TryInto};
use core::ops::Deref;
use core::str::FromStr;
use bitcoin::{BlockHash, Txid};
use crate::{io, log_error};
use crate::alloc::string::ToString;
use crate::prelude::*;
use crate::chain;
use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use crate::chain::chainmonitor::{Persist, MonitorUpdateId};
use crate::sign::{EntropySource, NodeSigner, ecdsa::WriteableEcdsaChannelSigner, SignerProvider};
use crate::chain::transaction::OutPoint;
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, CLOSED_CHANNEL_UPDATE_ID};
use crate::ln::channelmanager::ChannelManager;
use crate::routing::router::Router;
use crate::routing::gossip::NetworkGraph;
use crate::routing::scoring::WriteableScore;
use crate::util::logger::Logger;
use crate::util::ser::{Readable, ReadableArgs, Writeable};
/// The alphabet of characters allowed for namespaces and keys.
pub const KVSTORE_NAMESPACE_KEY_ALPHABET: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-";
/// The maximum number of characters namespaces and keys may have.
pub const KVSTORE_NAMESPACE_KEY_MAX_LEN: usize = 120;
/// The primary namespace under which the [`ChannelManager`] will be persisted.
pub const CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
/// The secondary namespace under which the [`ChannelManager`] will be persisted.
pub const CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
/// The key under which the [`ChannelManager`] will be persisted.
pub const CHANNEL_MANAGER_PERSISTENCE_KEY: &str = "manager";
/// The primary namespace under which [`ChannelMonitor`]s will be persisted.
pub const CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE: &str = "monitors";
/// The secondary namespace under which [`ChannelMonitor`]s will be persisted.
pub const CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
/// The primary namespace under which [`ChannelMonitorUpdate`]s will be persisted.
pub const CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE: &str = "monitor_updates";
/// The primary namespace under which the [`NetworkGraph`] will be persisted.
pub const NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
/// The secondary namespace under which the [`NetworkGraph`] will be persisted.
pub const NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
/// The key under which the [`NetworkGraph`] will be persisted.
pub const NETWORK_GRAPH_PERSISTENCE_KEY: &str = "network_graph";
/// The primary namespace under which the [`WriteableScore`] will be persisted.
pub const SCORER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
/// The secondary namespace under which the [`WriteableScore`] will be persisted.
pub const SCORER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
/// The key under which the [`WriteableScore`] will be persisted.
pub const SCORER_PERSISTENCE_KEY: &str = "scorer";
/// Sentinel bytes prepended to [`ChannelMonitor`]s persisted by the [`MonitorUpdatingPersister`], so they are not accidentally loaded by an implementation that would ignore the stored updates.
pub const MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL: &[u8] = &[0xFF; 2];
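/// Provides an interface that allows storage and retrieval of persisted values that are associated
/// with given keys.
///
/// In order to avoid collisions, the key space is segmented by the given `primary_namespace`s and
/// `secondary_namespace`s. Namespaces and keys must be valid ASCII strings drawn from
/// [`KVSTORE_NAMESPACE_KEY_ALPHABET`] and no longer than [`KVSTORE_NAMESPACE_KEY_MAX_LEN`]. Empty
/// namespaces (`""`) are valid, but if `primary_namespace` is empty, `secondary_namespace` must be
/// empty as well.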
pub trait KVStore {
	/// Returns the data stored for the given `primary_namespace`, `secondary_namespace`, and `key`, or an [`io::ErrorKind::NotFound`] error if no such entry exists.
	fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> Result<Vec<u8>, io::Error>;
	/// Persists `buf` under the given `key`, creating the namespaces if they are not present yet.
	fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> Result<(), io::Error>;
	/// Removes any data previously persisted under the given `key`; if `lazy` is set, the backend may defer the actual deletion at the cost of possibly leaving stale entries behind.
	fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> Result<(), io::Error>;
	/// Returns a list of keys that are stored under the given `primary_namespace` and `secondary_namespace`.
	fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> Result<Vec<String>, io::Error>;
}
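/// Trait that handles persisting a [`ChannelManager`], [`NetworkGraph`], and [`WriteableScore`] to disk.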
pub trait Persister<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>>
where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: 'static + BroadcasterInterface,
ES::Target: 'static + EntropySource,
NS::Target: 'static + NodeSigner,
SP::Target: 'static + SignerProvider,
F::Target: 'static + FeeEstimator,
R::Target: 'static + Router,
L::Target: 'static + Logger,
{
	/// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed.
	fn persist_manager(&self, channel_manager: &ChannelManager<M, T, ES, NS, SP, F, R, L>) -> Result<(), io::Error>;
	/// Persist the given [`NetworkGraph`] to disk, returning an error if persistence failed.
	fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error>;
	/// Persist the given [`WriteableScore`] to disk, returning an error if persistence failed.
	fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error>;
}
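// Blanket implementation: any `KVStore` can act as a `Persister` by writing the manager, graph,
// and scorer under the constant namespaces and keys defined above.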
impl<'a, A: KVStore, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, M, T, ES, NS, SP, F, R, L, S> for A
where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: 'static + BroadcasterInterface,
ES::Target: 'static + EntropySource,
NS::Target: 'static + NodeSigner,
SP::Target: 'static + SignerProvider,
F::Target: 'static + FeeEstimator,
R::Target: 'static + Router,
L::Target: 'static + Logger,
{
fn persist_manager(&self, channel_manager: &ChannelManager<M, T, ES, NS, SP, F, R, L>) -> Result<(), io::Error> {
self.write(CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
CHANNEL_MANAGER_PERSISTENCE_KEY,
&channel_manager.encode())
}
fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error> {
self.write(NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
NETWORK_GRAPH_PERSISTENCE_KEY,
&network_graph.encode())
}
fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error> {
self.write(SCORER_PERSISTENCE_PRIMARY_NAMESPACE,
SCORER_PERSISTENCE_SECONDARY_NAMESPACE,
SCORER_PERSISTENCE_KEY,
&scorer.encode())
}
}
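// Blanket `Persist` implementation for any `KVStore`: every call rewrites the full serialized
// `ChannelMonitor` under `monitors/<funding txid>_<funding output index>`, and a failed write is
// reported as `UnrecoverableError` since no retry is attempted.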
impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore> Persist<ChannelSigner> for K {
fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
match self.write(
CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
&key, &monitor.encode())
{
Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError
}
}
fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
match self.write(
CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
&key, &monitor.encode())
{
Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError
}
}
}
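/// Read previously persisted [`ChannelMonitor`]s from the store.
///
/// Returns the stored `(BlockHash, ChannelMonitor)` pairs, erroring out if a stored key is
/// malformed or if a monitor was stored under a key that does not match its funding outpoint.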
pub fn read_channel_monitors<K: Deref, ES: Deref, SP: Deref>(
kv_store: K, entropy_source: ES, signer_provider: SP,
) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>, io::Error>
where
K::Target: KVStore,
ES::Target: EntropySource + Sized,
SP::Target: SignerProvider + Sized,
{
let mut res = Vec::new();
for stored_key in kv_store.list(
CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE)?
{
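		// Monitor keys have the form `<txid>_<index>`: a 64-character hex txid, an underscore, and
		// at least one digit for the funding output index, i.e. at least 66 characters.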
if stored_key.len() < 66 {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Stored key has invalid length"));
}
let txid = Txid::from_str(stored_key.split_at(64).0).map_err(|_| {
io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
})?;
let index: u16 = stored_key.split_at(65).1.parse().map_err(|_| {
io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
})?;
match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>::read(
&mut io::Cursor::new(
kv_store.read(CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, &stored_key)?),
(&*entropy_source, &*signer_provider),
) {
Ok((block_hash, channel_monitor)) => {
if channel_monitor.get_funding_txo().0.txid != txid
|| channel_monitor.get_funding_txo().0.index != index
{
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"ChannelMonitor was stored under the wrong key",
));
}
res.push((block_hash, channel_monitor));
}
Err(_) => {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Failed to read ChannelMonitor"
))
}
}
}
Ok(res)
}
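/// Implements [`Persist`] in a way that writes and reads both full [`ChannelMonitor`]s and
/// differential [`ChannelMonitorUpdate`]s.
///
/// Compared to the plain [`KVStore`] implementation of [`Persist`], this trades extra IOPS
/// (listing, reading, and deleting update entries) for far less write bandwidth: a full monitor is
/// only rewritten every `maximum_pending_updates` updates (and on channel closure), while the
/// updates in between are persisted individually under
/// `monitor_updates/<monitor name>/<update id>` and replayed on top of the last full monitor when
/// reading.
///
/// A minimal construction sketch; the handles below are placeholders for whatever `Deref` types
/// (e.g. references or `Arc`s) the node actually uses for its store, logger, keys manager,
/// broadcaster, and fee estimator:
///
/// ```ignore
/// let persister = MonitorUpdatingPersister::new(
///     my_kv_store,      // derefs to an impl of `KVStore`
///     my_logger,        // derefs to an impl of `Logger`
///     100,              // maximum_pending_updates
///     my_keys_manager,  // derefs to an impl of `EntropySource`
///     my_keys_manager,  // derefs to an impl of `SignerProvider`
/// );
/// let monitors = persister
///     .read_all_channel_monitors_with_updates(&my_broadcaster, &my_fee_estimator)?;
/// ```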
pub struct MonitorUpdatingPersister<K: Deref, L: Deref, ES: Deref, SP: Deref>
where
K::Target: KVStore,
L::Target: Logger,
ES::Target: EntropySource + Sized,
SP::Target: SignerProvider + Sized,
{
kv_store: K,
logger: L,
maximum_pending_updates: u64,
entropy_source: ES,
signer_provider: SP,
}
#[allow(dead_code)]
impl<K: Deref, L: Deref, ES: Deref, SP: Deref>
MonitorUpdatingPersister<K, L, ES, SP>
where
K::Target: KVStore,
L::Target: Logger,
ES::Target: EntropySource + Sized,
SP::Target: SignerProvider + Sized,
{
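	/// Constructs a new [`MonitorUpdatingPersister`].
	///
	/// `maximum_pending_updates` bounds how many [`ChannelMonitorUpdate`]s may be written between
	/// full [`ChannelMonitor`] writes: a full monitor is persisted whenever the update id is a
	/// multiple of this value (or the channel is closed), and the differential update is written
	/// otherwise.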
pub fn new(
kv_store: K, logger: L, maximum_pending_updates: u64, entropy_source: ES,
signer_provider: SP,
) -> Self {
MonitorUpdatingPersister {
kv_store,
logger,
maximum_pending_updates,
entropy_source,
signer_provider,
}
}
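	/// Reads all stored channel monitors, along with any stored updates for them.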
pub fn read_all_channel_monitors_with_updates<B: Deref, F: Deref>(
&self, broadcaster: &B, fee_estimator: &F,
) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>, io::Error>
where
B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
{
let monitor_list = self.kv_store.list(
CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
)?;
let mut res = Vec::with_capacity(monitor_list.len());
for monitor_key in monitor_list {
res.push(self.read_channel_monitor_with_updates(
broadcaster,
fee_estimator,
monitor_key,
)?)
}
Ok(res)
}
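	/// Reads a single channel monitor, applying any stored updates with ids newer than the
	/// monitor's own.
	///
	/// This relies on the [`KVStore::read`] implementation returning [`io::ErrorKind::NotFound`]
	/// for missing updates: the replay loop stops at the first update id that cannot be found.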
pub fn read_channel_monitor_with_updates<B: Deref, F: Deref>(
&self, broadcaster: &B, fee_estimator: &F, monitor_key: String,
) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), io::Error>
where
B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
{
let monitor_name = MonitorName::new(monitor_key)?;
let (block_hash, monitor) = self.read_monitor(&monitor_name)?;
let mut current_update_id = monitor.get_latest_update_id();
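		// Starting from the update after the monitor's latest, apply stored updates until one is
		// missing. The `checked_add` also breaks once the id would overflow, since
		// `CLOSED_CHANNEL_UPDATE_ID` (`u64::MAX`) is never followed by further updates.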
loop {
current_update_id = match current_update_id.checked_add(1) {
Some(next_update_id) => next_update_id,
None => break,
};
let update_name = UpdateName::from(current_update_id);
let update = match self.read_monitor_update(&monitor_name, &update_name) {
Ok(update) => update,
Err(err) if err.kind() == io::ErrorKind::NotFound => {
break;
}
Err(err) => return Err(err),
};
monitor.update_monitor(&update, broadcaster, fee_estimator, &self.logger)
.map_err(|e| {
log_error!(
self.logger,
"Monitor update failed. monitor: {} update: {} reason: {:?}",
monitor_name.as_str(),
update_name.as_str(),
e
);
io::Error::new(io::ErrorKind::Other, "Monitor update failed")
})?;
}
Ok((block_hash, monitor))
}
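	/// Reads a full channel monitor (skipping the prepended sentinel bytes, if present) and
	/// verifies it was stored under a key matching its funding outpoint.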
fn read_monitor(
&self, monitor_name: &MonitorName,
) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), io::Error> {
let outpoint: OutPoint = monitor_name.try_into()?;
let mut monitor_cursor = io::Cursor::new(self.kv_store.read(
CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
monitor_name.as_str(),
)?);
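		// Skip the sentinel bytes prepended by `persist_new_channel`, if present.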
if monitor_cursor.get_ref().starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) {
monitor_cursor.set_position(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() as u64);
}
match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>::read(
&mut monitor_cursor,
(&*self.entropy_source, &*self.signer_provider),
) {
Ok((blockhash, channel_monitor)) => {
if channel_monitor.get_funding_txo().0.txid != outpoint.txid
|| channel_monitor.get_funding_txo().0.index != outpoint.index
{
log_error!(
self.logger,
"ChannelMonitor {} was stored under the wrong key!",
monitor_name.as_str()
);
Err(io::Error::new(
io::ErrorKind::InvalidData,
"ChannelMonitor was stored under the wrong key",
))
} else {
Ok((blockhash, channel_monitor))
}
}
Err(e) => {
log_error!(
self.logger,
"Failed to read ChannelMonitor {}, reason: {}",
monitor_name.as_str(),
e,
);
Err(io::Error::new(io::ErrorKind::InvalidData, "Failed to read ChannelMonitor"))
}
}
}
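	/// Reads a single [`ChannelMonitorUpdate`] stored under
	/// `monitor_updates/<monitor name>/<update name>`.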
fn read_monitor_update(
&self, monitor_name: &MonitorName, update_name: &UpdateName,
) -> Result<ChannelMonitorUpdate, io::Error> {
let update_bytes = self.kv_store.read(
CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
monitor_name.as_str(),
update_name.as_str(),
)?;
ChannelMonitorUpdate::read(&mut io::Cursor::new(update_bytes)).map_err(|e| {
log_error!(
self.logger,
"Failed to read ChannelMonitorUpdate {}/{}/{}, reason: {}",
CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
monitor_name.as_str(),
update_name.as_str(),
e,
);
io::Error::new(io::ErrorKind::InvalidData, "Failed to read ChannelMonitorUpdate")
})
}
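	/// Cleans up stale updates for all monitors.
	///
	/// For each stored monitor, every update whose id is less than or equal to the monitor's
	/// latest update id is deleted, since it is already reflected in the monitor itself. The
	/// `lazy` flag is passed through to [`KVStore::remove`].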
pub fn cleanup_stale_updates(&self, lazy: bool) -> Result<(), io::Error> {
let monitor_keys = self.kv_store.list(
CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
)?;
for monitor_key in monitor_keys {
let monitor_name = MonitorName::new(monitor_key)?;
let (_, current_monitor) = self.read_monitor(&monitor_name)?;
let updates = self
.kv_store
.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str())?;
for update in updates {
let update_name = UpdateName::new(update)?;
if update_name.0 <= current_monitor.get_latest_update_id() {
self.kv_store.remove(
CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
monitor_name.as_str(),
update_name.as_str(),
lazy,
)?;
}
}
}
Ok(())
}
}
impl<ChannelSigner: WriteableEcdsaChannelSigner, K: Deref, L: Deref, ES: Deref, SP: Deref>
Persist<ChannelSigner> for MonitorUpdatingPersister<K, L, ES, SP>
where
K::Target: KVStore,
L::Target: Logger,
ES::Target: EntropySource + Sized,
SP::Target: SignerProvider + Sized,
{
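	// Persists the full monitor, prefixed with the sentinel bytes, under
	// `monitors/<funding txid>_<funding output index>`.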
fn persist_new_channel(
&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>,
_monitor_update_call_id: MonitorUpdateId,
) -> chain::ChannelMonitorUpdateStatus {
let monitor_name = MonitorName::from(funding_txo);
let mut monitor_bytes = Vec::with_capacity(
MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() + monitor.serialized_length(),
);
monitor_bytes.extend_from_slice(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL);
monitor.write(&mut monitor_bytes).unwrap();
match self.kv_store.write(
CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
monitor_name.as_str(),
&monitor_bytes,
) {
Ok(_) => {
chain::ChannelMonitorUpdateStatus::Completed
}
Err(e) => {
log_error!(
self.logger,
"Failed to write ChannelMonitor {}/{}/{} reason: {}",
CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
monitor_name.as_str(),
e
);
chain::ChannelMonitorUpdateStatus::UnrecoverableError
}
}
}
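	// Persists either a differential update or a full monitor:
	//  - if an update is given and its id is neither `CLOSED_CHANNEL_UPDATE_ID` nor a multiple of
	//    `maximum_pending_updates`, only the small `ChannelMonitorUpdate` is written;
	//  - otherwise (every `maximum_pending_updates`th update, on channel closure, or when no
	//    update is given) the full monitor is rewritten and now-stale update entries are pruned.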
fn update_persisted_channel(
&self, funding_txo: OutPoint, update: Option<&ChannelMonitorUpdate>,
monitor: &ChannelMonitor<ChannelSigner>, monitor_update_call_id: MonitorUpdateId,
) -> chain::ChannelMonitorUpdateStatus {
if let Some(update) = update {
if update.update_id != CLOSED_CHANNEL_UPDATE_ID
&& update.update_id % self.maximum_pending_updates != 0
{
let monitor_name = MonitorName::from(funding_txo);
let update_name = UpdateName::from(update.update_id);
match self.kv_store.write(
CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
monitor_name.as_str(),
update_name.as_str(),
&update.encode(),
) {
Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
Err(e) => {
log_error!(
self.logger,
"Failed to write ChannelMonitorUpdate {}/{}/{} reason: {}",
CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
monitor_name.as_str(),
update_name.as_str(),
e
);
chain::ChannelMonitorUpdateStatus::UnrecoverableError
}
}
} else {
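				// This update calls for a full monitor write. If the channel is being closed, read
				// the previously stored monitor first so its remaining update entries can be
				// cleaned up below.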
let monitor_name = MonitorName::from(funding_txo);
let maybe_old_monitor = match monitor.get_latest_update_id() {
CLOSED_CHANNEL_UPDATE_ID => self.read_monitor(&monitor_name).ok(),
_ => None
};
let monitor_update_status = self.persist_new_channel(funding_txo, monitor, monitor_update_call_id);
if let chain::ChannelMonitorUpdateStatus::Completed = monitor_update_status {
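					// Work out which update ids are now covered by the freshly written full
					// monitor and can therefore be deleted. On closure this spans from the old
					// monitor's latest id (capped below `CLOSED_CHANNEL_UPDATE_ID`, which is never
					// persisted as an update); otherwise it is the last `maximum_pending_updates`
					// ids. If the old monitor could not be read, cleanup is simply skipped.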
let cleanup_range = if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
maybe_old_monitor.map(|(_, ref old_monitor)| {
let start = old_monitor.get_latest_update_id();
let end = cmp::min(
start.saturating_add(self.maximum_pending_updates),
CLOSED_CHANNEL_UPDATE_ID - 1,
);
(start, end)
})
} else {
let end = monitor.get_latest_update_id();
let start = end.saturating_sub(self.maximum_pending_updates);
Some((start, end))
};
if let Some((start, end)) = cleanup_range {
self.cleanup_in_range(monitor_name, start, end);
}
}
monitor_update_status
}
} else {
self.persist_new_channel(funding_txo, monitor, monitor_update_call_id)
}
}
}
impl<K: Deref, L: Deref, ES: Deref, SP: Deref> MonitorUpdatingPersister<K, L, ES, SP>
where
ES::Target: EntropySource + Sized,
K::Target: KVStore,
L::Target: Logger,
SP::Target: SignerProvider + Sized
{
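	/// Deletes the update entries with ids in `start..=end` for the given monitor. Failures are
	/// only logged, since leftover stale updates are harmless and can be removed later via
	/// [`Self::cleanup_stale_updates`].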
fn cleanup_in_range(&self, monitor_name: MonitorName, start: u64, end: u64) {
for update_id in start..=end {
let update_name = UpdateName::from(update_id);
if let Err(e) = self.kv_store.remove(
CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
monitor_name.as_str(),
update_name.as_str(),
true,
) {
log_error!(
self.logger,
"Failed to clean up channel monitor updates for monitor {}, reason: {}",
monitor_name.as_str(),
e
);
};
}
}
}
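/// A channel monitor storage name, of the form `<funding txid>_<funding output index>`.
/// Construction validates that the name parses back into an [`OutPoint`].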
#[derive(Debug)]
struct MonitorName(String);
impl MonitorName {
pub fn new(name: String) -> Result<Self, io::Error> {
MonitorName::do_try_into_outpoint(&name)?;
Ok(Self(name))
}
pub fn as_str(&self) -> &str {
&self.0
}
fn do_try_into_outpoint(name: &str) -> Result<OutPoint, io::Error> {
let mut parts = name.splitn(2, '_');
let txid = if let Some(part) = parts.next() {
Txid::from_str(part).map_err(|_| {
io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
})?
} else {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Stored monitor key is not a splittable string",
));
};
let index = if let Some(part) = parts.next() {
part.parse().map_err(|_| {
io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
})?
} else {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"No tx index value found after underscore in stored key",
));
};
Ok(OutPoint { txid, index })
}
}
impl TryFrom<&MonitorName> for OutPoint {
type Error = io::Error;
fn try_from(value: &MonitorName) -> Result<Self, io::Error> {
MonitorName::do_try_into_outpoint(&value.0)
}
}
impl From<OutPoint> for MonitorName {
fn from(value: OutPoint) -> Self {
MonitorName(format!("{}_{}", value.txid.to_string(), value.index))
}
}
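/// A monitor update storage name: the update id along with its decimal string form, which is used
/// as the storage key.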
#[derive(Debug)]
struct UpdateName(u64, String);
impl UpdateName {
pub fn new(name: String) -> Result<Self, io::Error> {
match name.parse::<u64>() {
Ok(u) => Ok(u.into()),
Err(_) => {
Err(io::Error::new(io::ErrorKind::InvalidData, "cannot parse u64 from update name"))
}
}
}
pub fn as_str(&self) -> &str {
&self.1
}
}
impl From<u64> for UpdateName {
fn from(value: u64) -> Self {
Self(value, value.to_string())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::chain::chainmonitor::Persist;
use crate::chain::ChannelMonitorUpdateStatus;
use crate::events::{ClosureReason, MessageSendEventsProvider};
use crate::ln::functional_test_utils::*;
use crate::util::test_utils::{self, TestLogger, TestStore};
use crate::{check_added_monitors, check_closed_broadcast};
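	// Number of `ChannelMonitorUpdate`s each node generates for a single payment in these tests.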
const EXPECTED_UPDATES_PER_PAYMENT: u64 = 5;
#[test]
fn converts_u64_to_update_name() {
assert_eq!(UpdateName::from(0).as_str(), "0");
assert_eq!(UpdateName::from(21).as_str(), "21");
assert_eq!(UpdateName::from(u64::MAX).as_str(), "18446744073709551615");
}
#[test]
fn bad_update_name_fails() {
assert!(UpdateName::new("deadbeef".to_string()).is_err());
assert!(UpdateName::new("-1".to_string()).is_err());
}
#[test]
fn monitor_from_outpoint_works() {
let monitor_name1 = MonitorName::from(OutPoint {
txid: Txid::from_str("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef").unwrap(),
index: 1,
});
assert_eq!(monitor_name1.as_str(), "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1");
let monitor_name2 = MonitorName::from(OutPoint {
txid: Txid::from_str("f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef").unwrap(),
index: u16::MAX,
});
assert_eq!(monitor_name2.as_str(), "f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef_65535");
}
#[test]
fn bad_monitor_string_fails() {
assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef".to_string()).is_err());
assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_65536".to_string()).is_err());
assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_21".to_string()).is_err());
}
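	// Exercise two `MonitorUpdatingPersister`s (with different `maximum_pending_updates`) against
	// real channel monitors, checking that update entries are pruned whenever a full monitor is
	// written and after the channel is force-closed.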
#[test]
fn persister_with_real_monitors() {
let persister_0_max_pending_updates = 7;
let persister_1_max_pending_updates = 3;
let chanmon_cfgs = create_chanmon_cfgs(4);
let persister_0 = MonitorUpdatingPersister {
kv_store: &TestStore::new(false),
logger: &TestLogger::new(),
maximum_pending_updates: persister_0_max_pending_updates,
entropy_source: &chanmon_cfgs[0].keys_manager,
signer_provider: &chanmon_cfgs[0].keys_manager,
};
let persister_1 = MonitorUpdatingPersister {
kv_store: &TestStore::new(false),
logger: &TestLogger::new(),
maximum_pending_updates: persister_1_max_pending_updates,
entropy_source: &chanmon_cfgs[1].keys_manager,
signer_provider: &chanmon_cfgs[1].keys_manager,
};
let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let chain_mon_0 = test_utils::TestChainMonitor::new(
Some(&chanmon_cfgs[0].chain_source),
&chanmon_cfgs[0].tx_broadcaster,
&chanmon_cfgs[0].logger,
&chanmon_cfgs[0].fee_estimator,
&persister_0,
&chanmon_cfgs[0].keys_manager,
);
let chain_mon_1 = test_utils::TestChainMonitor::new(
Some(&chanmon_cfgs[1].chain_source),
&chanmon_cfgs[1].tx_broadcaster,
&chanmon_cfgs[1].logger,
&chanmon_cfgs[1].fee_estimator,
&persister_1,
&chanmon_cfgs[1].keys_manager,
);
node_cfgs[0].chain_monitor = chain_mon_0;
node_cfgs[1].chain_monitor = chain_mon_1;
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let broadcaster_0 = &chanmon_cfgs[2].tx_broadcaster;
let broadcaster_1 = &chanmon_cfgs[3].tx_broadcaster;
let mut persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
assert_eq!(persisted_chan_data_0.len(), 0);
let mut persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
&broadcaster_1, &&chanmon_cfgs[1].fee_estimator).unwrap();
assert_eq!(persisted_chan_data_1.len(), 0);
macro_rules! check_persisted_data {
($expected_update_id: expr) => {
persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
assert_eq!(persisted_chan_data_0.len(), 1);
for (_, mon) in persisted_chan_data_0.iter() {
assert_eq!(mon.get_latest_update_id(), $expected_update_id);
let monitor_name = MonitorName::from(mon.get_funding_txo().0);
if mon.get_latest_update_id() % persister_0_max_pending_updates == 0
|| mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
assert_eq!(
persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
monitor_name.as_str()).unwrap().len(),
0,
"updates stored when they shouldn't be in persister 0"
);
}
}
persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
&broadcaster_1, &&chanmon_cfgs[1].fee_estimator).unwrap();
assert_eq!(persisted_chan_data_1.len(), 1);
for (_, mon) in persisted_chan_data_1.iter() {
assert_eq!(mon.get_latest_update_id(), $expected_update_id);
let monitor_name = MonitorName::from(mon.get_funding_txo().0);
if mon.get_latest_update_id() % persister_1_max_pending_updates == 0
|| mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
assert_eq!(
persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
monitor_name.as_str()).unwrap().len(),
0,
"updates stored when they shouldn't be in persister 1"
);
}
}
};
}
let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
check_persisted_data!(0);
send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
check_persisted_data!(EXPECTED_UPDATES_PER_PAYMENT);
send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);
check_persisted_data!(2 * EXPECTED_UPDATES_PER_PAYMENT);
let mut sender = 0;
for i in 3..=persister_0_max_pending_updates * 2 {
let receiver;
if sender == 0 {
sender = 1;
receiver = 0;
} else {
sender = 0;
receiver = 1;
}
send_payment(&nodes[sender], &vec![&nodes[receiver]][..], 21_000);
check_persisted_data!(i * EXPECTED_UPDATES_PER_PAYMENT);
}
nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
let node_txn = nodes[0].tx_broadcaster.txn_broadcast();
assert_eq!(node_txn.len(), 1);
connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()]));
check_closed_broadcast!(nodes[1], true);
check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
check_added_monitors!(nodes[1], 1);
check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
let (_, monitor) = &persisted_chan_data[0];
let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
assert_eq!(persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0);
assert_eq!(persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0);
}
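	// A persister backed by a read-only store (whose writes fail) should report
	// `UnrecoverableError` for both new-channel and update persistence.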
#[test]
fn unrecoverable_error_on_write_failure() {
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
{
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
let cmu = &cmu_map.get(&added_monitors[0].0.to_channel_id()).unwrap()[0];
let test_txo = OutPoint { txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
let ro_persister = MonitorUpdatingPersister {
kv_store: &TestStore::new(true),
logger: &TestLogger::new(),
maximum_pending_updates: 11,
entropy_source: node_cfgs[0].keys_manager,
signer_provider: node_cfgs[0].keys_manager,
};
match ro_persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
ChannelMonitorUpdateStatus::UnrecoverableError => {
}
ChannelMonitorUpdateStatus::Completed => {
panic!("Completed persisting new channel when shouldn't have")
}
ChannelMonitorUpdateStatus::InProgress => {
panic!("Returned InProgress when shouldn't have")
}
}
match ro_persister.update_persisted_channel(test_txo, Some(cmu), &added_monitors[0].1, update_id.2) {
ChannelMonitorUpdateStatus::UnrecoverableError => {
}
ChannelMonitorUpdateStatus::Completed => {
panic!("Completed persisting new channel when shouldn't have")
}
ChannelMonitorUpdateStatus::InProgress => {
panic!("Returned InProgress when shouldn't have")
}
}
added_monitors.clear();
}
nodes[1].node.get_and_clear_pending_msg_events();
}
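	// `cleanup_stale_updates` should delete update entries whose ids are at or below the stored
	// monitor's latest update id.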
#[test]
fn clean_stale_updates_works() {
let test_max_pending_updates = 7;
let chanmon_cfgs = create_chanmon_cfgs(3);
let persister_0 = MonitorUpdatingPersister {
kv_store: &TestStore::new(false),
logger: &TestLogger::new(),
maximum_pending_updates: test_max_pending_updates,
entropy_source: &chanmon_cfgs[0].keys_manager,
signer_provider: &chanmon_cfgs[0].keys_manager,
};
let persister_1 = MonitorUpdatingPersister {
kv_store: &TestStore::new(false),
logger: &TestLogger::new(),
maximum_pending_updates: test_max_pending_updates,
entropy_source: &chanmon_cfgs[1].keys_manager,
signer_provider: &chanmon_cfgs[1].keys_manager,
};
let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let chain_mon_0 = test_utils::TestChainMonitor::new(
Some(&chanmon_cfgs[0].chain_source),
&chanmon_cfgs[0].tx_broadcaster,
&chanmon_cfgs[0].logger,
&chanmon_cfgs[0].fee_estimator,
&persister_0,
&chanmon_cfgs[0].keys_manager,
);
let chain_mon_1 = test_utils::TestChainMonitor::new(
Some(&chanmon_cfgs[1].chain_source),
&chanmon_cfgs[1].tx_broadcaster,
&chanmon_cfgs[1].logger,
&chanmon_cfgs[1].fee_estimator,
&persister_1,
&chanmon_cfgs[1].keys_manager,
);
node_cfgs[0].chain_monitor = chain_mon_0;
node_cfgs[1].chain_monitor = chain_mon_1;
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let broadcaster_0 = &chanmon_cfgs[2].tx_broadcaster;
let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
assert_eq!(persisted_chan_data.len(), 0);
let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);
let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
let (_, monitor) = &persisted_chan_data[0];
let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
persister_0
.kv_store
.write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str(), &[0u8; 1])
.unwrap();
persister_0.cleanup_stale_updates(false).unwrap();
assert!(persister_0
.kv_store
.read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str())
.is_err());
nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
persister_0
.kv_store
.write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str(), &[0u8; 1])
.unwrap();
persister_0.cleanup_stale_updates(false).unwrap();
assert!(persister_0
.kv_store
.read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str())
.is_err());
}
}