lightning/routing/scoring.rs

1// This file is Copyright its original authors, visible in version control
2// history.
3//
4// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7// You may not use this file except in accordance with one or both of these
8// licenses.
9
10//! Utilities for scoring payment channels.
11//!
12//! [`ProbabilisticScorer`] may be given to [`find_route`] to score payment channels during path
13//! finding when a custom [`ScoreLookUp`] implementation is not needed.
14//!
15//! # Example
16//!
17//! ```
18//! # extern crate bitcoin;
19//! #
20//! # use lightning::routing::gossip::NetworkGraph;
21//! # use lightning::routing::router::{RouteParameters, find_route};
22//! # use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters};
23//! # use lightning::sign::KeysManager;
24//! # use lightning::util::logger::{Logger, Record};
25//! # use bitcoin::secp256k1::PublicKey;
26//! #
27//! # struct FakeLogger {};
28//! # impl Logger for FakeLogger {
29//! #     fn log(&self, record: Record) { unimplemented!() }
30//! # }
31//! # fn find_scored_route(payer: PublicKey, route_params: RouteParameters, network_graph: NetworkGraph<&FakeLogger>) {
32//! # let logger = FakeLogger {};
33//! #
34//! // Use the default channel penalties.
35//! let params = ProbabilisticScoringFeeParameters::default();
36//! let decay_params = ProbabilisticScoringDecayParameters::default();
37//! let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
38//!
39//! // Or use custom channel penalties.
40//! let params = ProbabilisticScoringFeeParameters {
41//! 	liquidity_penalty_multiplier_msat: 2 * 1000,
42//! 	..ProbabilisticScoringFeeParameters::default()
43//! };
44//! let decay_params = ProbabilisticScoringDecayParameters::default();
45//! let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
46//! # let random_seed_bytes = [42u8; 32];
47//!
48//! let route = find_route(&payer, &route_params, &network_graph, None, &logger, &scorer, &params, &random_seed_bytes);
49//! # }
50//! ```
51//!
52//! [`find_route`]: crate::routing::router::find_route
53
54use crate::ln::msgs::DecodeError;
55use crate::routing::gossip::{DirectedChannelInfo, EffectiveCapacity, NetworkGraph, NodeId};
56use crate::routing::router::{Path, CandidateRouteHop, PublicHopCandidate};
57use crate::routing::log_approx;
58use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
59use crate::util::logger::Logger;
60
61use crate::prelude::*;
62use core::{cmp, fmt};
63use core::ops::{Deref, DerefMut};
64use core::time::Duration;
65use crate::io::{self, Read};
66use crate::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
67#[cfg(not(c_bindings))]
68use {
69	core::cell::{RefCell, RefMut, Ref},
70	crate::sync::{Mutex, MutexGuard},
71};
72
73/// We define Score ever-so-slightly differently based on whether we are being built for C bindings
74/// or not. For users, `LockableScore` must somehow be writeable to disk. For Rust users, this is
75/// no problem - you move a `Score` that implements `Writeable` into a `Mutex`, lock it, and now
76/// you have the original, concrete, `Score` type, which presumably implements `Writeable`.
77///
78/// For C users, once you've moved the `Score` into a `LockableScore` all you have after locking it
79/// is an opaque trait object with an opaque pointer with no type info. Users could take the unsafe
80/// approach of blindly casting that opaque pointer to a concrete type and calling `Writeable` from
81/// there, but other languages downstream of the C bindings (e.g. Java) can't even do that.
82/// Instead, we really want `Score` and `LockableScore` to implement `Writeable` directly, which we
83/// do here by defining `Score` differently for `cfg(c_bindings)`.
84macro_rules! define_score { ($($supertrait: path)*) => {
85/// An interface used to score payment channels for path finding.
86///
87/// `ScoreLookUp` is used to determine the penalty for a given channel.
88///
89/// Scoring is in terms of fees willing to be paid in order to avoid routing through a channel.
90pub trait ScoreLookUp {
91	/// A configurable type containing parameters for configuring the scorer, passed through to the
92	/// scorer methods on a per-routefinding-call basis and used when determining how suitable a
93	/// channel is for use.
94	type ScoreParams;
95	/// Returns the fee in msats willing to be paid to avoid routing `send_amt_msat` through the
96	/// given channel in the direction from `source` to `target`.
97	///
98	/// The channel's capacity (less any other MPP parts that are also being considered for use in
99	/// the same payment) is given by `capacity_msat`. It may be determined from various sources
100	/// such as chain data, network gossip, or invoice hints. For invoice hints, a capacity near
101	/// [`u64::max_value`] is given to indicate sufficient capacity for the invoice's full amount.
102	/// Thus, implementations should be overflow-safe.
103	fn channel_penalty_msat(
104		&self, candidate: &CandidateRouteHop, usage: ChannelUsage, score_params: &Self::ScoreParams
105	) -> u64;
106}
107
108/// `ScoreUpdate` is used to update the scorer's internal state after a payment attempt.
109pub trait ScoreUpdate {
110	/// Handles updating channel penalties after failing to route through a channel.
111	fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration);
112
113	/// Handles updating channel penalties after successfully routing along a path.
114	fn payment_path_successful(&mut self, path: &Path, duration_since_epoch: Duration);
115
116	/// Handles updating channel penalties after a probe over the given path failed.
117	fn probe_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration);
118
119	/// Handles updating channel penalties after a probe over the given path succeeded.
120	fn probe_successful(&mut self, path: &Path, duration_since_epoch: Duration);
121
122	/// Scorers may wish to reduce their certainty of channel liquidity information over time.
123	/// Thus, this method is provided to allow scorers to observe the passage of time - the holder
124	/// of this object should call this method regularly (generally via the
125	/// `lightning-background-processor` crate).
126	fn time_passed(&mut self, duration_since_epoch: Duration);
127}
128
129/// A trait which can both lookup and update routing channel penalty scores.
130///
131/// This is used in places where both bounds are required and implemented for all types which
132/// implement [`ScoreLookUp`] and [`ScoreUpdate`].
133///
134/// Bindings users may need to manually implement this for their custom scoring implementations.
135pub trait Score : ScoreLookUp + ScoreUpdate $(+ $supertrait)* {}
136
137#[cfg(not(c_bindings))]
138impl<T: ScoreLookUp + ScoreUpdate $(+ $supertrait)*> Score for T {}
139
140#[cfg(not(c_bindings))]
141impl<S: ScoreLookUp, T: Deref<Target=S>> ScoreLookUp for T {
142	type ScoreParams = S::ScoreParams;
143	fn channel_penalty_msat(
144		&self, candidate: &CandidateRouteHop, usage: ChannelUsage, score_params: &Self::ScoreParams
145	) -> u64 {
146		self.deref().channel_penalty_msat(candidate, usage, score_params)
147	}
148}
149
150#[cfg(not(c_bindings))]
151impl<S: ScoreUpdate, T: DerefMut<Target=S>> ScoreUpdate for T {
152	fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) {
153		self.deref_mut().payment_path_failed(path, short_channel_id, duration_since_epoch)
154	}
155
156	fn payment_path_successful(&mut self, path: &Path, duration_since_epoch: Duration) {
157		self.deref_mut().payment_path_successful(path, duration_since_epoch)
158	}
159
160	fn probe_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) {
161		self.deref_mut().probe_failed(path, short_channel_id, duration_since_epoch)
162	}
163
164	fn probe_successful(&mut self, path: &Path, duration_since_epoch: Duration) {
165		self.deref_mut().probe_successful(path, duration_since_epoch)
166	}
167
168	fn time_passed(&mut self, duration_since_epoch: Duration) {
169		self.deref_mut().time_passed(duration_since_epoch)
170	}
171}
172} }
173
174#[cfg(c_bindings)]
175define_score!(Writeable);
176
177#[cfg(not(c_bindings))]
178define_score!();
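
// As a minimal illustrative sketch (not part of the library), a custom scorer only needs to
// implement `ScoreLookUp` and `ScoreUpdate`; for non-`c_bindings` builds `Score` is then provided
// by the blanket impl above. The `StaticPenaltyScorer` type and its flat 500 msat penalty are
// hypothetical:
//
// struct StaticPenaltyScorer;
//
// impl ScoreLookUp for StaticPenaltyScorer {
// 	type ScoreParams = ();
// 	fn channel_penalty_msat(
// 		&self, _candidate: &CandidateRouteHop, _usage: ChannelUsage, _score_params: &Self::ScoreParams
// 	) -> u64 {
// 		// Charge a flat 500 msat to traverse any channel, regardless of usage.
// 		500
// 	}
// }
//
// impl ScoreUpdate for StaticPenaltyScorer {
// 	fn payment_path_failed(&mut self, _path: &Path, _short_channel_id: u64, _duration_since_epoch: Duration) {}
// 	fn payment_path_successful(&mut self, _path: &Path, _duration_since_epoch: Duration) {}
// 	fn probe_failed(&mut self, _path: &Path, _short_channel_id: u64, _duration_since_epoch: Duration) {}
// 	fn probe_successful(&mut self, _path: &Path, _duration_since_epoch: Duration) {}
// 	fn time_passed(&mut self, _duration_since_epoch: Duration) {}
// }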
179
180/// A scorer that is accessed under a lock.
181///
182/// Needed so that calls to [`ScoreLookUp::channel_penalty_msat`] in [`find_route`] can be made while
183/// having shared ownership of a scorer but without requiring internal locking in [`ScoreUpdate`]
184/// implementations. Internal locking would be detrimental to route finding performance and could
185/// result in [`ScoreLookUp::channel_penalty_msat`] returning a different value for the same channel.
186///
187/// [`find_route`]: crate::routing::router::find_route
188pub trait LockableScore<'a> {
189	/// The [`ScoreUpdate`] type.
190	type ScoreUpdate: 'a + ScoreUpdate;
191	/// The [`ScoreLookUp`] type.
192	type ScoreLookUp: 'a + ScoreLookUp;
193
194	/// The write locked [`ScoreUpdate`] type.
195	type WriteLocked: DerefMut<Target = Self::ScoreUpdate> + Sized;
196
197	/// The read locked [`ScoreLookUp`] type.
198	type ReadLocked: Deref<Target = Self::ScoreLookUp> + Sized;
199
200	/// Returns read locked scorer.
201	fn read_lock(&'a self) -> Self::ReadLocked;
202
203	/// Returns write locked scorer.
204	fn write_lock(&'a self) -> Self::WriteLocked;
205}
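
// As an illustrative sketch of the trait above (assuming `decay_params`, `network_graph`,
// `logger`, `candidate`, `usage` and `fee_params` are built elsewhere, as in the module-level
// example), route-finding takes the shared read lock while score updates take the exclusive
// write lock:
//
// let scorer = RwLock::new(ProbabilisticScorer::new(decay_params, &network_graph, &logger));
// {
// 	// Many read locks may be held concurrently during path finding.
// 	let read_locked = scorer.read_lock();
// 	let _penalty = read_locked.channel_penalty_msat(&candidate, usage, &fee_params);
// }
// {
// 	// Updates require the exclusive write lock.
// 	let mut write_locked = scorer.write_lock();
// 	write_locked.time_passed(Duration::from_secs(600));
// }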
206
207/// Refers to a scorer that is accessible under lock and also writeable to disk.
208///
209/// We need this trait so that a scorer can be passed to `lightning-background-processor`, which
210/// can then use the Persister to persist it.
211pub trait WriteableScore<'a>: LockableScore<'a> + Writeable {}
212
213#[cfg(not(c_bindings))]
214impl<'a, T> WriteableScore<'a> for T where T: LockableScore<'a> + Writeable {}
215#[cfg(not(c_bindings))]
216impl<'a, T: Score + 'a> LockableScore<'a> for Mutex<T> {
217	type ScoreUpdate = T;
218	type ScoreLookUp = T;
219
220	type WriteLocked = MutexGuard<'a, Self::ScoreUpdate>;
221	type ReadLocked = MutexGuard<'a, Self::ScoreLookUp>;
222
223	fn read_lock(&'a self) -> Self::ReadLocked {
224		Mutex::lock(self).unwrap()
225	}
226
227	fn write_lock(&'a self) -> Self::WriteLocked {
228		Mutex::lock(self).unwrap()
229	}
230}
231
232#[cfg(not(c_bindings))]
233impl<'a, T: Score + 'a> LockableScore<'a> for RefCell<T> {
234	type ScoreUpdate = T;
235	type ScoreLookUp = T;
236
237	type WriteLocked = RefMut<'a, Self::ScoreUpdate>;
238	type ReadLocked = Ref<'a, Self::ScoreLookUp>;
239
240	fn write_lock(&'a self) -> Self::WriteLocked {
241		self.borrow_mut()
242	}
243
244	fn read_lock(&'a self) -> Self::ReadLocked {
245		self.borrow()
246	}
247}
248
249#[cfg(any(not(c_bindings), feature = "_test_utils", test))]
250impl<'a, T: Score + 'a> LockableScore<'a> for RwLock<T> {
251	type ScoreUpdate = T;
252	type ScoreLookUp = T;
253
254	type WriteLocked = RwLockWriteGuard<'a, Self::ScoreUpdate>;
255	type ReadLocked = RwLockReadGuard<'a, Self::ScoreLookUp>;
256
257	fn read_lock(&'a self) -> Self::ReadLocked {
258		RwLock::read(self).unwrap()
259	}
260
261	fn write_lock(&'a self) -> Self::WriteLocked {
262		RwLock::write(self).unwrap()
263	}
264}
265
266#[cfg(c_bindings)]
267/// A concrete implementation of [`LockableScore`] which supports multi-threading.
268pub struct MultiThreadedLockableScore<T: Score> {
269	score: RwLock<T>,
270}
271
272#[cfg(c_bindings)]
273impl<'a, T: Score + 'a> LockableScore<'a> for MultiThreadedLockableScore<T> {
274	type ScoreUpdate = T;
275	type ScoreLookUp = T;
276	type WriteLocked = MultiThreadedScoreLockWrite<'a, Self::ScoreUpdate>;
277	type ReadLocked = MultiThreadedScoreLockRead<'a, Self::ScoreLookUp>;
278
279	fn read_lock(&'a self) -> Self::ReadLocked {
280		MultiThreadedScoreLockRead(self.score.read().unwrap())
281	}
282
283	fn write_lock(&'a self) -> Self::WriteLocked {
284		MultiThreadedScoreLockWrite(self.score.write().unwrap())
285	}
286}
287
288#[cfg(c_bindings)]
289impl<T: Score> Writeable for MultiThreadedLockableScore<T> {
290	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
291		self.score.read().unwrap().write(writer)
292	}
293}
294
295#[cfg(c_bindings)]
296impl<'a, T: Score + 'a> WriteableScore<'a> for MultiThreadedLockableScore<T> {}
297
298#[cfg(c_bindings)]
299impl<T: Score> MultiThreadedLockableScore<T> {
300	/// Creates a new [`MultiThreadedLockableScore`] given an underlying [`Score`].
301	pub fn new(score: T) -> Self {
302		MultiThreadedLockableScore { score: RwLock::new(score) }
303	}
304}
305
306#[cfg(c_bindings)]
307/// A read-locked `MultiThreadedLockableScore`.
308pub struct MultiThreadedScoreLockRead<'a, T: Score>(RwLockReadGuard<'a, T>);
309
310#[cfg(c_bindings)]
311/// A write-locked `MultiThreadedLockableScore`.
312pub struct MultiThreadedScoreLockWrite<'a, T: Score>(RwLockWriteGuard<'a, T>);
313
314#[cfg(c_bindings)]
315impl<'a, T: 'a + Score> Deref for MultiThreadedScoreLockRead<'a, T> {
316	type Target = T;
317
318	fn deref(&self) -> &Self::Target {
319		self.0.deref()
320	}
321}
322
323#[cfg(c_bindings)]
324impl<'a, T: Score> ScoreLookUp for MultiThreadedScoreLockRead<'a, T> {
325	type ScoreParams = T::ScoreParams;
326	fn channel_penalty_msat(&self, candidate: &CandidateRouteHop, usage: ChannelUsage, score_params: &Self::ScoreParams
327	) -> u64 {
328		self.0.channel_penalty_msat(candidate, usage, score_params)
329	}
330}
331
332#[cfg(c_bindings)]
333impl<'a, T: Score> Writeable for MultiThreadedScoreLockWrite<'a, T> {
334	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
335		self.0.write(writer)
336	}
337}
338
339#[cfg(c_bindings)]
340impl<'a, T: 'a + Score> Deref for MultiThreadedScoreLockWrite<'a, T> {
341	type Target = T;
342
343	fn deref(&self) -> &Self::Target {
344		self.0.deref()
345	}
346}
347
348#[cfg(c_bindings)]
349impl<'a, T: 'a + Score> DerefMut for MultiThreadedScoreLockWrite<'a, T> {
350	fn deref_mut(&mut self) -> &mut Self::Target {
351		self.0.deref_mut()
352	}
353}
354
355#[cfg(c_bindings)]
356impl<'a, T: Score> ScoreUpdate for MultiThreadedScoreLockWrite<'a, T> {
357	fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) {
358		self.0.payment_path_failed(path, short_channel_id, duration_since_epoch)
359	}
360
361	fn payment_path_successful(&mut self, path: &Path, duration_since_epoch: Duration) {
362		self.0.payment_path_successful(path, duration_since_epoch)
363	}
364
365	fn probe_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) {
366		self.0.probe_failed(path, short_channel_id, duration_since_epoch)
367	}
368
369	fn probe_successful(&mut self, path: &Path, duration_since_epoch: Duration) {
370		self.0.probe_successful(path, duration_since_epoch)
371	}
372
373	fn time_passed(&mut self, duration_since_epoch: Duration) {
374		self.0.time_passed(duration_since_epoch)
375	}
376}
377
378
379/// Proposed use of a channel passed as a parameter to [`ScoreLookUp::channel_penalty_msat`].
380#[derive(Clone, Copy, Debug, PartialEq)]
381pub struct ChannelUsage {
382	/// The amount to send through the channel, denominated in millisatoshis.
383	pub amount_msat: u64,
384
385	/// Total amount, denominated in millisatoshis, already allocated to send through the channel
386	/// as part of a multi-path payment.
387	pub inflight_htlc_msat: u64,
388
389	/// The effective capacity of the channel.
390	pub effective_capacity: EffectiveCapacity,
391}
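
// For illustration, a usage describing a 2,000,000 msat HTLC over a channel which already carries
// a 1,000,000 msat part of the same MPP payment might look as follows (the capacity figures are
// hypothetical, and we assume the `EffectiveCapacity::Total` variant carries `capacity_msat` and
// `htlc_maximum_msat`):
//
// let usage = ChannelUsage {
// 	amount_msat: 2_000_000,
// 	inflight_htlc_msat: 1_000_000,
// 	effective_capacity: EffectiveCapacity::Total {
// 		capacity_msat: 1_000_000_000,
// 		htlc_maximum_msat: 990_000_000,
// 	},
// };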
392
393#[derive(Clone)]
394/// [`ScoreLookUp`] implementation that uses a fixed penalty.
395pub struct FixedPenaltyScorer {
396	penalty_msat: u64,
397}
398
399impl FixedPenaltyScorer {
400	/// Creates a new scorer using `penalty_msat`.
401	pub fn with_penalty(penalty_msat: u64) -> Self {
402		Self { penalty_msat }
403	}
404}
405
406impl ScoreLookUp for FixedPenaltyScorer {
407	type ScoreParams = ();
408	fn channel_penalty_msat(&self, _: &CandidateRouteHop, _: ChannelUsage, _score_params: &Self::ScoreParams) -> u64 {
409		self.penalty_msat
410	}
411}
412
413impl ScoreUpdate for FixedPenaltyScorer {
414	fn payment_path_failed(&mut self, _path: &Path, _short_channel_id: u64, _duration_since_epoch: Duration) {}
415
416	fn payment_path_successful(&mut self, _path: &Path, _duration_since_epoch: Duration) {}
417
418	fn probe_failed(&mut self, _path: &Path, _short_channel_id: u64, _duration_since_epoch: Duration) {}
419
420	fn probe_successful(&mut self, _path: &Path, _duration_since_epoch: Duration) {}
421
422	fn time_passed(&mut self, _duration_since_epoch: Duration) {}
423}
424
425impl Writeable for FixedPenaltyScorer {
426	#[inline]
427	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
428		write_tlv_fields!(w, {});
429		Ok(())
430	}
431}
432
433impl ReadableArgs<u64> for FixedPenaltyScorer {
434	#[inline]
435	fn read<R: Read>(r: &mut R, penalty_msat: u64) -> Result<Self, DecodeError> {
436		read_tlv_fields!(r, {});
437		Ok(Self { penalty_msat })
438	}
439}
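
// A minimal usage sketch: with the fixed-penalty scorer every channel is charged the same flat
// amount regardless of usage, so paths are effectively ranked by fees and hop count alone. The
// 1,000 msat figure is arbitrary.
//
// let scorer = FixedPenaltyScorer::with_penalty(1_000);
// // For any `candidate` hop and `usage`:
// // assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &()), 1_000);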
440
441/// [`ScoreLookUp`] implementation using channel success probability distributions.
442///
443/// Channels are tracked with upper and lower liquidity bounds - when an HTLC fails at a channel,
444/// we learn that the upper-bound on the available liquidity is lower than the amount of the HTLC.
445/// When a payment is forwarded through a channel (but fails later in the route), we learn that the
446/// lower-bound on the channel's available liquidity must be at least the value of the HTLC.
447///
448/// These bounds are then used to determine a success probability using the formula from
449/// *Optimally Reliable & Cheap Payment Flows on the Lightning Network* by Rene Pickhardt
450/// and Stefan Richter [[1]] (i.e. `(upper_bound - payment_amount) / (upper_bound - lower_bound)`).
451///
452/// This probability is combined with the [`liquidity_penalty_multiplier_msat`] and
453/// [`liquidity_penalty_amount_multiplier_msat`] parameters to calculate a concrete penalty in
454/// milli-satoshis. The penalties, when added across all hops, have the property of being linear in
455/// terms of the entire path's success probability. This allows the router to directly compare
456/// penalties for different paths. See the documentation of those parameters for the exact formulas.
457///
458/// The liquidity bounds are decayed by halving them every [`liquidity_offset_half_life`].
459///
460/// Further, we track the history of our upper and lower liquidity bounds for each channel,
461/// allowing us to assign a second penalty (using [`historical_liquidity_penalty_multiplier_msat`]
462/// and [`historical_liquidity_penalty_amount_multiplier_msat`]) based on the same probability
463/// formula, but using the history of a channel rather than our latest estimates for the liquidity
464/// bounds.
465///
466/// [1]: https://arxiv.org/abs/2107.05322
467/// [`liquidity_penalty_multiplier_msat`]: ProbabilisticScoringFeeParameters::liquidity_penalty_multiplier_msat
468/// [`liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringFeeParameters::liquidity_penalty_amount_multiplier_msat
469/// [`liquidity_offset_half_life`]: ProbabilisticScoringDecayParameters::liquidity_offset_half_life
470/// [`historical_liquidity_penalty_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_multiplier_msat
471/// [`historical_liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_amount_multiplier_msat
472pub struct ProbabilisticScorer<G: Deref<Target = NetworkGraph<L>>, L: Deref>
473where L::Target: Logger {
474	decay_params: ProbabilisticScoringDecayParameters,
475	network_graph: G,
476	logger: L,
477	channel_liquidities: HashMap<u64, ChannelLiquidity>,
478}
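
// As a worked example of the success-probability formula above: with a learned lower bound of
// 100,000 sats, an upper bound of 800,000 sats, and a 200,000 sat payment, the estimated success
// probability is (800,000 - 200,000) / (800,000 - 100,000) ~= 0.857. With a (non-default)
// `liquidity_penalty_multiplier_msat` of 30,000, the corresponding liquidity penalty would be
// roughly -log10(0.857) * 30,000 ~= 0.067 * 30,000 ~= 2,010 msat for this hop; the figures are
// illustrative only, as the default configuration relies on the historical penalties instead.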
479
480/// Parameters for configuring [`ProbabilisticScorer`].
481///
482/// Used to configure base, liquidity, and amount penalties, the sum of which comprises the channel
483/// penalty (i.e., the amount in msats willing to be paid to avoid routing through the channel).
484///
485/// The penalty applied to any channel by the [`ProbabilisticScorer`] is the sum of each of the
486/// parameters here.
487#[derive(Clone)]
488pub struct ProbabilisticScoringFeeParameters {
489	/// A fixed penalty in msats to apply to each channel.
490	///
491	/// In testing, a value of roughly 1/10th of [`historical_liquidity_penalty_multiplier_msat`]
492	/// (implying scaling all estimated probabilities down by a factor of ~79%) resulted in the
493	/// most accurate total success probabilities.
494	///
495	/// Default value: 1,024 msat (i.e. we're willing to pay 1 sat to avoid each additional hop).
496	///
497	/// [`historical_liquidity_penalty_multiplier_msat`]: Self::historical_liquidity_penalty_multiplier_msat
498	pub base_penalty_msat: u64,
499
500	/// A multiplier used with the payment amount to calculate a fixed penalty applied to each
501	/// channel, in excess of the [`base_penalty_msat`].
502	///
503	/// The purpose of the amount penalty is to avoid having fees dominate the channel cost (i.e.,
504	/// fees plus penalty) for large payments. The penalty is computed as the product of this
505	/// multiplier and `2^30`ths of the payment amount.
506	///
507	/// i.e. `base_penalty_amount_multiplier_msat * amount_msat / 2^30`
508	///
509	/// In testing, a value of roughly ~100x (1/10th * 2^10) of
510	/// [`historical_liquidity_penalty_amount_multiplier_msat`] (implying scaling all estimated
511	/// probabilities down by a factor of ~79%) resulted in the most accurate total success
512	/// probabilities.
513	///
514	/// Default value: 131,072 msat (i.e. we're willing to pay 0.125bps to avoid each additional
515	///                              hop).
516	///
517	/// [`base_penalty_msat`]: Self::base_penalty_msat
518	/// [`historical_liquidity_penalty_amount_multiplier_msat`]: Self::historical_liquidity_penalty_amount_multiplier_msat
519	pub base_penalty_amount_multiplier_msat: u64,
520
521	/// A multiplier used in conjunction with the negative `log10` of the channel's success
522	/// probability for a payment, as determined by our latest estimates of the channel's
523	/// liquidity, to determine the liquidity penalty.
524	///
525	/// The penalty is based in part on the knowledge learned from prior successful and unsuccessful
526	/// payments. This knowledge is decayed over time based on [`liquidity_offset_half_life`]. The
527	/// penalty is effectively limited to `2 * liquidity_penalty_multiplier_msat` (corresponding to
528	/// lower bounding the success probability to `0.01`) when the amount falls within the
529	/// uncertainty bounds of the channel liquidity balance. Amounts above the upper bound will
530	/// result in a `u64::max_value` penalty, however.
531	///
532	/// `-log10(success_probability) * liquidity_penalty_multiplier_msat`
533	///
534	/// In testing, this scoring model performs much worse than the historical scoring model
535	/// configured with the [`historical_liquidity_penalty_multiplier_msat`] and thus is disabled
536	/// by default.
537	///
538	/// Default value: 0 msat
539	///
540	/// [`liquidity_offset_half_life`]: ProbabilisticScoringDecayParameters::liquidity_offset_half_life
541	/// [`historical_liquidity_penalty_multiplier_msat`]: Self::historical_liquidity_penalty_multiplier_msat
542	pub liquidity_penalty_multiplier_msat: u64,
543
544	/// A multiplier used in conjunction with the payment amount and the negative `log10` of the
545	/// channel's success probability for the total amount flowing over a channel, as determined by
546	/// our latest estimates of the channel's liquidity, to determine the amount penalty.
547	///
548	/// The purpose of the amount penalty is to avoid having fees dominate the channel cost (i.e.,
549	/// fees plus penalty) for large payments. The penalty is computed as the product of this
550	/// multiplier and `2^20`ths of the payment amount, weighted by the negative `log10` of the
551	/// success probability.
552	///
553	/// `-log10(success_probability) * liquidity_penalty_amount_multiplier_msat * amount_msat / 2^20`
554	///
555	/// In practice, this means for 0.1 success probability (`-log10(0.1) == 1`) each `2^20`th of
556	/// the amount will result in a penalty of the multiplier. And, as the success probability
557	/// decreases, the negative `log10` weighting will increase dramatically. For higher success
558	/// probabilities, the multiplier will have a decreasing effect as the negative `log10` will
559	/// fall below `1`.
560	///
561	/// In testing, this scoring model performs much worse than the historical scoring model
562	/// configured with the [`historical_liquidity_penalty_amount_multiplier_msat`] and thus is
563	/// disabled by default.
564	///
565	/// Default value: 0 msat
566	///
567	/// [`historical_liquidity_penalty_amount_multiplier_msat`]: Self::historical_liquidity_penalty_amount_multiplier_msat
568	pub liquidity_penalty_amount_multiplier_msat: u64,
569
570	/// A multiplier used in conjunction with the negative `log10` of the channel's success
571	/// probability for the payment, as determined based on the history of our estimates of the
572	/// channel's available liquidity, to determine a penalty.
573	///
574	/// This penalty is similar to [`liquidity_penalty_multiplier_msat`], however, instead of using
575	/// only our latest estimate for the current liquidity available in the channel, it estimates
576	/// success probability based on the estimated liquidity available in the channel through
577	/// history. Specifically, every time we update our liquidity bounds on a given channel, we
578	/// track which of several buckets those bounds fall into, exponentially decaying the
579	/// probability of each bucket as new samples are added.
580	///
581	/// Default value: 10,000 msat (i.e. willing to pay 1 sat to avoid an 80% probability channel,
582	///                            or 6 sats to avoid a 25% probability channel).
583	///
584	/// [`liquidity_penalty_multiplier_msat`]: Self::liquidity_penalty_multiplier_msat
585	pub historical_liquidity_penalty_multiplier_msat: u64,
586
587	/// A multiplier used in conjunction with the payment amount and the negative `log10` of the
588	/// channel's success probability for the total amount flowing over a channel, as determined
589	/// based on the history of our estimates of the channel's available liquidity, to determine a
590	/// penalty.
591	///
592	/// The purpose of the amount penalty is to avoid having fees dominate the channel cost for
593	/// large payments. The penalty is computed as the product of this multiplier and `2^20`ths
594	/// of the payment amount, weighted by the negative `log10` of the success probability.
595	///
596	/// This penalty is similar to [`liquidity_penalty_amount_multiplier_msat`], however, instead
597	/// of using only our latest estimate for the current liquidity available in the channel, it
598	/// estimates success probability based on the estimated liquidity available in the channel
599	/// through history. Specifically, every time we update our liquidity bounds on a given
600	/// channel, we track which of several buckets those bounds fall into, exponentially decaying
601	/// the probability of each bucket as new samples are added.
602	///
603	/// Default value: 1,250 msat (i.e. willing to pay about 0.125 bps per hop to avoid 78%
604	///                            probability channels, or 0.5bps to avoid a 38% probability
605	///                            channel).
606	///
607	/// [`liquidity_penalty_amount_multiplier_msat`]: Self::liquidity_penalty_amount_multiplier_msat
608	pub historical_liquidity_penalty_amount_multiplier_msat: u64,
609
610	/// Manual penalties used for the given nodes. Allows setting a particular penalty for a given
611	/// node. Note that a manual penalty of `u64::max_value()` means the node will never be
612	/// considered during path finding.
613	///
614	/// This is not exported to bindings users
615	pub manual_node_penalties: HashMap<NodeId, u64>,
616
617	/// This penalty is applied when `htlc_maximum_msat` is equal to or larger than half of the
618	/// channel's capacity (i.e. `htlc_maximum_msat >= 0.5 * channel_capacity`), which makes us
619	/// prefer nodes with a smaller `htlc_maximum_msat`. We treat such nodes preferentially
620	/// as this makes balance discovery attacks harder to execute, thereby creating an incentive
621	/// to restrict `htlc_maximum_msat` and improve privacy.
622	///
623	/// Default value: 250 msat
624	pub anti_probing_penalty_msat: u64,
625
626	/// This penalty is applied when the total amount flowing over a channel exceeds our current
627	/// estimate of the channel's available liquidity. The total amount is the amount of the
628	/// current HTLC plus any HTLCs which we've sent over the same channel.
629	///
630	/// Note that in this case all other penalties, including the
631	/// [`liquidity_penalty_multiplier_msat`] and [`liquidity_penalty_amount_multiplier_msat`]-based
632	/// penalties, as well as the [`base_penalty_msat`] and the [`anti_probing_penalty_msat`], if
633	/// applicable, are still included in the overall penalty.
634	///
635	/// If you wish to avoid creating paths with such channels entirely, setting this to a value of
636	/// `u64::max_value()` will guarantee that.
637	///
638	/// Default value: 1_0000_0000_000 msat (1 Bitcoin)
639	///
640	/// [`liquidity_penalty_multiplier_msat`]: Self::liquidity_penalty_multiplier_msat
641	/// [`liquidity_penalty_amount_multiplier_msat`]: Self::liquidity_penalty_amount_multiplier_msat
642	/// [`base_penalty_msat`]: Self::base_penalty_msat
643	/// [`anti_probing_penalty_msat`]: Self::anti_probing_penalty_msat
644	pub considered_impossible_penalty_msat: u64,
645
646	/// In order to calculate most of the scores above, we must first convert a lower and upper
647	/// bound on the available liquidity in a channel into the probability that we think a payment
648	/// will succeed. That probability is derived from a Probability Density Function for where we
649	/// think the liquidity in a channel likely lies, given such bounds.
650	///
651	/// If this flag is set, that PDF is simply a constant - we assume that the actual available
652	/// liquidity in a channel is just as likely to be at any point between our lower and upper
653	/// bounds.
654	///
655	/// If this flag is *not* set, that PDF is proportional to `1/256 + 9*(x/capacity - 0.5)^8`.
656	/// That is, we use a polynomial curve which expects the liquidity of a channel to lie "at the
657	/// edges". This matches experimental results - most routing nodes do not aggressively
658	/// rebalance their channels and flows in the network are often unbalanced, leaving liquidity
659	/// usually unavailable.
660	///
661	/// Thus, for the "best" routes, leave this flag `false`. Note, however, that the default
662	/// (nonlinear) model implies a number of floating-point multiplications in the hottest routing
663	/// code, which may lead to routing performance degradation on some machines.
664	///
665	/// Default value: false
666	pub linear_success_probability: bool,
667}
668
669impl Default for ProbabilisticScoringFeeParameters {
670	fn default() -> Self {
671		Self {
672			base_penalty_msat: 1024,
673			base_penalty_amount_multiplier_msat: 131_072,
674			liquidity_penalty_multiplier_msat: 0,
675			liquidity_penalty_amount_multiplier_msat: 0,
676			manual_node_penalties: new_hash_map(),
677			anti_probing_penalty_msat: 250,
678			considered_impossible_penalty_msat: 1_0000_0000_000,
679			historical_liquidity_penalty_multiplier_msat: 10_000,
680			historical_liquidity_penalty_amount_multiplier_msat: 1_250,
681			linear_success_probability: false,
682		}
683	}
684}
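
// As a quick sanity check of the fixed components under these defaults: for a 100,000,000 msat
// (0.001 BTC) payment, each hop is charged
// `base_penalty_msat + base_penalty_amount_multiplier_msat * amount_msat / 2^30`
// = 1,024 + 131,072 * 100,000,000 / 2^30 ~= 1,024 + 12,207 ~= 13,231 msat
// before any liquidity-, history-, or anti-probing-based penalties are added.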
685
686impl ProbabilisticScoringFeeParameters {
687	/// Marks the node with the given `node_id` as banned,
688	/// i.e., it will be avoided during path finding.
689	pub fn add_banned(&mut self, node_id: &NodeId) {
690		self.manual_node_penalties.insert(*node_id, u64::max_value());
691	}
692
693	/// Marks all nodes in the given list as banned, i.e.,
694	/// they will be avoided during path finding.
695	pub fn add_banned_from_list(&mut self, node_ids: Vec<NodeId>) {
696		for id in node_ids {
697			self.manual_node_penalties.insert(id, u64::max_value());
698		}
699	}
700
701	/// Removes the node with the given `node_id` from the list of nodes to avoid.
702	pub fn remove_banned(&mut self, node_id: &NodeId) {
703		self.manual_node_penalties.remove(node_id);
704	}
705
706	/// Sets a manual penalty for the given node.
707	pub fn set_manual_penalty(&mut self, node_id: &NodeId, penalty: u64) {
708		self.manual_node_penalties.insert(*node_id, penalty);
709	}
710
711	/// Removes the node with the given `node_id` from the list of manual penalties.
712	pub fn remove_manual_penalty(&mut self, node_id: &NodeId) {
713		self.manual_node_penalties.remove(node_id);
714	}
715
716	/// Clears the list of manual penalties that are applied during path finding.
717	pub fn clear_manual_penalties(&mut self) {
718		self.manual_node_penalties = new_hash_map();
719	}
720}
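
// For example, a caller holding these parameters might avoid one node entirely while merely
// discouraging another (the node ids and the 50,000 msat figure are placeholders):
//
// let mut params = ProbabilisticScoringFeeParameters::default();
// params.add_banned(&untrusted_node_id);             // never routed through
// params.set_manual_penalty(&flaky_node_id, 50_000); // strongly discouraged
// // ...and later undo either decision:
// params.remove_banned(&untrusted_node_id);
// params.remove_manual_penalty(&flaky_node_id);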
721
722#[cfg(test)]
723impl ProbabilisticScoringFeeParameters {
724	fn zero_penalty() -> Self {
725		Self {
726			base_penalty_msat: 0,
727			base_penalty_amount_multiplier_msat: 0,
728			liquidity_penalty_multiplier_msat: 0,
729			liquidity_penalty_amount_multiplier_msat: 0,
730			historical_liquidity_penalty_multiplier_msat: 0,
731			historical_liquidity_penalty_amount_multiplier_msat: 0,
732			manual_node_penalties: new_hash_map(),
733			anti_probing_penalty_msat: 0,
734			considered_impossible_penalty_msat: 0,
735			linear_success_probability: true,
736		}
737	}
738}
739
740/// Parameters for configuring [`ProbabilisticScorer`].
741///
742/// Used to configure decay parameters that are static throughout the lifetime of the scorer.
743/// These decay parameters affect the channel penalty score but, unlike the fee parameters, are
744/// not passed in on a per-route-finding-call basis.
745#[derive(Copy, Clone)]
746pub struct ProbabilisticScoringDecayParameters {
747	/// If we aren't learning any new datapoints for a channel, the historical liquidity bounds
748	/// tracking can simply live on with increasingly stale data. Instead, when a channel has not
749	/// seen a liquidity estimate update for this amount of time, the historical datapoints are
750	/// decayed by half.
751	/// For an example of `historical_no_updates_half_life` being used, see [`historical_estimated_channel_liquidity_probabilities`].
752	///
753	/// Note that after 16 or more half lives all historical data will be completely gone.
754	///
755	/// Default value: 14 days
756	///
757	/// [`historical_estimated_channel_liquidity_probabilities`]: ProbabilisticScorer::historical_estimated_channel_liquidity_probabilities
758	pub historical_no_updates_half_life: Duration,
759
760	/// Whenever this amount of time elapses since the last update to a channel's liquidity bounds,
761	/// the distance from the bounds to "zero" is cut in half. In other words, the lower-bound on
762	/// the available liquidity is halved and the upper-bound moves half-way to the channel's total
763	/// capacity.
764	///
765	/// Because halving the liquidity bounds grows the uncertainty on the channel's liquidity,
766	/// the penalty for an amount within the new bounds may change. See the [`ProbabilisticScorer`]
767	/// struct documentation for more info on the way the liquidity bounds are used.
768	///
769	/// For example, if the channel's capacity is 1 million sats, and the current lower and upper
770	/// liquidity bounds are 200,000 sats and 600,000 sats, after this amount of time the lower
771	/// and upper liquidity bounds will be decayed to 100,000 and 800,000 sats.
772	///
773	/// Default value: 30 minutes
774	///
775	/// # Note
776	///
777	/// When not built with the `std` feature, time will never elapse. Therefore, the channel
778	/// liquidity knowledge will never decay except when the bounds cross.
779	pub liquidity_offset_half_life: Duration,
780}
781
782impl Default for ProbabilisticScoringDecayParameters {
783	fn default() -> Self {
784		Self {
785			liquidity_offset_half_life: Duration::from_secs(30 * 60),
786			historical_no_updates_half_life: Duration::from_secs(60 * 60 * 24 * 14),
787		}
788	}
789}
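
// A sketch of overriding only the historical half-life while keeping the default
// liquidity-offset half-life (the 7-day figure is illustrative):
//
// let decay_params = ProbabilisticScoringDecayParameters {
// 	historical_no_updates_half_life: Duration::from_secs(60 * 60 * 24 * 7),
// 	..ProbabilisticScoringDecayParameters::default()
// };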
790
791#[cfg(test)]
792impl ProbabilisticScoringDecayParameters {
793	fn zero_penalty() -> Self {
794		Self {
795			liquidity_offset_half_life: Duration::from_secs(30 * 60),
796			historical_no_updates_half_life: Duration::from_secs(60 * 60 * 24 * 14),
797		}
798	}
799}
800
801/// Accounting for channel liquidity balance uncertainty.
802///
803/// Direction is defined in terms of [`NodeId`] partial ordering, where the source node is the
804/// first node in the ordering of the channel's counterparties. Thus, swapping the two liquidity
805/// offset fields gives the opposite direction.
806#[repr(C)] // Force the fields in memory to be in the order we specify
807struct ChannelLiquidity {
808	/// Lower channel liquidity bound in terms of an offset from zero.
809	min_liquidity_offset_msat: u64,
810
811	/// Upper channel liquidity bound in terms of an offset from the effective capacity.
812	max_liquidity_offset_msat: u64,
813
814	liquidity_history: HistoricalLiquidityTracker,
815
816	/// Time when either liquidity bound was last modified as an offset since the unix epoch.
817	last_updated: Duration,
818
819	/// Time when the historical liquidity bounds were last modified as an offset against the unix
820	/// epoch.
821	offset_history_last_updated: Duration,
822}
823
824/// A snapshot of [`ChannelLiquidity`] in one direction assuming a certain channel capacity.
825struct DirectedChannelLiquidity<L: Deref<Target = u64>, HT: Deref<Target = HistoricalLiquidityTracker>, T: Deref<Target = Duration>> {
826	min_liquidity_offset_msat: L,
827	max_liquidity_offset_msat: L,
828	liquidity_history: DirectedHistoricalLiquidityTracker<HT>,
829	capacity_msat: u64,
830	last_updated: T,
831	offset_history_last_updated: T,
832}
833
834impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> ProbabilisticScorer<G, L> where L::Target: Logger {
835	/// Creates a new scorer using the given scoring parameters for sending payments from a node
836	/// through a network graph.
837	pub fn new(decay_params: ProbabilisticScoringDecayParameters, network_graph: G, logger: L) -> Self {
838		Self {
839			decay_params,
840			network_graph,
841			logger,
842			channel_liquidities: new_hash_map(),
843		}
844	}
845
846	#[cfg(test)]
847	fn with_channel(mut self, short_channel_id: u64, liquidity: ChannelLiquidity) -> Self {
848		assert!(self.channel_liquidities.insert(short_channel_id, liquidity).is_none());
849		self
850	}
851
852	/// Dump the contents of this scorer into the configured logger.
853	///
854	/// Note that this writes roughly one line per channel for which we have a liquidity estimate,
855	/// which may be a substantial amount of log output.
856	pub fn debug_log_liquidity_stats(&self) {
857		let graph = self.network_graph.read_only();
858		for (scid, liq) in self.channel_liquidities.iter() {
859			if let Some(chan_debug) = graph.channels().get(scid) {
860				let log_direction = |source, target| {
861					if let Some((directed_info, _)) = chan_debug.as_directed_to(target) {
862						let amt = directed_info.effective_capacity().as_msat();
863						let dir_liq = liq.as_directed(source, target, amt);
864
865						let min_buckets = &dir_liq.liquidity_history.min_liquidity_offset_history_buckets();
866						let max_buckets = &dir_liq.liquidity_history.max_liquidity_offset_history_buckets();
867
868						log_debug!(self.logger, core::concat!(
869							"Liquidity from {} to {} via {} is in the range ({}, {}).\n",
870							"\tHistorical min liquidity bucket relative probabilities:\n",
871							"\t\t{} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}\n",
872							"\tHistorical max liquidity bucket relative probabilities:\n",
873							"\t\t{} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}"),
874							source, target, scid, dir_liq.min_liquidity_msat(), dir_liq.max_liquidity_msat(),
875							min_buckets[ 0], min_buckets[ 1], min_buckets[ 2], min_buckets[ 3],
876							min_buckets[ 4], min_buckets[ 5], min_buckets[ 6], min_buckets[ 7],
877							min_buckets[ 8], min_buckets[ 9], min_buckets[10], min_buckets[11],
878							min_buckets[12], min_buckets[13], min_buckets[14], min_buckets[15],
879							min_buckets[16], min_buckets[17], min_buckets[18], min_buckets[19],
880							min_buckets[20], min_buckets[21], min_buckets[22], min_buckets[23],
881							min_buckets[24], min_buckets[25], min_buckets[26], min_buckets[27],
882							min_buckets[28], min_buckets[29], min_buckets[30], min_buckets[31],
883							// Note that the liquidity buckets are an offset from the edge, so we
884							// inverse the max order to get the probabilities from zero.
885							max_buckets[31], max_buckets[30], max_buckets[29], max_buckets[28],
886							max_buckets[27], max_buckets[26], max_buckets[25], max_buckets[24],
887							max_buckets[23], max_buckets[22], max_buckets[21], max_buckets[20],
888							max_buckets[19], max_buckets[18], max_buckets[17], max_buckets[16],
889							max_buckets[15], max_buckets[14], max_buckets[13], max_buckets[12],
890							max_buckets[11], max_buckets[10], max_buckets[ 9], max_buckets[ 8],
891							max_buckets[ 7], max_buckets[ 6], max_buckets[ 5], max_buckets[ 4],
892							max_buckets[ 3], max_buckets[ 2], max_buckets[ 1], max_buckets[ 0]);
893					} else {
894						log_debug!(self.logger, "No amount known for SCID {} from {:?} to {:?}", scid, source, target);
895					}
896				};
897
898				log_direction(&chan_debug.node_one, &chan_debug.node_two);
899				log_direction(&chan_debug.node_two, &chan_debug.node_one);
900			} else {
901				log_debug!(self.logger, "No network graph entry for SCID {}", scid);
902			}
903		}
904	}
905
906	/// Query the estimated minimum and maximum liquidity available for sending a payment over the
907	/// channel with `scid` towards the given `target` node.
908	pub fn estimated_channel_liquidity_range(&self, scid: u64, target: &NodeId) -> Option<(u64, u64)> {
909		let graph = self.network_graph.read_only();
910
911		if let Some(chan) = graph.channels().get(&scid) {
912			if let Some(liq) = self.channel_liquidities.get(&scid) {
913				if let Some((directed_info, source)) = chan.as_directed_to(target) {
914					let amt = directed_info.effective_capacity().as_msat();
915					let dir_liq = liq.as_directed(source, target, amt);
916					return Some((dir_liq.min_liquidity_msat(), dir_liq.max_liquidity_msat()));
917				}
918			}
919		}
920		None
921	}
922
923	/// Query the historical estimated minimum and maximum liquidity available for sending a
924	/// payment over the channel with `scid` towards the given `target` node.
925	///
926	/// Returns two sets of 32 buckets. The first set describes the lower-bound liquidity history,
927	/// the second set describes the upper-bound liquidity history. Each bucket describes the
928	/// relative frequency at which we've seen a liquidity bound in the bucket's range relative to
929	/// the channel's total capacity, on an arbitrary scale. Because the values are slowly decayed,
930	/// more recent data points are weighted more heavily than older datapoints.
931	///
932	/// Note that the range of each bucket varies by its location to provide more granular results
933	/// at the edges of a channel's capacity, where the liquidity is more likely to sit.
934	///
935	/// When scoring, the estimated probability that an upper-/lower-bound lies in a given bucket
936	/// is calculated by dividing that bucket's value by the total value of all buckets.
937	///
938	/// For example, using a lower bucket count for illustrative purposes, a value of
939	/// `[0, 0, 0, ..., 0, 32]` indicates that we believe the probability of a bound being very
940	/// close to the channel's capacity to be 100%, and have never (recently) seen it in any other
941	/// bucket. A value of `[31, 0, 0, ..., 0, 0, 32]` indicates we've seen the bound being both
942	/// in the top and bottom bucket, and roughly with similar (recent) frequency.
943	///
944	/// Because the datapoints are decayed slowly over time, values will eventually return to
945	/// `Some(([0; 32], [0; 32]))` or `None` if no data remains for a channel.
946	///
947	/// In order to fetch a single success probability from the buckets provided here, as used in
948	/// the scoring model, see [`Self::historical_estimated_payment_success_probability`].
949	pub fn historical_estimated_channel_liquidity_probabilities(&self, scid: u64, target: &NodeId)
950	-> Option<([u16; 32], [u16; 32])> {
951		let graph = self.network_graph.read_only();
952
953		if let Some(chan) = graph.channels().get(&scid) {
954			if let Some(liq) = self.channel_liquidities.get(&scid) {
955				if let Some((directed_info, source)) = chan.as_directed_to(target) {
956					let amt = directed_info.effective_capacity().as_msat();
957					let dir_liq = liq.as_directed(source, target, amt);
958
959					let min_buckets = *dir_liq.liquidity_history.min_liquidity_offset_history_buckets();
960					let mut max_buckets = *dir_liq.liquidity_history.max_liquidity_offset_history_buckets();
961
962					// Note that the liquidity buckets are an offset from the edge, so we inverse
963					// the max order to get the probabilities from zero.
964					max_buckets.reverse();
965					return Some((min_buckets, max_buckets));
966				}
967			}
968		}
969		None
970	}
971
972	/// Query the probability of payment success sending the given `amount_msat` over the channel
973	/// with `scid` towards the given `target` node, based on the historical estimated liquidity
974	/// bounds.
975	///
976	/// Returns `None` if:
977	///  - the given channel is not in the network graph, the provided `target` is not a party to
978	///    the channel, or we don't have forwarding parameters for either direction in the channel.
979	///  - `allow_fallback_estimation` is *not* set and there is no (or insufficient) historical
980	///    data for the given channel.
981	///
982	/// These are the same bounds as returned by
983	/// [`Self::historical_estimated_channel_liquidity_probabilities`] (but not those returned by
984	/// [`Self::estimated_channel_liquidity_range`]).
985	pub fn historical_estimated_payment_success_probability(
986		&self, scid: u64, target: &NodeId, amount_msat: u64, params: &ProbabilisticScoringFeeParameters,
987		allow_fallback_estimation: bool,
988	) -> Option<f64> {
989		let graph = self.network_graph.read_only();
990
991		if let Some(chan) = graph.channels().get(&scid) {
992			if let Some((directed_info, source)) = chan.as_directed_to(target) {
993				if let Some(liq) = self.channel_liquidities.get(&scid) {
994					let capacity_msat = directed_info.effective_capacity().as_msat();
995					let dir_liq = liq.as_directed(source, target, capacity_msat);
996
997					let res = dir_liq.liquidity_history.calculate_success_probability_times_billion(
998						&params, amount_msat, capacity_msat
999					).map(|p| p as f64 / (1024 * 1024 * 1024) as f64);
1000					if res.is_some() {
1001						return res;
1002					}
1003				}
1004				if allow_fallback_estimation {
1005					let amt = amount_msat;
1006					return Some(
1007						self.calc_live_prob(scid, source, target, directed_info, amt, params, true)
1008					);
1009				}
1010			}
1011		}
1012		None
1013	}
1014
1015	fn calc_live_prob(
1016		&self, scid: u64, source: &NodeId, target: &NodeId, directed_info: DirectedChannelInfo,
1017		amt: u64, params: &ProbabilisticScoringFeeParameters,
1018		min_zero_penalty: bool,
1019	) -> f64 {
1020		let capacity_msat = directed_info.effective_capacity().as_msat();
1021		let dummy_liq = ChannelLiquidity::new(Duration::ZERO);
1022		let liq = self.channel_liquidities.get(&scid)
1023			.unwrap_or(&dummy_liq)
1024			.as_directed(&source, &target, capacity_msat);
1025		let min_liq = liq.min_liquidity_msat();
1026		let max_liq = liq.max_liquidity_msat();
1027		if amt <= min_liq {
1028			return 1.0;
1029		} else if amt > max_liq {
1030			return 0.0;
1031		}
1032		let (num, den) =
1033			success_probability(amt, min_liq, max_liq, capacity_msat, &params, min_zero_penalty);
1034		num as f64 / den as f64
1035	}
1036
1037	/// Query the probability of payment success sending the given `amount_msat` over the channel
1038	/// with `scid` towards the given `target` node, based on the live estimated liquidity bounds.
1039	///
1040	/// This will return `Some` for any channel which is present in the [`NetworkGraph`], including
1041	/// if we have no bound information beside the channel's capacity.
1042	pub fn live_estimated_payment_success_probability(
1043		&self, scid: u64, target: &NodeId, amount_msat: u64, params: &ProbabilisticScoringFeeParameters,
1044	) -> Option<f64> {
1045		let graph = self.network_graph.read_only();
1046
1047		if let Some(chan) = graph.channels().get(&scid) {
1048			if let Some((directed_info, source)) = chan.as_directed_to(target) {
1049				return Some(self.calc_live_prob(scid, source, target, directed_info, amount_msat, params, false));
1050			}
1051		}
1052		None
1053	}
1054}
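
// Putting the query helpers above together, a node operator debugging routing decisions might do
// something like the following (assuming `scorer`, `scid`, `target` and `params` already exist;
// the 5,000,000 msat amount is arbitrary):
//
// if let Some((min_msat, max_msat)) = scorer.estimated_channel_liquidity_range(scid, &target) {
// 	// Live bounds learned from recent payment and probe results.
// 	println!("estimated liquidity is in [{}, {}] msat", min_msat, max_msat);
// }
// if let Some(prob) = scorer.historical_estimated_payment_success_probability(
// 	scid, &target, 5_000_000, &params, false,
// ) {
// 	println!("historical success probability for 5,000 sats: {}", prob);
// }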
1055
1056impl ChannelLiquidity {
1057	fn new(last_updated: Duration) -> Self {
1058		Self {
1059			min_liquidity_offset_msat: 0,
1060			max_liquidity_offset_msat: 0,
1061			liquidity_history: HistoricalLiquidityTracker::new(),
1062			last_updated,
1063			offset_history_last_updated: last_updated,
1064		}
1065	}
1066
1067	/// Returns a view of the channel liquidity directed from `source` to `target` assuming
1068	/// `capacity_msat`.
1069	fn as_directed(
1070		&self, source: &NodeId, target: &NodeId, capacity_msat: u64,
1071	) -> DirectedChannelLiquidity<&u64, &HistoricalLiquidityTracker, &Duration> {
1072		let source_less_than_target = source < target;
1073		let (min_liquidity_offset_msat, max_liquidity_offset_msat) =
1074			if source_less_than_target {
1075				(&self.min_liquidity_offset_msat, &self.max_liquidity_offset_msat)
1076			} else {
1077				(&self.max_liquidity_offset_msat, &self.min_liquidity_offset_msat)
1078			};
1079
1080		DirectedChannelLiquidity {
1081			min_liquidity_offset_msat,
1082			max_liquidity_offset_msat,
1083			liquidity_history: self.liquidity_history.as_directed(source_less_than_target),
1084			capacity_msat,
1085			last_updated: &self.last_updated,
1086			offset_history_last_updated: &self.offset_history_last_updated,
1087		}
1088	}
1089
1090	/// Returns a mutable view of the channel liquidity directed from `source` to `target` assuming
1091	/// `capacity_msat`.
1092	fn as_directed_mut(
1093		&mut self, source: &NodeId, target: &NodeId, capacity_msat: u64,
1094	) -> DirectedChannelLiquidity<&mut u64, &mut HistoricalLiquidityTracker, &mut Duration> {
1095		let source_less_than_target = source < target;
1096		let (min_liquidity_offset_msat, max_liquidity_offset_msat) =
1097			if source_less_than_target {
1098				(&mut self.min_liquidity_offset_msat, &mut self.max_liquidity_offset_msat)
1099			} else {
1100				(&mut self.max_liquidity_offset_msat, &mut self.min_liquidity_offset_msat)
1101			};
1102
1103		DirectedChannelLiquidity {
1104			min_liquidity_offset_msat,
1105			max_liquidity_offset_msat,
1106			liquidity_history: self.liquidity_history.as_directed_mut(source_less_than_target),
1107			capacity_msat,
1108			last_updated: &mut self.last_updated,
1109			offset_history_last_updated: &mut self.offset_history_last_updated,
1110		}
1111	}
1112
1113	fn decayed_offset(
1114		&self, offset: u64, duration_since_epoch: Duration,
1115		decay_params: ProbabilisticScoringDecayParameters,
1116	) -> u64 {
1117		let half_life = decay_params.liquidity_offset_half_life.as_secs_f64();
1118		if half_life != 0.0 {
1119			let elapsed_time = duration_since_epoch.saturating_sub(self.last_updated).as_secs_f64();
1120			((offset as f64) * powf64(0.5, elapsed_time / half_life)) as u64
1121		} else {
1122			0
1123		}
1124	}
1125}
1126
1127/// Bounds `-log10` to avoid excessive liquidity penalties for payments with low success
1128/// probabilities.
1129const NEGATIVE_LOG10_UPPER_BOUND: u64 = 2;
1130
1131/// The rough cutoff at which our precision falls off and we should stop bothering to try to log a
1132/// ratio, as X in 1/X.
1133const PRECISION_LOWER_BOUND_DENOMINATOR: u64 = log_approx::LOWER_BITS_BOUND;
1134
1135/// The divisors used when computing the liquidity and base amount penalties, respectively.
1136const AMOUNT_PENALTY_DIVISOR: u64 = 1 << 20;
1137const BASE_AMOUNT_PENALTY_DIVISOR: u64 = 1 << 30;
1138
1139/// Raises three `f64`s to the 9th power without `powi`, which is not available in `no_std` builds.
1140#[inline(always)]
1141fn three_f64_pow_9(a: f64, b: f64, c: f64) -> (f64, f64, f64) {
1142	let (a2, b2, c2) = (a * a, b * b, c * c);
1143	let (a4, b4, c4) = (a2 * a2, b2 * b2, c2 * c2);
1144	(a * a4 * a4, b * b4 * b4, c * c4 * c4)
1145}
1146
1147/// If we have no knowledge of the channel, we scale the success probability down to ~82% of its
1148/// value for the historical model by multiplying the denominator of the success probability by
1149/// this before dividing by 64.
1150///
1151/// This number (as well as the PDF) was picked experimentally on probing results to maximize the
1152/// log-loss of succeeding and failing hops.
1153///
1154/// Note that we prefer to increase the denominator rather than decrease the numerator as the
1155/// denominator is more likely to be larger and thus provide greater precision. This is mostly an
1156/// overoptimization but makes a large difference in tests.
1157const MIN_ZERO_IMPLIES_NO_SUCCESSES_PENALTY_ON_64: u64 = 78;
1158
1159#[inline(always)]
1160fn linear_success_probability(
1161	total_inflight_amount_msat: u64, min_liquidity_msat: u64, max_liquidity_msat: u64,
1162	min_zero_implies_no_successes: bool,
1163) -> (u64, u64) {
1164	let (numerator, mut denominator) =
1165		(max_liquidity_msat - total_inflight_amount_msat,
1166		(max_liquidity_msat - min_liquidity_msat).saturating_add(1));
1167
1168	if min_zero_implies_no_successes && min_liquidity_msat == 0 &&
1169		denominator < u64::max_value() / MIN_ZERO_IMPLIES_NO_SUCCESSES_PENALTY_ON_64
1170	{
1171		denominator = denominator * MIN_ZERO_IMPLIES_NO_SUCCESSES_PENALTY_ON_64 / 64
1172	}
1173
1174	(numerator, denominator)
1175}
1176
1177/// Returns a (numerator, denominator) pair each between 0 and 0.0078125, inclusive.
1178#[inline(always)]
1179fn nonlinear_success_probability(
1180	total_inflight_amount_msat: u64, min_liquidity_msat: u64, max_liquidity_msat: u64,
1181	capacity_msat: u64, min_zero_implies_no_successes: bool,
1182) -> (f64, f64) {
1183	let capacity = capacity_msat as f64;
1184	let max = (max_liquidity_msat as f64) / capacity;
1185	let min = (min_liquidity_msat as f64) / capacity;
1186	let amount = (total_inflight_amount_msat as f64) / capacity;
1187
1188	// Assume the channel has a probability density function of
1189	// `128 * (1/256 + 9*(x - 0.5)^8)` for values from 0 to 1 (where 1 is the channel's
1190	// full capacity). The success probability given some liquidity bounds is thus the
1191	// integral under the curve from the amount to maximum estimated liquidity, divided by
1192	// the same integral from the minimum to the maximum estimated liquidity bounds.
1193	//
1194	// Because the integral from x to y is simply
1195	// `128*(1/256 * (y - 0.5) + (y - 0.5)^9) - 128*(1/256 * (x - 0.5) + (x - 0.5)^9), we
1196	// can calculate the cumulative density function between the min/max bounds trivially.
1197	// Note that we don't bother to normalize the CDF to total to 1 (using the 128
1198	// multiple), as it will come out in the division of num / den.
1199	let (max_norm, min_norm, amt_norm) = (max - 0.5, min - 0.5, amount - 0.5);
1200	let (max_pow, min_pow, amt_pow) = three_f64_pow_9(max_norm, min_norm, amt_norm);
1201	let (max_v, min_v, amt_v) = (max_pow + max_norm / 256.0, min_pow + min_norm / 256.0, amt_pow + amt_norm / 256.0);
1202	let mut denominator = max_v - min_v;
1203	let numerator = max_v - amt_v;
1204
1205	if min_zero_implies_no_successes && min_liquidity_msat == 0 {
1206		denominator = denominator * (MIN_ZERO_IMPLIES_NO_SUCCESSES_PENALTY_ON_64 as f64) / 64.0;
1207	}
1208
1209	(numerator, denominator)
1210}
1211
1212/// Given liquidity bounds, calculates the success probability (in the form of a numerator and
1213/// denominator) of an HTLC. This is a key assumption in our scoring models.
1214///
1215/// `total_inflight_amount_msat` includes the amount of the HTLC and any HTLCs in flight over the
1216/// channel.
1217///
1218/// `min_zero_implies_no_successes` signals that a `min_liquidity_msat` of 0 means we've not
1219/// (recently) seen an HTLC successfully complete over this channel.
1220#[inline(always)]
1221fn success_probability_float(
1222	total_inflight_amount_msat: u64, min_liquidity_msat: u64, max_liquidity_msat: u64,
1223	capacity_msat: u64, params: &ProbabilisticScoringFeeParameters,
1224	min_zero_implies_no_successes: bool,
1225) -> (f64, f64) {
1226	debug_assert!(min_liquidity_msat <= total_inflight_amount_msat);
1227	debug_assert!(total_inflight_amount_msat < max_liquidity_msat);
1228	debug_assert!(max_liquidity_msat <= capacity_msat);
1229
1230	if params.linear_success_probability {
1231		let (numerator, denominator) = linear_success_probability(total_inflight_amount_msat, min_liquidity_msat, max_liquidity_msat, min_zero_implies_no_successes);
1232		(numerator as f64, denominator as f64)
1233	} else {
1234		nonlinear_success_probability(total_inflight_amount_msat, min_liquidity_msat, max_liquidity_msat, capacity_msat, min_zero_implies_no_successes)
1235	}
1236}
1237
1238#[inline(always)]
1239/// Identical to [`success_probability_float`] but returns an integer numerator and denominator.
1240///
1241/// Must not return a numerator or denominator greater than 2^31 for arguments less than 2^31.
1242fn success_probability(
1243	total_inflight_amount_msat: u64, min_liquidity_msat: u64, max_liquidity_msat: u64,
1244	capacity_msat: u64, params: &ProbabilisticScoringFeeParameters,
1245	min_zero_implies_no_successes: bool,
1246) -> (u64, u64) {
1247	debug_assert!(min_liquidity_msat <= total_inflight_amount_msat);
1248	debug_assert!(total_inflight_amount_msat < max_liquidity_msat);
1249	debug_assert!(max_liquidity_msat <= capacity_msat);
1250
1251	if params.linear_success_probability {
1252		linear_success_probability(total_inflight_amount_msat, min_liquidity_msat, max_liquidity_msat, min_zero_implies_no_successes)
1253	} else {
1254		// We calculate the nonlinear probabilities using floats anyway, so just stub out to
1255		// the float version and then convert to integers.
1256		let (num, den) = nonlinear_success_probability(
1257			total_inflight_amount_msat, min_liquidity_msat, max_liquidity_msat, capacity_msat,
1258			min_zero_implies_no_successes,
1259		);
1260
1261		// Because our numerator and denominator max out at 0.0078125 we need to multiply them
1262		// by quite a large factor to get something useful (ideally in the 2^30 range).
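		// (The float values max out at 0.0078125 = 2^-7, and 2^-7 * 2^36 = 2^29, so even after the
		// 78/64 no-successes scaling the results stay under the 2^30 sanity bounds asserted below.)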
1263		const BILLIONISH: f64 = 1024.0 * 1024.0 * 1024.0 * 64.0;
1264		let numerator = (num * BILLIONISH) as u64 + 1;
1265		let denominator = (den * BILLIONISH) as u64 + 1;
1266		debug_assert!(numerator <= 1 << 30, "Got large numerator ({}) from float {}.", numerator, num);
1267		debug_assert!(denominator <= 1 << 30, "Got large denominator ({}) from float {}.", denominator, den);
1268		(numerator, denominator)
1269	}
1270}
1271
1272impl<L: Deref<Target = u64>, HT: Deref<Target = HistoricalLiquidityTracker>, T: Deref<Target = Duration>>
1273DirectedChannelLiquidity<L, HT, T> {
1274	/// Returns a liquidity penalty for routing the given HTLC `amount_msat` through the channel in
1275	/// this direction.
1276	fn penalty_msat(
1277		&self, amount_msat: u64, inflight_htlc_msat: u64,
1278		score_params: &ProbabilisticScoringFeeParameters,
1279	) -> u64 {
1280		let total_inflight_amount_msat = amount_msat.saturating_add(inflight_htlc_msat);
1281		let available_capacity = self.capacity_msat;
1282		let max_liquidity_msat = self.max_liquidity_msat();
1283		let min_liquidity_msat = core::cmp::min(self.min_liquidity_msat(), max_liquidity_msat);
1284
1285		let mut res = 0;
1286		if score_params.liquidity_penalty_multiplier_msat != 0 ||
1287		   score_params.liquidity_penalty_amount_multiplier_msat != 0 {
1288			if total_inflight_amount_msat <= min_liquidity_msat {
1289				// If the in-flight is less than the minimum liquidity estimate, we don't assign a
1290				// liquidity penalty at all (as the success probability is 100%).
1291			} else if total_inflight_amount_msat >= max_liquidity_msat {
1292				// Equivalent to hitting the else clause below with the amount equal to the effective
1293				// capacity and without any certainty on the liquidity upper bound, plus the
1294				// impossibility penalty.
1295				let negative_log10_times_2048 = NEGATIVE_LOG10_UPPER_BOUND * 2048;
1296				res = Self::combined_penalty_msat(amount_msat, negative_log10_times_2048,
1297						score_params.liquidity_penalty_multiplier_msat,
1298						score_params.liquidity_penalty_amount_multiplier_msat);
1299			} else {
1300				let (numerator, denominator) = success_probability(
1301					total_inflight_amount_msat, min_liquidity_msat, max_liquidity_msat,
1302					available_capacity, score_params, false,
1303				);
1304				if denominator - numerator < denominator / PRECISION_LOWER_BOUND_DENOMINATOR {
1305					// If the failure probability is < 1.5625% (as 1 - numerator/denominator < 1/64),
1306					// don't bother trying to use the log approximation as it gets too noisy to be
1307					// particularly helpful, instead just round down to 0.
1308				} else {
1309					let negative_log10_times_2048 =
1310						log_approx::negative_log10_times_2048(numerator, denominator);
1311					res = Self::combined_penalty_msat(amount_msat, negative_log10_times_2048,
1312						score_params.liquidity_penalty_multiplier_msat,
1313						score_params.liquidity_penalty_amount_multiplier_msat);
1314				}
1315			}
1316		}
1317
1318		if total_inflight_amount_msat >= max_liquidity_msat {
1319			res = res.saturating_add(score_params.considered_impossible_penalty_msat);
1320		}
1321
1322		if total_inflight_amount_msat >= available_capacity {
1323			// We're trying to send more than the capacity, use a max penalty.
1324			res = res.saturating_add(Self::combined_penalty_msat(amount_msat,
1325				NEGATIVE_LOG10_UPPER_BOUND * 2048,
1326				score_params.historical_liquidity_penalty_multiplier_msat,
1327				score_params.historical_liquidity_penalty_amount_multiplier_msat));
1328			return res;
1329		}
1330
1331		if score_params.historical_liquidity_penalty_multiplier_msat != 0 ||
1332		   score_params.historical_liquidity_penalty_amount_multiplier_msat != 0 {
1333			if let Some(cumulative_success_prob_times_billion) = self.liquidity_history
1334				.calculate_success_probability_times_billion(
1335					score_params, total_inflight_amount_msat, self.capacity_msat
1336				)
1337			{
1338				let historical_negative_log10_times_2048 =
1339					log_approx::negative_log10_times_2048(cumulative_success_prob_times_billion + 1, 1024 * 1024 * 1024);
1340				res = res.saturating_add(Self::combined_penalty_msat(amount_msat,
1341					historical_negative_log10_times_2048, score_params.historical_liquidity_penalty_multiplier_msat,
1342					score_params.historical_liquidity_penalty_amount_multiplier_msat));
1343			} else {
1344				// If we don't have any valid points (or, once decayed, we have less than a full
1345				// point), redo the non-historical calculation with no liquidity bounds tracked and
1346				// the historical penalty multipliers.
1347				let (numerator, denominator) = success_probability(
1348					total_inflight_amount_msat, 0, available_capacity, available_capacity,
1349					score_params, true,
1350				);
1351				let negative_log10_times_2048 =
1352					log_approx::negative_log10_times_2048(numerator, denominator);
1353				res = res.saturating_add(Self::combined_penalty_msat(amount_msat, negative_log10_times_2048,
1354					score_params.historical_liquidity_penalty_multiplier_msat,
1355					score_params.historical_liquidity_penalty_amount_multiplier_msat));
1356			}
1357		}
1358
1359		res
1360	}
1361
1362	/// Computes the liquidity penalty from the penalty multipliers.
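	///
	/// As a hedged worked example (the multiplier value is illustrative): with an estimated 50%
	/// success probability, `negative_log10_times_2048` is about `-log10(0.5) * 2048 ≈ 616`, so a
	/// `liquidity_penalty_multiplier_msat` of 30_000 gives a penalty of roughly
	/// `616 * 30_000 / 2048 ≈ 9_000` msat, before the amount-based component is added.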
1363	#[inline(always)]
1364	fn combined_penalty_msat(amount_msat: u64, mut negative_log10_times_2048: u64,
1365		liquidity_penalty_multiplier_msat: u64, liquidity_penalty_amount_multiplier_msat: u64,
1366	) -> u64 {
1367		negative_log10_times_2048 =
1368			negative_log10_times_2048.min(NEGATIVE_LOG10_UPPER_BOUND * 2048);
1369
1370		// Upper bound the liquidity penalty to ensure some channel is selected.
1371		let liquidity_penalty_msat = negative_log10_times_2048
1372			.saturating_mul(liquidity_penalty_multiplier_msat) / 2048;
1373		let amount_penalty_msat = negative_log10_times_2048
1374			.saturating_mul(liquidity_penalty_amount_multiplier_msat)
1375			.saturating_mul(amount_msat) / 2048 / AMOUNT_PENALTY_DIVISOR;
1376
1377		liquidity_penalty_msat.saturating_add(amount_penalty_msat)
1378	}
1379
1380	/// Returns the lower bound of the channel liquidity balance in this direction.
1381	#[inline(always)]
1382	fn min_liquidity_msat(&self) -> u64 {
1383		*self.min_liquidity_offset_msat
1384	}
1385
1386	/// Returns the upper bound of the channel liquidity balance in this direction.
1387	#[inline(always)]
1388	fn max_liquidity_msat(&self) -> u64 {
1389		self.capacity_msat
1390			.saturating_sub(*self.max_liquidity_offset_msat)
1391	}
1392}
1393
1394impl<L: DerefMut<Target = u64>, HT: DerefMut<Target = HistoricalLiquidityTracker>, T: DerefMut<Target = Duration>>
1395DirectedChannelLiquidity<L, HT, T> {
1396	/// Adjusts the channel liquidity balance bounds when failing to route `amount_msat`.
1397	fn failed_at_channel<Log: Deref>(
1398		&mut self, amount_msat: u64, duration_since_epoch: Duration, chan_descr: fmt::Arguments, logger: &Log
1399	) where Log::Target: Logger {
1400		let existing_max_msat = self.max_liquidity_msat();
1401		if amount_msat < existing_max_msat {
1402			log_debug!(logger, "Setting max liquidity of {} from {} to {}", chan_descr, existing_max_msat, amount_msat);
1403			self.set_max_liquidity_msat(amount_msat, duration_since_epoch);
1404		} else {
1405			log_trace!(logger, "Max liquidity of {} is {} (already less than or equal to {})",
1406				chan_descr, existing_max_msat, amount_msat);
1407		}
1408		self.update_history_buckets(0, duration_since_epoch);
1409	}
1410
1411	/// Adjusts the channel liquidity balance bounds when failing to route `amount_msat` downstream.
1412	fn failed_downstream<Log: Deref>(
1413		&mut self, amount_msat: u64, duration_since_epoch: Duration, chan_descr: fmt::Arguments, logger: &Log
1414	) where Log::Target: Logger {
1415		let existing_min_msat = self.min_liquidity_msat();
1416		if amount_msat > existing_min_msat {
1417			log_debug!(logger, "Setting min liquidity of {} from {} to {}", chan_descr, existing_min_msat, amount_msat);
1418			self.set_min_liquidity_msat(amount_msat, duration_since_epoch);
1419		} else {
1420			log_trace!(logger, "Min liquidity of {} is {} (already greater than or equal to {})",
1421				chan_descr, existing_min_msat, amount_msat);
1422		}
1423		self.update_history_buckets(0, duration_since_epoch);
1424	}
1425
1426	/// Adjusts the channel liquidity balance bounds when successfully routing `amount_msat`.
1427	fn successful<Log: Deref>(&mut self,
1428		amount_msat: u64, duration_since_epoch: Duration, chan_descr: fmt::Arguments, logger: &Log
1429	) where Log::Target: Logger {
1430		let max_liquidity_msat = self.max_liquidity_msat().checked_sub(amount_msat).unwrap_or(0);
1431		log_debug!(logger, "Subtracting {} from max liquidity of {} (setting it to {})", amount_msat, chan_descr, max_liquidity_msat);
1432		self.set_max_liquidity_msat(max_liquidity_msat, duration_since_epoch);
1433		self.update_history_buckets(amount_msat, duration_since_epoch);
1434	}
1435
1436	/// Updates the history buckets for this channel. Because the history buckets track what we now
1437	/// know about the channel's state *prior to our payment* (i.e. what we assume is "steady
1438	/// state"), we allow the caller to set an offset applied to our liquidity bounds which
1439	/// represents the amount of the successful payment we just made.
1440	fn update_history_buckets(&mut self, bucket_offset_msat: u64, duration_since_epoch: Duration) {
1441		self.liquidity_history.track_datapoint(
1442			*self.min_liquidity_offset_msat + bucket_offset_msat,
1443			self.max_liquidity_offset_msat.saturating_sub(bucket_offset_msat),
1444			self.capacity_msat,
1445		);
1446		*self.offset_history_last_updated = duration_since_epoch;
1447	}
1448
1449	/// Adjusts the lower bound of the channel liquidity balance in this direction.
1450	fn set_min_liquidity_msat(&mut self, amount_msat: u64, duration_since_epoch: Duration) {
1451		*self.min_liquidity_offset_msat = amount_msat;
1452		if amount_msat > self.max_liquidity_msat() {
1453			*self.max_liquidity_offset_msat = 0;
1454		}
1455		*self.last_updated = duration_since_epoch;
1456	}
1457
1458	/// Adjusts the upper bound of the channel liquidity balance in this direction.
1459	fn set_max_liquidity_msat(&mut self, amount_msat: u64, duration_since_epoch: Duration) {
1460		*self.max_liquidity_offset_msat = self.capacity_msat.checked_sub(amount_msat).unwrap_or(0);
1461		if amount_msat < *self.min_liquidity_offset_msat {
1462			*self.min_liquidity_offset_msat = 0;
1463		}
1464		*self.last_updated = duration_since_epoch;
1465	}
1466}
1467
1468impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> ScoreLookUp for ProbabilisticScorer<G, L> where L::Target: Logger {
1469	type ScoreParams = ProbabilisticScoringFeeParameters;
1470	fn channel_penalty_msat(
1471		&self, candidate: &CandidateRouteHop, usage: ChannelUsage, score_params: &ProbabilisticScoringFeeParameters
1472	) -> u64 {
1473		let (scid, target) = match candidate {
1474			CandidateRouteHop::PublicHop(PublicHopCandidate { info, short_channel_id }) => {
1475				(short_channel_id, info.target())
1476			},
1477			_ => return 0,
1478		};
1479		let source = candidate.source();
1480		if let Some(penalty) = score_params.manual_node_penalties.get(target) {
1481			return *penalty;
1482		}
1483
1484		let base_penalty_msat = score_params.base_penalty_msat.saturating_add(
1485			score_params.base_penalty_amount_multiplier_msat
1486				.saturating_mul(usage.amount_msat) / BASE_AMOUNT_PENALTY_DIVISOR);
1487
1488		let mut anti_probing_penalty_msat = 0;
1489		match usage.effective_capacity {
1490			EffectiveCapacity::ExactLiquidity { liquidity_msat: amount_msat } |
1491				EffectiveCapacity::HintMaxHTLC { amount_msat } =>
1492			{
1493				if usage.amount_msat > amount_msat {
1494					return u64::max_value();
1495				} else {
1496					return base_penalty_msat;
1497				}
1498			},
1499			EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat } => {
1500				if htlc_maximum_msat >= capacity_msat/2 {
1501					anti_probing_penalty_msat = score_params.anti_probing_penalty_msat;
1502				}
1503			},
1504			_ => {},
1505		}
1506
1507		let capacity_msat = usage.effective_capacity.as_msat();
1508		self.channel_liquidities
1509			.get(scid)
1510			.unwrap_or(&ChannelLiquidity::new(Duration::ZERO))
1511			.as_directed(&source, &target, capacity_msat)
1512			.penalty_msat(usage.amount_msat, usage.inflight_htlc_msat, score_params)
1513			.saturating_add(anti_probing_penalty_msat)
1514			.saturating_add(base_penalty_msat)
1515	}
1516}
1517
1518impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> ScoreUpdate for ProbabilisticScorer<G, L> where L::Target: Logger {
1519	fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) {
1520		let amount_msat = path.final_value_msat();
1521		log_trace!(self.logger, "Scoring path through to SCID {} as having failed at {} msat", short_channel_id, amount_msat);
1522		let network_graph = self.network_graph.read_only();
1523		for (hop_idx, hop) in path.hops.iter().enumerate() {
1524			let target = NodeId::from_pubkey(&hop.pubkey);
1525			let channel_directed_from_source = network_graph.channels()
1526				.get(&hop.short_channel_id)
1527				.and_then(|channel| channel.as_directed_to(&target));
1528
1529			let at_failed_channel = hop.short_channel_id == short_channel_id;
1530			if at_failed_channel && hop_idx == 0 {
1531				log_warn!(self.logger, "Payment failed at the first hop - we do not attempt to learn channel info in such cases as we can directly observe local state.\n\tBecause we know the local state, we should generally not see failures here - this may be an indication that your channel peer on channel {} is broken and you may wish to close the channel.", hop.short_channel_id);
1532			}
1533
1534			// Only score announced channels.
1535			if let Some((channel, source)) = channel_directed_from_source {
1536				let capacity_msat = channel.effective_capacity().as_msat();
1537				if at_failed_channel {
1538					self.channel_liquidities
1539						.entry(hop.short_channel_id)
1540						.or_insert_with(|| ChannelLiquidity::new(duration_since_epoch))
1541						.as_directed_mut(source, &target, capacity_msat)
1542						.failed_at_channel(amount_msat, duration_since_epoch,
1543							format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
1544				} else {
1545					self.channel_liquidities
1546						.entry(hop.short_channel_id)
1547						.or_insert_with(|| ChannelLiquidity::new(duration_since_epoch))
1548						.as_directed_mut(source, &target, capacity_msat)
1549						.failed_downstream(amount_msat, duration_since_epoch,
1550							format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
1551				}
1552			} else {
1553				log_debug!(self.logger, "Not able to penalize channel with SCID {} as we do not have graph info for it (likely a route-hint last-hop).",
1554					hop.short_channel_id);
1555			}
1556			if at_failed_channel { break; }
1557		}
1558	}
1559
1560	fn payment_path_successful(&mut self, path: &Path, duration_since_epoch: Duration) {
1561		let amount_msat = path.final_value_msat();
1562		log_trace!(self.logger, "Scoring path through SCID {} as having succeeded at {} msat.",
1563			path.hops.split_last().map(|(hop, _)| hop.short_channel_id).unwrap_or(0), amount_msat);
1564		let network_graph = self.network_graph.read_only();
1565		for hop in &path.hops {
1566			let target = NodeId::from_pubkey(&hop.pubkey);
1567			let channel_directed_from_source = network_graph.channels()
1568				.get(&hop.short_channel_id)
1569				.and_then(|channel| channel.as_directed_to(&target));
1570
1571			// Only score announced channels.
1572			if let Some((channel, source)) = channel_directed_from_source {
1573				let capacity_msat = channel.effective_capacity().as_msat();
1574				self.channel_liquidities
1575					.entry(hop.short_channel_id)
1576					.or_insert_with(|| ChannelLiquidity::new(duration_since_epoch))
1577					.as_directed_mut(source, &target, capacity_msat)
1578					.successful(amount_msat, duration_since_epoch,
1579						format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
1580			} else {
1581				log_debug!(self.logger, "Not able to learn for channel with SCID {} as we do not have graph info for it (likely a route-hint last-hop).",
1582					hop.short_channel_id);
1583			}
1584		}
1585	}
1586
1587	fn probe_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) {
1588		self.payment_path_failed(path, short_channel_id, duration_since_epoch)
1589	}
1590
1591	fn probe_successful(&mut self, path: &Path, duration_since_epoch: Duration) {
1592		self.payment_path_failed(path, u64::max_value(), duration_since_epoch)
1593	}
1594
1595	fn time_passed(&mut self, duration_since_epoch: Duration) {
1596		let decay_params = self.decay_params;
1597		self.channel_liquidities.retain(|_scid, liquidity| {
1598			liquidity.min_liquidity_offset_msat =
1599				liquidity.decayed_offset(liquidity.min_liquidity_offset_msat, duration_since_epoch, decay_params);
1600			liquidity.max_liquidity_offset_msat =
1601				liquidity.decayed_offset(liquidity.max_liquidity_offset_msat, duration_since_epoch, decay_params);
1602			liquidity.last_updated = duration_since_epoch;
1603
1604			let elapsed_time =
1605				duration_since_epoch.saturating_sub(liquidity.offset_history_last_updated);
1606			if elapsed_time > decay_params.historical_no_updates_half_life {
1607				let half_life = decay_params.historical_no_updates_half_life.as_secs_f64();
1608				if half_life != 0.0 {
1609					liquidity.liquidity_history.decay_buckets(elapsed_time.as_secs_f64() / half_life);
1610					liquidity.offset_history_last_updated = duration_since_epoch;
1611				}
1612			}
1613			liquidity.min_liquidity_offset_msat != 0 || liquidity.max_liquidity_offset_msat != 0 ||
1614				liquidity.liquidity_history.has_datapoints()
1615		});
1616	}
1617}
1618
1619#[cfg(c_bindings)]
1620impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> Score for ProbabilisticScorer<G, L>
1621where L::Target: Logger {}
1622
1623#[cfg(feature = "std")]
1624#[inline]
1625fn powf64(n: f64, exp: f64) -> f64 {
1626	n.powf(exp)
1627}
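// In `no_std` builds we approximate `powf` via `libm` at `f32` precision, trading a little
// accuracy for availability without the standard library.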
1628#[cfg(not(feature = "std"))]
1629fn powf64(n: f64, exp: f64) -> f64 {
1630	libm::powf(n as f32, exp as f32) as f64
1631}
1632
1633mod bucketed_history {
1634	use super::*;
1635
1636	// Because liquidity is often skewed heavily in one direction, we store historical state
1637	// distribution in buckets of different size. For backwards compatibility, buckets of size 1/8th
1638	// must fit evenly into the buckets here.
1639	//
1640	// The smallest bucket is 2^-14th of the channel, for each of our 32 buckets here we define the
1641	// width of the bucket in 2^14'ths of the channel. This increases exponentially until we reach
1642	// a full 16th of the channel's capacity, which is repeated a few times for backwards
1643	// compatibility. The four middle buckets represent full octiles of the channel's capacity.
1644	//
1645	// For a 1 BTC channel, this lets us differentiate between failures in the bottom 6k sats, or
1646	// between the 12,000th sat and 24,000th sat, while only needing to store and operate on 32
1647	// buckets in total.
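	//
	// As a concrete sketch: for that same 1 BTC channel, an amount of exactly half the capacity
	// maps to position 8191 out of 16384 and thus lands in bucket 15, one of the four wide
	// octile-sized buckets in the middle.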
1648
1649	// By default u16s may not be cache-aligned, but we'd rather not have to read a third cache
1650	// line just to access it
1651	#[repr(align(128))]
1652	struct BucketStartPos([u16; 33]);
1653	impl BucketStartPos {
1654		const fn new() -> Self {
1655			Self([
1656				0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 3072, 4096, 6144, 8192, 10240, 12288,
1657				13312, 14336, 15360, 15872, 16128, 16256, 16320, 16352, 16368, 16376, 16380, 16382, 16383, 16384,
1658			])
1659		}
1660	}
1661	impl core::ops::Index<usize> for BucketStartPos {
1662		type Output = u16;
1663		#[inline(always)]
1664		fn index(&self, index: usize) -> &u16 { &self.0[index] }
1665	}
1666	const BUCKET_START_POS: BucketStartPos = BucketStartPos::new();
1667
1668	const LEGACY_TO_BUCKET_RANGE: [(u8, u8); 8] = [
1669		(0, 12), (12, 14), (14, 15), (15, 16), (16, 17), (17, 18), (18, 20), (20, 32)
1670	];
1671
1672	const POSITION_TICKS: u16 = 1 << 14;
1673
1674	fn pos_to_bucket(pos: u16) -> usize {
1675		for bucket in 0..32 {
1676			if pos < BUCKET_START_POS[bucket + 1] {
1677				return bucket;
1678			}
1679		}
1680		debug_assert!(false);
1681		return 32;
1682	}
1683
1684	#[cfg(test)]
1685	#[test]
1686	fn check_bucket_maps() {
1687		const BUCKET_WIDTH_IN_16384S: [u16; 32] = [
1688			1, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 1024, 1024, 2048, 2048,
1689			2048, 2048, 1024, 1024, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1, 1];
1690
1691		let mut min_size_iter = 0;
1692		let mut legacy_bucket_iter = 0;
1693		for (bucket, width) in BUCKET_WIDTH_IN_16384S.iter().enumerate() {
1694			assert_eq!(BUCKET_START_POS[bucket], min_size_iter);
1695			for i in 0..*width {
1696				assert_eq!(pos_to_bucket(min_size_iter + i) as usize, bucket);
1697			}
1698			min_size_iter += *width;
1699			if min_size_iter % (POSITION_TICKS / 8) == 0 {
1700				assert_eq!(LEGACY_TO_BUCKET_RANGE[legacy_bucket_iter].1 as usize, bucket + 1);
1701				if legacy_bucket_iter + 1 < 8 {
1702					assert_eq!(LEGACY_TO_BUCKET_RANGE[legacy_bucket_iter + 1].0 as usize, bucket + 1);
1703				}
1704				legacy_bucket_iter += 1;
1705			}
1706		}
1707		assert_eq!(BUCKET_START_POS[32], POSITION_TICKS);
1708		assert_eq!(min_size_iter, POSITION_TICKS);
1709	}
1710
1711	#[inline]
1712	fn amount_to_pos(amount_msat: u64, capacity_msat: u64) -> u16 {
1713		let pos = if amount_msat < u64::max_value() / (POSITION_TICKS as u64) {
1714			(amount_msat * (POSITION_TICKS as u64) / capacity_msat.saturating_add(1))
1715				.try_into().unwrap_or(POSITION_TICKS)
1716		} else {
1717			// Only use 128-bit arithmetic when multiplication will overflow to avoid 128-bit
1718			// division. This branch should only be hit in fuzz testing since the amount would
1719			// need to be over 2.88 million BTC in practice.
1720			((amount_msat as u128) * (POSITION_TICKS as u128)
1721					/ (capacity_msat as u128).saturating_add(1))
1722				.try_into().unwrap_or(POSITION_TICKS)
1723		};
1724		// If we are running in a client that doesn't validate gossip, it's possible for a channel's
1725		// capacity to change due to a `channel_update` message which, if received while a payment
1726		// is in-flight, could cause this to fail. Thus, we only assert in test.
1727		#[cfg(test)]
1728		debug_assert!(pos < POSITION_TICKS);
1729		pos
1730	}
1731
1732	/// Prior to LDK 0.0.117 we used eight buckets which were split evenly across the eight
1733	/// octiles. This was changed to use 32 buckets for accuracy reasons in 0.0.117, however we
1734	/// support reading the legacy values here for backwards compatibility.
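	///
	/// For example, legacy bucket 0 (covering the bottom octile) is spread evenly across new
	/// buckets 0 through 11, with its weight divided by 12.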
1735	pub(super) struct LegacyHistoricalBucketRangeTracker {
1736		buckets: [u16; 8],
1737	}
1738
1739	impl LegacyHistoricalBucketRangeTracker {
1740		pub(crate) fn into_current(self) -> HistoricalBucketRangeTracker {
1741			let mut buckets = [0; 32];
1742			for (idx, legacy_bucket) in self.buckets.iter().enumerate() {
1743				let mut new_val = *legacy_bucket;
1744				let (start, end) = LEGACY_TO_BUCKET_RANGE[idx];
1745				new_val /= (end - start) as u16;
1746				for i in start..end {
1747					buckets[i as usize] = new_val;
1748				}
1749			}
1750			HistoricalBucketRangeTracker { buckets }
1751		}
1752	}
1753
1754	/// Tracks the historical state of a distribution as a weighted average of how much time was spent
1755	/// in each of 32 buckets.
1756	#[derive(Clone, Copy)]
1757	pub(super) struct HistoricalBucketRangeTracker {
1758		buckets: [u16; 32],
1759	}
1760
1761	/// Buckets are stored in fixed point numbers with a 5 bit fractional part. Thus, the value
1762	/// "one" is 32, or this constant.
1763	pub const BUCKET_FIXED_POINT_ONE: u16 = 32;
1764
1765	impl HistoricalBucketRangeTracker {
1766		pub(super) fn new() -> Self { Self { buckets: [0; 32] } }
1767		fn track_datapoint(&mut self, liquidity_offset_msat: u64, capacity_msat: u64) {
1768			// We have 32 leaky buckets for min and max liquidity. Each bucket tracks the amount of time
1769			// we spend in each bucket as a 16-bit fixed-point number with a 5 bit fractional part.
1770			//
1771			// Each time we update our liquidity estimate, we add 32 (1.0 in our fixed-point system) to
1772			// the buckets for the current min and max liquidity offset positions.
1773			//
1774			// We then decay each bucket by multiplying by 2047/2048 (avoiding dividing by a
1775			// non-power-of-two). This ensures we can't actually overflow the u16 - when we get to
1776			// 63,457 adding 32 and decaying by 2047/2048 leaves us back at 63,457.
1777			//
1778			// In total, this allows us to track data for the last 8,000 or so payments across a given
1779			// channel.
1780			//
1781			// These constants are a balance - we try to fit in 2 bytes per bucket to reduce overhead,
1782			// and need to balance having more bits in the decimal part (to ensure decay isn't too
1783			// non-linear) with having too few bits in the mantissa, causing us to not store very many
1784			// datapoints.
1785			//
1786			// The constants were picked experimentally, selecting a decay amount that restricts us
1787			// from overflowing buckets without having to cap them manually.
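			//
			// As a rough worked example: a bucket which just received a datapoint holds 32 (1.0 in
			// fixed point); after roughly 1,400 further datapoints land in other buckets, the
			// repeated 2047/2048 decays have reduced it to about half that (ignoring integer
			// truncation, which makes the decay slightly faster).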
1788
1789			let pos: u16 = amount_to_pos(liquidity_offset_msat, capacity_msat);
1790			if pos < POSITION_TICKS {
1791				for e in self.buckets.iter_mut() {
1792					*e = ((*e as u32) * 2047 / 2048) as u16;
1793				}
1794				let bucket = pos_to_bucket(pos);
1795				self.buckets[bucket] = self.buckets[bucket].saturating_add(BUCKET_FIXED_POINT_ONE);
1796			}
1797		}
1798
1799		/// Applies decay to all buckets, treating `half_lives` as the number of half-lives that have elapsed.
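		///
		/// For example, `decay(2.0)` (two half-lives) scales every bucket to roughly a quarter of
		/// its previous value, modulo fixed-point truncation.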
1800		fn decay(&mut self, half_lives: f64) {
1801			let factor = (1024.0 * powf64(0.5, half_lives)) as u64;
1802			for bucket in self.buckets.iter_mut() {
1803				*bucket = ((*bucket as u64) * factor / 1024) as u16;
1804			}
1805		}
1806	}
1807
1808	impl_writeable_tlv_based!(HistoricalBucketRangeTracker, { (0, buckets, required) });
1809	impl_writeable_tlv_based!(LegacyHistoricalBucketRangeTracker, { (0, buckets, required) });
1810
1811	#[derive(Clone, Copy)]
1812	#[repr(C)] // Force the fields in memory to be in the order we specify.
1813	pub(super) struct HistoricalLiquidityTracker {
1814		// This struct sits inside a `(u64, ChannelLiquidity)` in memory, and we first read the
1815		// liquidity offsets in `ChannelLiquidity` when calculating the non-historical score. This
1816		// means that the first handful of bytes of this struct will already be sitting in cache by
1817		// the time we go to look at them.
1818		//
1819		// Because the first thing we do is check if `total_valid_points` is sufficient to consider
1820		// the data here at all, and can return early if it is not, we want this to go first to
1821		// avoid hitting a second cache line load entirely in that case.
1822		//
1823		// Note that we store it as an `f64` rather than a `u64` (potentially losing some
1824		// precision) because we ultimately need the value as an `f64` when dividing bucket weights
1825		// by it. Storing it as an `f64` avoids doing the additional int -> float conversion in the
1826		// hot score-calculation path.
1827		total_valid_points_tracked: f64,
1828		min_liquidity_offset_history: HistoricalBucketRangeTracker,
1829		max_liquidity_offset_history: HistoricalBucketRangeTracker,
1830	}
1831
1832	impl HistoricalLiquidityTracker {
1833		pub(super) fn new() -> HistoricalLiquidityTracker {
1834			HistoricalLiquidityTracker {
1835				min_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
1836				max_liquidity_offset_history: HistoricalBucketRangeTracker::new(),
1837				total_valid_points_tracked: 0.0,
1838			}
1839		}
1840
1841		pub(super) fn from_min_max(
1842			min_liquidity_offset_history: HistoricalBucketRangeTracker,
1843			max_liquidity_offset_history: HistoricalBucketRangeTracker,
1844		) -> HistoricalLiquidityTracker {
1845			let mut res = HistoricalLiquidityTracker {
1846				min_liquidity_offset_history,
1847				max_liquidity_offset_history,
1848				total_valid_points_tracked: 0.0,
1849			};
1850			res.recalculate_valid_point_count();
1851			res
1852		}
1853
1854		pub(super) fn has_datapoints(&self) -> bool {
1855			self.min_liquidity_offset_history.buckets != [0; 32] ||
1856				self.max_liquidity_offset_history.buckets != [0; 32]
1857		}
1858
1859		pub(super) fn decay_buckets(&mut self, half_lives: f64) {
1860			self.min_liquidity_offset_history.decay(half_lives);
1861			self.max_liquidity_offset_history.decay(half_lives);
1862			self.recalculate_valid_point_count();
1863		}
1864
1865		fn recalculate_valid_point_count(&mut self) {
1866			let mut total_valid_points_tracked = 0u128;
1867			for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
1868				for max_bucket in self.max_liquidity_offset_history.buckets.iter().take(32 - min_idx) {
1869					// In testing, raising the weights of buckets to a high power led to better
1870					// scoring results. Thus, we raise the bucket weights to the 4th power here (by
1871					// squaring the result of multiplying the weights). This results in
1872					// bucket_weight having at max 64 bits, which means we have to do our summation
1873					// in 128-bit math.
1874					let mut bucket_weight = (*min_bucket as u64) * (*max_bucket as u64);
1875					bucket_weight *= bucket_weight;
1876					total_valid_points_tracked += bucket_weight as u128;
1877				}
1878			}
1879			self.total_valid_points_tracked = total_valid_points_tracked as f64;
1880		}
1881
1882		pub(super) fn writeable_min_offset_history(&self) -> &HistoricalBucketRangeTracker {
1883			&self.min_liquidity_offset_history
1884		}
1885
1886		pub(super) fn writeable_max_offset_history(&self) -> &HistoricalBucketRangeTracker {
1887			&self.max_liquidity_offset_history
1888		}
1889
1890		pub(super) fn as_directed<'a>(&'a self, source_less_than_target: bool)
1891		-> DirectedHistoricalLiquidityTracker<&'a HistoricalLiquidityTracker> {
1892			DirectedHistoricalLiquidityTracker { source_less_than_target, tracker: self }
1893		}
1894
1895		pub(super) fn as_directed_mut<'a>(&'a mut self, source_less_than_target: bool)
1896		-> DirectedHistoricalLiquidityTracker<&'a mut HistoricalLiquidityTracker> {
1897			DirectedHistoricalLiquidityTracker { source_less_than_target, tracker: self }
1898		}
1899	}
1900
1901	/// A set of buckets representing the history of where we've seen the minimum- and maximum-
1902	/// liquidity bounds for a given channel.
1903	pub(super) struct DirectedHistoricalLiquidityTracker<D: Deref<Target = HistoricalLiquidityTracker>> {
1904		source_less_than_target: bool,
1905		tracker: D,
1906	}
1907
1908	impl<D: DerefMut<Target = HistoricalLiquidityTracker>> DirectedHistoricalLiquidityTracker<D> {
1909		pub(super) fn track_datapoint(
1910			&mut self, min_offset_msat: u64, max_offset_msat: u64, capacity_msat: u64,
1911		) {
1912			if self.source_less_than_target {
1913				self.tracker.min_liquidity_offset_history.track_datapoint(min_offset_msat, capacity_msat);
1914				self.tracker.max_liquidity_offset_history.track_datapoint(max_offset_msat, capacity_msat);
1915			} else {
1916				self.tracker.max_liquidity_offset_history.track_datapoint(min_offset_msat, capacity_msat);
1917				self.tracker.min_liquidity_offset_history.track_datapoint(max_offset_msat, capacity_msat);
1918			}
1919			self.tracker.recalculate_valid_point_count();
1920		}
1921	}
1922
1923	impl<D: Deref<Target = HistoricalLiquidityTracker>> DirectedHistoricalLiquidityTracker<D> {
1924		pub(super) fn min_liquidity_offset_history_buckets(&self) -> &[u16; 32] {
1925			if self.source_less_than_target {
1926				&self.tracker.min_liquidity_offset_history.buckets
1927			} else {
1928				&self.tracker.max_liquidity_offset_history.buckets
1929			}
1930		}
1931
1932		pub(super) fn max_liquidity_offset_history_buckets(&self) -> &[u16; 32] {
1933			if self.source_less_than_target {
1934				&self.tracker.max_liquidity_offset_history.buckets
1935			} else {
1936				&self.tracker.min_liquidity_offset_history.buckets
1937			}
1938		}
1939
1940		#[inline]
1941		pub(super) fn calculate_success_probability_times_billion(
1942			&self, params: &ProbabilisticScoringFeeParameters, total_inflight_amount_msat: u64,
1943			capacity_msat: u64
1944		) -> Option<u64> {
1945			// If historical penalties are enabled, we try to calculate a probability of success
1946			// given our historical distribution of min- and max-liquidity bounds in a channel.
1947			// To do so, we walk the set of historical liquidity bucket (min, max) combinations
1948			// (where min_idx < max_idx, as having a minimum above our maximum is an invalid
1949			// state). For each pair, we calculate the probability as if the bucket's corresponding
1950			// min- and max- liquidity bounds were our current liquidity bounds and then multiply
1951			// that probability by the weight of the selected buckets.
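			//
			// As a sketch, with `W = total_valid_points_tracked` and `w(min, max)` the fourth-power
			// weight of a given (min, max) bucket pair, this computes roughly
			//   P(success) = sum over valid pairs of (w(min, max) / W) * P(success | pair's bounds),
			// where the conditional probabilities come from `success_probability_float` and the 0th
			// min-bucket is special-cased below.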
1952			let payment_pos = amount_to_pos(total_inflight_amount_msat, capacity_msat);
1953			if payment_pos >= POSITION_TICKS { return None; }
1954
1955			let min_liquidity_offset_history_buckets =
1956				self.min_liquidity_offset_history_buckets();
1957			let max_liquidity_offset_history_buckets =
1958				self.max_liquidity_offset_history_buckets();
1959
1960			let total_valid_points_tracked = self.tracker.total_valid_points_tracked;
1961			#[cfg(debug_assertions)] {
1962				let mut actual_valid_points_tracked = 0u128;
1963				for (min_idx, min_bucket) in min_liquidity_offset_history_buckets.iter().enumerate() {
1964					for max_bucket in max_liquidity_offset_history_buckets.iter().take(32 - min_idx) {
1965						let mut bucket_weight = (*min_bucket as u64) * (*max_bucket as u64);
1966						bucket_weight *= bucket_weight;
1967						actual_valid_points_tracked += bucket_weight as u128;
1968					}
1969				}
1970				assert_eq!(total_valid_points_tracked, actual_valid_points_tracked as f64);
1971			}
1972
1973			// If the total valid points is smaller than 1.0 (i.e. 32 in our fixed-point scheme),
1974			// treat it as if we were fully decayed.
1975			const FULLY_DECAYED: f64 = BUCKET_FIXED_POINT_ONE as f64 * BUCKET_FIXED_POINT_ONE as f64 *
1976				BUCKET_FIXED_POINT_ONE as f64 * BUCKET_FIXED_POINT_ONE as f64;
1977			if total_valid_points_tracked < FULLY_DECAYED.into() {
1978				return None;
1979			}
1980
1981			let mut cumulative_success_prob = 0.0f64;
1982			// Special-case the 0th min bucket - it generally means we failed a payment, so only
1983			// consider the highest (i.e. largest-offset-from-max-capacity) max bucket for all
1984			// points against the 0th min bucket. This avoids the case where we fail to route
1985			// increasingly lower values over a channel, but treat each failure as a separate
1986			// datapoint, many of which may have relatively high maximum-available-liquidity
1987			// values, which will result in us thinking we have some nontrivial probability of
1988			// routing up to that amount.
1989			if min_liquidity_offset_history_buckets[0] != 0 {
1990				// Track the highest max-buckets with any data at all, as well as the highest
1991				// max-bucket with at least BUCKET_FIXED_POINT_ONE.
1992				let mut highest_max_bucket_with_points = 0;
1993				let mut highest_max_bucket_with_full_points = None;
1994				let mut total_weight = 0u128;
1995				for (max_idx, max_bucket) in max_liquidity_offset_history_buckets.iter().enumerate() {
1996					if *max_bucket >= BUCKET_FIXED_POINT_ONE {
1997						highest_max_bucket_with_full_points = Some(cmp::max(highest_max_bucket_with_full_points.unwrap_or(0), max_idx));
1998					}
1999					if *max_bucket != 0 {
2000						highest_max_bucket_with_points = cmp::max(highest_max_bucket_with_points, max_idx);
2001					}
2002					// In testing, raising the weights of buckets to a high power led to better
2003					// scoring results. Thus, we raise the bucket weights to the 4th power here (by
2004					// squaring the result of multiplying the weights), matching the logic in
2005					// `recalculate_valid_point_count`.
2006					let bucket_weight = (*max_bucket as u64) * (min_liquidity_offset_history_buckets[0] as u64);
2007					total_weight += (bucket_weight * bucket_weight) as u128;
2008				}
2009				debug_assert!(total_weight as f64 <= total_valid_points_tracked);
2010				// Use the highest max-bucket with at least BUCKET_FIXED_POINT_ONE, but if none is
2011				// available use the highest max-bucket with any non-zero value. This ensures that
2012				// if we have substantially decayed data we don't end up thinking the highest
2013				// max-bucket is zero even though we have no points in the 0th max-bucket and do
2014				// have points elsewhere.
2015				let selected_max = highest_max_bucket_with_full_points.unwrap_or(highest_max_bucket_with_points);
2016				let max_bucket_end_pos = BUCKET_START_POS[32 - selected_max] - 1;
2017				if payment_pos < max_bucket_end_pos {
2018					let (numerator, denominator) = success_probability_float(payment_pos as u64, 0,
2019						max_bucket_end_pos as u64, POSITION_TICKS as u64 - 1, params, true);
2020					let bucket_prob = total_weight as f64 / total_valid_points_tracked;
2021					cumulative_success_prob += bucket_prob * numerator / denominator;
2022				}
2023			}
2024
2025			for (min_idx, min_bucket) in min_liquidity_offset_history_buckets.iter().enumerate().skip(1) {
2026				let min_bucket_start_pos = BUCKET_START_POS[min_idx];
2027				for (max_idx, max_bucket) in max_liquidity_offset_history_buckets.iter().enumerate().take(32 - min_idx) {
2028					let max_bucket_end_pos = BUCKET_START_POS[32 - max_idx] - 1;
2029					if payment_pos >= max_bucket_end_pos {
2030						// Success probability 0, the payment amount may be above the max liquidity
2031						break;
2032					}
2033
2034					// In testing, raising the weights of buckets to a high power led to better
2035					// scoring results. Thus, we raise the bucket weights to the 4th power here (by
2036					// squaring the result of multiplying the weights), matching the logic in
2037					// `recalculate_valid_point_count`.
2038					let mut bucket_weight = (*min_bucket as u64) * (*max_bucket as u64);
2039					bucket_weight *= bucket_weight;
2040					debug_assert!(bucket_weight as f64 <= total_valid_points_tracked);
2041					let bucket_prob = bucket_weight as f64 / total_valid_points_tracked;
2042
2043					if payment_pos < min_bucket_start_pos {
2044						cumulative_success_prob += bucket_prob;
2045					} else {
2046						let (numerator, denominator) = success_probability_float(payment_pos as u64,
2047							min_bucket_start_pos as u64, max_bucket_end_pos as u64,
2048							POSITION_TICKS as u64 - 1, params, true);
2049						cumulative_success_prob += bucket_prob * numerator / denominator;
2050					}
2051				}
2052			}
2053
2054			Some((cumulative_success_prob * (1024.0 * 1024.0 * 1024.0)) as u64)
2055		}
2056	}
2057
2058	#[cfg(test)]
2059	mod tests {
2060		use super::{HistoricalBucketRangeTracker, HistoricalLiquidityTracker, ProbabilisticScoringFeeParameters};
2061
2062		#[test]
2063		fn historical_liquidity_bucket_decay() {
2064			let mut bucket = HistoricalBucketRangeTracker::new();
2065			bucket.track_datapoint(100, 1000);
2066			assert_eq!(
2067				bucket.buckets,
2068				[
2069					0u16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2070					0, 0, 0, 0, 0, 0, 0
2071				]
2072			);
2073
2074			bucket.decay(2.0);
2075			assert_eq!(
2076				bucket.buckets,
2077				[
2078					0u16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2079					0, 0, 0, 0, 0, 0, 0
2080				]
2081			);
2082		}
2083
2084		#[test]
2085		fn historical_heavy_buckets_operations() {
2086			// Checks that we don't hit overflows when working with tons of data (even an
2087			// impossible-to-reach amount of data).
2088			let mut tracker = HistoricalLiquidityTracker::new();
2089			tracker.min_liquidity_offset_history.buckets = [0xffff; 32];
2090			tracker.max_liquidity_offset_history.buckets = [0xffff; 32];
2091			tracker.recalculate_valid_point_count();
2092
2093			let mut directed = tracker.as_directed_mut(true);
2094			let default_params = ProbabilisticScoringFeeParameters::default();
2095			directed.calculate_success_probability_times_billion(&default_params, 42, 1000);
2096			directed.track_datapoint(42, 52, 1000);
2097
2098			tracker.decay_buckets(1.0);
2099		}
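
		// An illustrative addition (not part of the original test suite): it spot-checks, with
		// hand-derived values, how `amount_to_pos` and `pos_to_bucket` map amounts into the 32
		// buckets described at the top of this module.
		#[test]
		fn amount_to_pos_and_bucket_examples() {
			use super::{amount_to_pos, pos_to_bucket};

			// 0 msat maps to position 0, i.e. the first (narrowest) bucket.
			assert_eq!(amount_to_pos(0, 1_000), 0);
			assert_eq!(pos_to_bucket(0), 0);

			// Half of a 1_000 msat channel's capacity lands in bucket 15, one of the four wide
			// octile-sized buckets in the middle.
			let mid = amount_to_pos(500, 1_000);
			assert_eq!(mid, 8183);
			assert_eq!(pos_to_bucket(mid), 15);

			// The full capacity maps just short of the top position (due to the `capacity + 1`
			// denominator), landing in one of the narrow top buckets.
			let full = amount_to_pos(1_000, 1_000);
			assert_eq!(full, 16367);
			assert_eq!(pos_to_bucket(full), 26);
		}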
2100	}
2101}
2102use bucketed_history::{LegacyHistoricalBucketRangeTracker, HistoricalBucketRangeTracker, DirectedHistoricalLiquidityTracker, HistoricalLiquidityTracker};
2103
2104impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> Writeable for ProbabilisticScorer<G, L> where L::Target: Logger {
2105	#[inline]
2106	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
2107		write_tlv_fields!(w, {
2108			(0, self.channel_liquidities, required),
2109		});
2110		Ok(())
2111	}
2112}
2113
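// A hedged usage sketch for these (de)serialization impls (the variable names are illustrative):
// persist the scorer by calling `write` on it with some `Writer`, and restore it by passing the
// same decay parameters, network graph and logger it was originally constructed with, e.g.
// `ProbabilisticScorer::read(&mut reader, (decay_params, &network_graph, &logger))`.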
2114impl<G: Deref<Target = NetworkGraph<L>>, L: Deref>
2115ReadableArgs<(ProbabilisticScoringDecayParameters, G, L)> for ProbabilisticScorer<G, L> where L::Target: Logger {
2116	#[inline]
2117	fn read<R: Read>(
2118		r: &mut R, args: (ProbabilisticScoringDecayParameters, G, L)
2119	) -> Result<Self, DecodeError> {
2120		let (decay_params, network_graph, logger) = args;
2121		let mut channel_liquidities = new_hash_map();
2122		read_tlv_fields!(r, {
2123			(0, channel_liquidities, required),
2124		});
2125		Ok(Self {
2126			decay_params,
2127			network_graph,
2128			logger,
2129			channel_liquidities,
2130		})
2131	}
2132}
2133
2134impl Writeable for ChannelLiquidity {
2135	#[inline]
2136	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
2137		write_tlv_fields!(w, {
2138			(0, self.min_liquidity_offset_msat, required),
2139			// 1 was the min_liquidity_offset_history in octile form
2140			(2, self.max_liquidity_offset_msat, required),
2141			// 3 was the max_liquidity_offset_history in octile form
2142			(4, self.last_updated, required),
2143			(5, self.liquidity_history.writeable_min_offset_history(), required),
2144			(7, self.liquidity_history.writeable_max_offset_history(), required),
2145			(9, self.offset_history_last_updated, required),
2146		});
2147		Ok(())
2148	}
2149}
2150
2151impl Readable for ChannelLiquidity {
2152	#[inline]
2153	fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
2154		let mut min_liquidity_offset_msat = 0;
2155		let mut max_liquidity_offset_msat = 0;
2156		let mut legacy_min_liq_offset_history: Option<LegacyHistoricalBucketRangeTracker> = None;
2157		let mut legacy_max_liq_offset_history: Option<LegacyHistoricalBucketRangeTracker> = None;
2158		let mut min_liquidity_offset_history: Option<HistoricalBucketRangeTracker> = None;
2159		let mut max_liquidity_offset_history: Option<HistoricalBucketRangeTracker> = None;
2160		let mut last_updated = Duration::from_secs(0);
2161		let mut offset_history_last_updated = None;
2162		read_tlv_fields!(r, {
2163			(0, min_liquidity_offset_msat, required),
2164			(1, legacy_min_liq_offset_history, option),
2165			(2, max_liquidity_offset_msat, required),
2166			(3, legacy_max_liq_offset_history, option),
2167			(4, last_updated, required),
2168			(5, min_liquidity_offset_history, option),
2169			(7, max_liquidity_offset_history, option),
2170			(9, offset_history_last_updated, option),
2171		});
2172
2173		if min_liquidity_offset_history.is_none() {
2174			if let Some(legacy_buckets) = legacy_min_liq_offset_history {
2175				min_liquidity_offset_history = Some(legacy_buckets.into_current());
2176			} else {
2177				min_liquidity_offset_history = Some(HistoricalBucketRangeTracker::new());
2178			}
2179		}
2180		if max_liquidity_offset_history.is_none() {
2181			if let Some(legacy_buckets) = legacy_max_liq_offset_history {
2182				max_liquidity_offset_history = Some(legacy_buckets.into_current());
2183			} else {
2184				max_liquidity_offset_history = Some(HistoricalBucketRangeTracker::new());
2185			}
2186		}
2187		Ok(Self {
2188			min_liquidity_offset_msat,
2189			max_liquidity_offset_msat,
2190			liquidity_history: HistoricalLiquidityTracker::from_min_max(
2191				min_liquidity_offset_history.unwrap(), max_liquidity_offset_history.unwrap()
2192			),
2193			last_updated,
2194			offset_history_last_updated: offset_history_last_updated.unwrap_or(last_updated),
2195		})
2196	}
2197}
2198
2199#[cfg(test)]
2200mod tests {
2201	use super::{ChannelLiquidity, HistoricalLiquidityTracker, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters, ProbabilisticScorer};
2202	use crate::blinded_path::BlindedHop;
2203	use crate::util::config::UserConfig;
2204
2205	use crate::ln::channelmanager;
2206	use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, UnsignedChannelAnnouncement, UnsignedChannelUpdate};
2207	use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId};
2208	use crate::routing::router::{BlindedTail, Path, RouteHop, CandidateRouteHop, PublicHopCandidate};
2209	use crate::routing::scoring::{ChannelUsage, ScoreLookUp, ScoreUpdate};
2210	use crate::util::ser::{ReadableArgs, Writeable};
2211	use crate::util::test_utils::{self, TestLogger};
2212
2213	use bitcoin::constants::ChainHash;
2214	use bitcoin::hashes::Hash;
2215	use bitcoin::hashes::sha256d::Hash as Sha256dHash;
2216	use bitcoin::network::Network;
2217	use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
2218	use core::time::Duration;
2219	use crate::io;
2220
2221	fn source_privkey() -> SecretKey {
2222		SecretKey::from_slice(&[42; 32]).unwrap()
2223	}
2224
2225	fn target_privkey() -> SecretKey {
2226		SecretKey::from_slice(&[43; 32]).unwrap()
2227	}
2228
2229	fn source_pubkey() -> PublicKey {
2230		let secp_ctx = Secp256k1::new();
2231		PublicKey::from_secret_key(&secp_ctx, &source_privkey())
2232	}
2233
2234	fn target_pubkey() -> PublicKey {
2235		let secp_ctx = Secp256k1::new();
2236		PublicKey::from_secret_key(&secp_ctx, &target_privkey())
2237	}
2238
2239	fn source_node_id() -> NodeId {
2240		NodeId::from_pubkey(&source_pubkey())
2241	}
2242
2243	fn target_node_id() -> NodeId {
2244		NodeId::from_pubkey(&target_pubkey())
2245	}
2246
2247	// `ProbabilisticScorer` tests
2248
2249	fn sender_privkey() -> SecretKey {
2250		SecretKey::from_slice(&[41; 32]).unwrap()
2251	}
2252
2253	fn recipient_privkey() -> SecretKey {
2254		SecretKey::from_slice(&[45; 32]).unwrap()
2255	}
2256
2257	fn sender_pubkey() -> PublicKey {
2258		let secp_ctx = Secp256k1::new();
2259		PublicKey::from_secret_key(&secp_ctx, &sender_privkey())
2260	}
2261
2262	fn recipient_pubkey() -> PublicKey {
2263		let secp_ctx = Secp256k1::new();
2264		PublicKey::from_secret_key(&secp_ctx, &recipient_privkey())
2265	}
2266
2267	fn recipient_node_id() -> NodeId {
2268		NodeId::from_pubkey(&recipient_pubkey())
2269	}
2270
2271	fn network_graph(logger: &TestLogger) -> NetworkGraph<&TestLogger> {
2272		let mut network_graph = NetworkGraph::new(Network::Testnet, logger);
2273		add_channel(&mut network_graph, 42, source_privkey(), target_privkey());
2274		add_channel(&mut network_graph, 43, target_privkey(), recipient_privkey());
2275
2276		network_graph
2277	}
2278
2279	fn add_channel(
2280		network_graph: &mut NetworkGraph<&TestLogger>, short_channel_id: u64, node_1_key: SecretKey,
2281		node_2_key: SecretKey
2282	) {
2283		let genesis_hash = ChainHash::using_genesis_block(Network::Testnet);
2284		let node_1_secret = &SecretKey::from_slice(&[39; 32]).unwrap();
2285		let node_2_secret = &SecretKey::from_slice(&[40; 32]).unwrap();
2286		let secp_ctx = Secp256k1::new();
2287		let unsigned_announcement = UnsignedChannelAnnouncement {
2288			features: channelmanager::provided_channel_features(&UserConfig::default()),
2289			chain_hash: genesis_hash,
2290			short_channel_id,
2291			node_id_1: NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, &node_1_key)),
2292			node_id_2: NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, &node_2_key)),
2293			bitcoin_key_1: NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, &node_1_secret)),
2294			bitcoin_key_2: NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, &node_2_secret)),
2295			excess_data: Vec::new(),
2296		};
2297		let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
2298		let signed_announcement = ChannelAnnouncement {
2299			node_signature_1: secp_ctx.sign_ecdsa(&msghash, &node_1_key),
2300			node_signature_2: secp_ctx.sign_ecdsa(&msghash, &node_2_key),
2301			bitcoin_signature_1: secp_ctx.sign_ecdsa(&msghash, &node_1_secret),
2302			bitcoin_signature_2: secp_ctx.sign_ecdsa(&msghash, &node_2_secret),
2303			contents: unsigned_announcement,
2304		};
2305		let chain_source: Option<&crate::util::test_utils::TestChainSource> = None;
2306		network_graph.update_channel_from_announcement(
2307			&signed_announcement, &chain_source).unwrap();
2308		update_channel(network_graph, short_channel_id, node_1_key, 0, 1_000, 100);
2309		update_channel(network_graph, short_channel_id, node_2_key, 1, 0, 100);
2310	}
2311
2312	fn update_channel(
2313		network_graph: &mut NetworkGraph<&TestLogger>, short_channel_id: u64, node_key: SecretKey,
2314		channel_flags: u8, htlc_maximum_msat: u64, timestamp: u32,
2315	) {
2316		let genesis_hash = ChainHash::using_genesis_block(Network::Testnet);
2317		let secp_ctx = Secp256k1::new();
2318		let unsigned_update = UnsignedChannelUpdate {
2319			chain_hash: genesis_hash,
2320			short_channel_id,
2321			timestamp,
2322			message_flags: 1, // Only must_be_one
2323			channel_flags,
2324			cltv_expiry_delta: 18,
2325			htlc_minimum_msat: 0,
2326			htlc_maximum_msat,
2327			fee_base_msat: 1,
2328			fee_proportional_millionths: 0,
2329			excess_data: Vec::new(),
2330		};
2331		let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_update.encode()[..])[..]);
2332		let signed_update = ChannelUpdate {
2333			signature: secp_ctx.sign_ecdsa(&msghash, &node_key),
2334			contents: unsigned_update,
2335		};
2336		network_graph.update_channel(&signed_update).unwrap();
2337	}
2338
2339	fn path_hop(pubkey: PublicKey, short_channel_id: u64, fee_msat: u64) -> RouteHop {
2340		let config = UserConfig::default();
2341		RouteHop {
2342			pubkey,
2343			node_features: channelmanager::provided_node_features(&config),
2344			short_channel_id,
2345			channel_features: channelmanager::provided_channel_features(&config),
2346			fee_msat,
2347			cltv_expiry_delta: 18,
2348			maybe_announced_channel: true,
2349		}
2350	}
2351
2352	fn payment_path_for_amount(amount_msat: u64) -> Path {
2353		Path {
2354			hops: vec![
2355				path_hop(source_pubkey(), 41, 1),
2356				path_hop(target_pubkey(), 42, 2),
2357				path_hop(recipient_pubkey(), 43, amount_msat),
2358			], blinded_tail: None,
2359		}
2360	}
2361
2362	#[test]
2363	fn liquidity_bounds_directed_from_lowest_node_id() {
2364		let logger = TestLogger::new();
2365		let last_updated = Duration::ZERO;
2366		let offset_history_last_updated = Duration::ZERO;
2367		let network_graph = network_graph(&logger);
2368		let decay_params = ProbabilisticScoringDecayParameters::default();
2369		let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger)
2370			.with_channel(42,
2371				ChannelLiquidity {
2372					min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100,
2373					last_updated, offset_history_last_updated,
2374					liquidity_history: HistoricalLiquidityTracker::new(),
2375				})
2376			.with_channel(43,
2377				ChannelLiquidity {
2378					min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100,
2379					last_updated, offset_history_last_updated,
2380					liquidity_history: HistoricalLiquidityTracker::new(),
2381				});
2382		let source = source_node_id();
2383		let target = target_node_id();
2384		let recipient = recipient_node_id();
2385		assert!(source > target);
2386		assert!(target < recipient);
2387
2388		// Update minimum liquidity.
2389
2390		let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2391			.as_directed(&source, &target, 1_000);
2392		assert_eq!(liquidity.min_liquidity_msat(), 100);
2393		assert_eq!(liquidity.max_liquidity_msat(), 300);
2394
2395		let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2396			.as_directed(&target, &source, 1_000);
2397		assert_eq!(liquidity.min_liquidity_msat(), 700);
2398		assert_eq!(liquidity.max_liquidity_msat(), 900);
2399
2400		scorer.channel_liquidities.get_mut(&42).unwrap()
2401			.as_directed_mut(&source, &target, 1_000)
2402			.set_min_liquidity_msat(200, Duration::ZERO);
2403
2404		let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2405			.as_directed(&source, &target, 1_000);
2406		assert_eq!(liquidity.min_liquidity_msat(), 200);
2407		assert_eq!(liquidity.max_liquidity_msat(), 300);
2408
2409		let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2410			.as_directed(&target, &source, 1_000);
2411		assert_eq!(liquidity.min_liquidity_msat(), 700);
2412		assert_eq!(liquidity.max_liquidity_msat(), 800);
2413
2414		// Update maximum liquidity.
2415
2416		let liquidity = scorer.channel_liquidities.get(&43).unwrap()
2417			.as_directed(&target, &recipient, 1_000);
2418		assert_eq!(liquidity.min_liquidity_msat(), 700);
2419		assert_eq!(liquidity.max_liquidity_msat(), 900);
2420
2421		let liquidity = scorer.channel_liquidities.get(&43).unwrap()
2422			.as_directed(&recipient, &target, 1_000);
2423		assert_eq!(liquidity.min_liquidity_msat(), 100);
2424		assert_eq!(liquidity.max_liquidity_msat(), 300);
2425
2426		scorer.channel_liquidities.get_mut(&43).unwrap()
2427			.as_directed_mut(&target, &recipient, 1_000)
2428			.set_max_liquidity_msat(200, Duration::ZERO);
2429
2430		let liquidity = scorer.channel_liquidities.get(&43).unwrap()
2431			.as_directed(&target, &recipient, 1_000);
2432		assert_eq!(liquidity.min_liquidity_msat(), 0);
2433		assert_eq!(liquidity.max_liquidity_msat(), 200);
2434
2435		let liquidity = scorer.channel_liquidities.get(&43).unwrap()
2436			.as_directed(&recipient, &target, 1_000);
2437		assert_eq!(liquidity.min_liquidity_msat(), 800);
2438		assert_eq!(liquidity.max_liquidity_msat(), 1000);
2439	}
2440
2441	#[test]
2442	fn resets_liquidity_upper_bound_when_crossed_by_lower_bound() {
2443		let logger = TestLogger::new();
2444		let last_updated = Duration::ZERO;
2445		let offset_history_last_updated = Duration::ZERO;
2446		let network_graph = network_graph(&logger);
2447		let decay_params = ProbabilisticScoringDecayParameters::default();
2448		let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger)
2449			.with_channel(42,
2450				ChannelLiquidity {
2451					min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400,
2452					last_updated, offset_history_last_updated,
2453					liquidity_history: HistoricalLiquidityTracker::new(),
2454				});
2455		let source = source_node_id();
2456		let target = target_node_id();
2457		assert!(source > target);
2458
2459		// Check initial bounds.
2460		let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2461			.as_directed(&source, &target, 1_000);
2462		assert_eq!(liquidity.min_liquidity_msat(), 400);
2463		assert_eq!(liquidity.max_liquidity_msat(), 800);
2464
2465		let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2466			.as_directed(&target, &source, 1_000);
2467		assert_eq!(liquidity.min_liquidity_msat(), 200);
2468		assert_eq!(liquidity.max_liquidity_msat(), 600);
2469
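		// Raising the lower bound above the current upper bound should discard the now-stale
		// upper bound, resetting it to the full channel capacity rather than leaving the bounds
		// inconsistent.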
2470		// Reset from source to target.
2471		scorer.channel_liquidities.get_mut(&42).unwrap()
2472			.as_directed_mut(&source, &target, 1_000)
2473			.set_min_liquidity_msat(900, Duration::ZERO);
2474
2475		let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2476			.as_directed(&source, &target, 1_000);
2477		assert_eq!(liquidity.min_liquidity_msat(), 900);
2478		assert_eq!(liquidity.max_liquidity_msat(), 1_000);
2479
2480		let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2481			.as_directed(&target, &source, 1_000);
2482		assert_eq!(liquidity.min_liquidity_msat(), 0);
2483		assert_eq!(liquidity.max_liquidity_msat(), 100);
2484
2485		// Reset from target to source.
2486		scorer.channel_liquidities.get_mut(&42).unwrap()
2487			.as_directed_mut(&target, &source, 1_000)
2488			.set_min_liquidity_msat(400, Duration::ZERO);
2489
2490		let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2491			.as_directed(&source, &target, 1_000);
2492		assert_eq!(liquidity.min_liquidity_msat(), 0);
2493		assert_eq!(liquidity.max_liquidity_msat(), 600);
2494
2495		let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2496			.as_directed(&target, &source, 1_000);
2497		assert_eq!(liquidity.min_liquidity_msat(), 400);
2498		assert_eq!(liquidity.max_liquidity_msat(), 1_000);
2499	}
2500
2501	#[test]
2502	fn resets_liquidity_lower_bound_when_crossed_by_upper_bound() {
2503		let logger = TestLogger::new();
2504		let last_updated = Duration::ZERO;
2505		let offset_history_last_updated = Duration::ZERO;
2506		let network_graph = network_graph(&logger);
2507		let decay_params = ProbabilisticScoringDecayParameters::default();
2508		let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger)
2509			.with_channel(42,
2510				ChannelLiquidity {
2511					min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400,
2512					last_updated, offset_history_last_updated,
2513					liquidity_history: HistoricalLiquidityTracker::new(),
2514				});
2515		let source = source_node_id();
2516		let target = target_node_id();
2517		assert!(source > target);
2518
2519		// Check initial bounds.
2520		let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2521			.as_directed(&source, &target, 1_000);
2522		assert_eq!(liquidity.min_liquidity_msat(), 400);
2523		assert_eq!(liquidity.max_liquidity_msat(), 800);
2524
2525		let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2526			.as_directed(&target, &source, 1_000);
2527		assert_eq!(liquidity.min_liquidity_msat(), 200);
2528		assert_eq!(liquidity.max_liquidity_msat(), 600);
2529
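		// Conversely, lowering the upper bound below the current lower bound should discard the
		// now-stale lower bound, resetting it to zero.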
2530		// Reset from source to target.
2531		scorer.channel_liquidities.get_mut(&42).unwrap()
2532			.as_directed_mut(&source, &target, 1_000)
2533			.set_max_liquidity_msat(300, Duration::ZERO);
2534
2535		let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2536			.as_directed(&source, &target, 1_000);
2537		assert_eq!(liquidity.min_liquidity_msat(), 0);
2538		assert_eq!(liquidity.max_liquidity_msat(), 300);
2539
2540		let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2541			.as_directed(&target, &source, 1_000);
2542		assert_eq!(liquidity.min_liquidity_msat(), 700);
2543		assert_eq!(liquidity.max_liquidity_msat(), 1_000);
2544
2545		// Reset from target to source.
2546		scorer.channel_liquidities.get_mut(&42).unwrap()
2547			.as_directed_mut(&target, &source, 1_000)
2548			.set_max_liquidity_msat(600, Duration::ZERO);
2549
2550		let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2551			.as_directed(&source, &target, 1_000);
2552		assert_eq!(liquidity.min_liquidity_msat(), 400);
2553		assert_eq!(liquidity.max_liquidity_msat(), 1_000);
2554
2555		let liquidity = scorer.channel_liquidities.get(&42).unwrap()
2556			.as_directed(&target, &source, 1_000);
2557		assert_eq!(liquidity.min_liquidity_msat(), 0);
2558		assert_eq!(liquidity.max_liquidity_msat(), 600);
2559	}
2560
2561	#[test]
2562	fn increased_penalty_nearing_liquidity_upper_bound() {
2563		let logger = TestLogger::new();
2564		let network_graph = network_graph(&logger);
2565		let params = ProbabilisticScoringFeeParameters {
2566			liquidity_penalty_multiplier_msat: 1_000,
2567			..ProbabilisticScoringFeeParameters::zero_penalty()
2568		};
2569		let decay_params = ProbabilisticScoringDecayParameters::default();
2570		let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
2571		let source = source_node_id();
2572
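		// With no outcome data the scorer assumes liquidity is distributed uniformly, so the
		// penalty is roughly liquidity_penalty_multiplier_msat * -log10((capacity - amount) /
		// capacity), and it saturates (here at 2_000 msat) once the estimated success probability
		// becomes negligible.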
2573		let usage = ChannelUsage {
2574			amount_msat: 1_024,
2575			inflight_htlc_msat: 0,
2576			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 1_000 },
2577		};
2578		let network_graph = network_graph.read_only();
2579		let channel = network_graph.channel(42).unwrap();
2580		let (info, _) = channel.as_directed_from(&source).unwrap();
2581		let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
2582			info,
2583			short_channel_id: 42,
2584		});
2585		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
2586		let usage = ChannelUsage { amount_msat: 10_240, ..usage };
2587		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
2588		let usage = ChannelUsage { amount_msat: 102_400, ..usage };
2589		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 47);
2590		let usage = ChannelUsage { amount_msat: 1_023_999, ..usage };
2591		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2_000);
2592
2593		let usage = ChannelUsage {
2594			amount_msat: 128,
2595			inflight_htlc_msat: 0,
2596			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
2597		};
2598		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 58);
2599		let usage = ChannelUsage { amount_msat: 256, ..usage };
2600		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 125);
2601		let usage = ChannelUsage { amount_msat: 374, ..usage };
2602		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 198);
2603		let usage = ChannelUsage { amount_msat: 512, ..usage };
2604		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 300);
2605		let usage = ChannelUsage { amount_msat: 640, ..usage };
2606		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 425);
2607		let usage = ChannelUsage { amount_msat: 768, ..usage };
2608		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 602);
2609		let usage = ChannelUsage { amount_msat: 896, ..usage };
2610		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 902);
2611	}
2612
2613	#[test]
2614	fn constant_penalty_outside_liquidity_bounds() {
2615		let logger = TestLogger::new();
2616		let last_updated = Duration::ZERO;
2617		let offset_history_last_updated = Duration::ZERO;
2618		let network_graph = network_graph(&logger);
2619		let params = ProbabilisticScoringFeeParameters {
2620			liquidity_penalty_multiplier_msat: 1_000,
2621			considered_impossible_penalty_msat: u64::max_value(),
2622			..ProbabilisticScoringFeeParameters::zero_penalty()
2623		};
2624		let decay_params = ProbabilisticScoringDecayParameters {
2625			..ProbabilisticScoringDecayParameters::zero_penalty()
2626		};
2627		let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger)
2628			.with_channel(42,
2629				ChannelLiquidity {
2630					min_liquidity_offset_msat: 40, max_liquidity_offset_msat: 40,
2631					last_updated, offset_history_last_updated,
2632					liquidity_history: HistoricalLiquidityTracker::new(),
2633				});
2634		let source = source_node_id();
2635
2636		let usage = ChannelUsage {
2637			amount_msat: 39,
2638			inflight_htlc_msat: 0,
2639			effective_capacity: EffectiveCapacity::Total { capacity_msat: 100, htlc_maximum_msat: 1_000 },
2640		};
2641		let channel = network_graph.read_only().channel(42).unwrap().to_owned();
2642		let (info, _) = channel.as_directed_from(&source).unwrap();
2643		let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
2644			info,
2645			short_channel_id: 42,
2646		});
2647		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
2648		let usage = ChannelUsage { amount_msat: 50, ..usage };
2649		assert_ne!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
2650		assert_ne!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
2651		let usage = ChannelUsage { amount_msat: 61, ..usage };
2652		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
2653	}
2654
2655	#[test]
2656	fn does_not_further_penalize_own_channel() {
2657		let logger = TestLogger::new();
2658		let network_graph = network_graph(&logger);
2659		let params = ProbabilisticScoringFeeParameters {
2660			liquidity_penalty_multiplier_msat: 1_000,
2661			..ProbabilisticScoringFeeParameters::zero_penalty()
2662		};
2663		let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
2664		let source = source_node_id();
2665		let usage = ChannelUsage {
2666			amount_msat: 500,
2667			inflight_htlc_msat: 0,
2668			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
2669		};
2670		let failed_path = payment_path_for_amount(500);
2671		let successful_path = payment_path_for_amount(200);
2672		let channel = &network_graph.read_only().channel(42).unwrap().to_owned();
2673		let (info, _) = channel.as_directed_from(&source).unwrap();
2674		let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
2675			info,
2676			short_channel_id: 41,
2677		});
2678
2679		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 301);
2680
2681		scorer.payment_path_failed(&failed_path, 41, Duration::ZERO);
2682		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 301);
2683
2684		scorer.payment_path_successful(&successful_path, Duration::ZERO);
2685		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 301);
2686	}
2687
2688	#[test]
2689	fn sets_liquidity_lower_bound_on_downstream_failure() {
2690		let logger = TestLogger::new();
2691		let network_graph = network_graph(&logger);
2692		let params = ProbabilisticScoringFeeParameters {
2693			liquidity_penalty_multiplier_msat: 1_000,
2694			..ProbabilisticScoringFeeParameters::zero_penalty()
2695		};
2696		let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
2697		let source = source_node_id();
2698		let path = payment_path_for_amount(500);
2699
2700		let usage = ChannelUsage {
2701			amount_msat: 250,
2702			inflight_htlc_msat: 0,
2703			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
2704		};
2705		let channel = network_graph.read_only().channel(42).unwrap().to_owned();
2706		let (info, _) = channel.as_directed_from(&source).unwrap();
2707		let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
2708			info,
2709			short_channel_id: 42,
2710		});
2711		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 128);
2712		let usage = ChannelUsage { amount_msat: 500, ..usage };
2713		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 301);
2714		let usage = ChannelUsage { amount_msat: 750, ..usage };
2715		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 602);
2716
2717		scorer.payment_path_failed(&path, 43, Duration::ZERO);
2718
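		// The failure occurred at channel 43, downstream of channel 42, which proves the full
		// 500 msat made it across channel 42. Its lower bound therefore rises to 500, making
		// amounts at or below that free of liquidity penalty and larger amounts cheaper than before.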
2719		let usage = ChannelUsage { amount_msat: 250, ..usage };
2720		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
2721		let usage = ChannelUsage { amount_msat: 500, ..usage };
2722		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
2723		let usage = ChannelUsage { amount_msat: 750, ..usage };
2724		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 300);
2725	}
2726
2727	#[test]
2728	fn sets_liquidity_upper_bound_on_failure() {
2729		let logger = TestLogger::new();
2730		let network_graph = network_graph(&logger);
2731		let params = ProbabilisticScoringFeeParameters {
2732			liquidity_penalty_multiplier_msat: 1_000,
2733			considered_impossible_penalty_msat: u64::max_value(),
2734			..ProbabilisticScoringFeeParameters::zero_penalty()
2735		};
2736		let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
2737		let source = source_node_id();
2738		let path = payment_path_for_amount(500);
2739
2740		let usage = ChannelUsage {
2741			amount_msat: 250,
2742			inflight_htlc_msat: 0,
2743			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
2744		};
2745		let channel = network_graph.read_only().channel(42).unwrap().to_owned();
2746		let (info, _) = channel.as_directed_from(&source).unwrap();
2747		let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
2748			info,
2749			short_channel_id: 42,
2750		});
2751		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 128);
2752		let usage = ChannelUsage { amount_msat: 500, ..usage };
2753		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 301);
2754		let usage = ChannelUsage { amount_msat: 750, ..usage };
2755		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 602);
2756
2757		scorer.payment_path_failed(&path, 42, Duration::ZERO);
2758
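		// Here the failure occurred at channel 42 itself, capping its upper bound at the 500 msat
		// that failed to flow: 250 msat now carries a higher penalty, and amounts of 500 msat or
		// more are considered impossible.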
2759		let usage = ChannelUsage { amount_msat: 250, ..usage };
2760		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 300);
2761		let usage = ChannelUsage { amount_msat: 500, ..usage };
2762		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
2763		let usage = ChannelUsage { amount_msat: 750, ..usage };
2764		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
2765	}
2766
2767	#[test]
2768	fn ignores_channels_after_removed_failed_channel() {
2769		// Previously, if we'd tried to send over a channel which was removed from the network
2770		// graph before we call `payment_path_failed` (which is the default if we get a "no
2771		// such channel" error in the `InvoicePayer`), we would call `failed_downstream` on all
2772		// channels in the route, even ones which the payment never reached. This test ensures that
2773		// we do not score such channels.
2774		let secp_ctx = Secp256k1::new();
2775		let logger = TestLogger::new();
2776		let mut network_graph = NetworkGraph::new(Network::Testnet, &logger);
2777		let secret_a = SecretKey::from_slice(&[42; 32]).unwrap();
2778		let secret_b = SecretKey::from_slice(&[43; 32]).unwrap();
2779		let secret_c = SecretKey::from_slice(&[44; 32]).unwrap();
2780		let secret_d = SecretKey::from_slice(&[45; 32]).unwrap();
2781		add_channel(&mut network_graph, 42, secret_a, secret_b);
2782		// Don't add the channel from B -> C.
2783		add_channel(&mut network_graph, 44, secret_c, secret_d);
2784
2785		let pub_a = PublicKey::from_secret_key(&secp_ctx, &secret_a);
2786		let pub_b = PublicKey::from_secret_key(&secp_ctx, &secret_b);
2787		let pub_c = PublicKey::from_secret_key(&secp_ctx, &secret_c);
2788		let pub_d = PublicKey::from_secret_key(&secp_ctx, &secret_d);
2789
2790		let path = vec![
2791			path_hop(pub_b, 42, 1),
2792			path_hop(pub_c, 43, 2),
2793			path_hop(pub_d, 44, 100),
2794		];
2795
2796		let node_a = NodeId::from_pubkey(&pub_a);
2797		let node_b = NodeId::from_pubkey(&pub_b);
2798		let node_c = NodeId::from_pubkey(&pub_c);
2799
2800		let params = ProbabilisticScoringFeeParameters {
2801			liquidity_penalty_multiplier_msat: 1_000,
2802			..ProbabilisticScoringFeeParameters::zero_penalty()
2803		};
2804		let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
2805
2806		let usage = ChannelUsage {
2807			amount_msat: 250,
2808			inflight_htlc_msat: 0,
2809			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
2810		};
2811		let channel = network_graph.read_only().channel(42).unwrap().to_owned();
2812		let (info, _) = channel.as_directed_from(&node_a).unwrap();
2813		let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
2814			info,
2815			short_channel_id: 42,
2816		});
2817		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 128);
2818		// Note that a default liquidity bound is used for B -> C as no channel exists
2819		let channel = network_graph.read_only().channel(42).unwrap().to_owned();
2820		let (info, _) = channel.as_directed_from(&node_b).unwrap();
2821		let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
2822			info,
2823			short_channel_id: 43,
2824		});
2825		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 128);
2826		let channel = network_graph.read_only().channel(44).unwrap().to_owned();
2827		let (info, _) = channel.as_directed_from(&node_c).unwrap();
2828		let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
2829			info,
2830			short_channel_id: 44,
2831		});
2832		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 128);
2833
2834		scorer.payment_path_failed(&Path { hops: path, blinded_tail: None }, 43, Duration::ZERO);
2835
2836		let channel = network_graph.read_only().channel(42).unwrap().to_owned();
2837		let (info, _) = channel.as_directed_from(&node_a).unwrap();
2838		let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
2839			info,
2840			short_channel_id: 42,
2841		});
2842		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 80);
2843		// Note that a default liquidity bound is used for B -> C as no channel exists
2844		let channel = network_graph.read_only().channel(42).unwrap().to_owned();
2845		let (info, _) = channel.as_directed_from(&node_b).unwrap();
2846		let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
2847			info,
2848			short_channel_id: 43,
2849		});
2850		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 128);
2851		let channel = network_graph.read_only().channel(44).unwrap().to_owned();
2852		let (info, _) = channel.as_directed_from(&node_c).unwrap();
2853		let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
2854			info,
2855			short_channel_id: 44,
2856		});
2857		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 128);
2858	}
2859
2860	#[test]
2861	fn reduces_liquidity_upper_bound_along_path_on_success() {
2862		let logger = TestLogger::new();
2863		let network_graph = network_graph(&logger);
2864		let params = ProbabilisticScoringFeeParameters {
2865			liquidity_penalty_multiplier_msat: 1_000,
2866			..ProbabilisticScoringFeeParameters::zero_penalty()
2867		};
2868		let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
2869		let source = source_node_id();
2870		let usage = ChannelUsage {
2871			amount_msat: 250,
2872			inflight_htlc_msat: 0,
2873			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
2874		};
2875		let network_graph = network_graph.read_only().channels().clone();
2876		let channel_42 = network_graph.get(&42).unwrap();
2877		let channel_43 = network_graph.get(&43).unwrap();
2878		let (info, _) = channel_42.as_directed_from(&source).unwrap();
2879		let candidate_41 = CandidateRouteHop::PublicHop(PublicHopCandidate {
2880			info,
2881			short_channel_id: 41,
2882		});
2883		let (info, target) = channel_42.as_directed_from(&source).unwrap();
2884		let candidate_42 = CandidateRouteHop::PublicHop(PublicHopCandidate {
2885			info,
2886			short_channel_id: 42,
2887		});
2888		let (info, _) = channel_43.as_directed_from(&target).unwrap();
2889		let candidate_43 = CandidateRouteHop::PublicHop(PublicHopCandidate {
2890			info,
2891			short_channel_id: 43,
2892		});
2893		assert_eq!(scorer.channel_penalty_msat(&candidate_41, usage, &params), 128);
2894		assert_eq!(scorer.channel_penalty_msat(&candidate_42, usage, &params), 128);
2895		assert_eq!(scorer.channel_penalty_msat(&candidate_43, usage, &params), 128);
2896
2897		scorer.payment_path_successful(&payment_path_for_amount(500), Duration::ZERO);
2898
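		// A successful 500 msat payment implies that much liquidity just flowed through channels
		// 42 and 43, lowering their upper bounds and raising their penalties, while channel 41,
		// the sender's own first hop, stays at the default penalty.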
2899		assert_eq!(scorer.channel_penalty_msat(&candidate_41, usage, &params), 128);
2900		assert_eq!(scorer.channel_penalty_msat(&candidate_42, usage, &params), 300);
2901		assert_eq!(scorer.channel_penalty_msat(&candidate_43, usage, &params), 300);
2902	}
2903
2904	#[test]
2905	fn decays_liquidity_bounds_over_time() {
2906		let logger = TestLogger::new();
2907		let network_graph = network_graph(&logger);
2908		let params = ProbabilisticScoringFeeParameters {
2909			liquidity_penalty_multiplier_msat: 1_000,
2910			considered_impossible_penalty_msat: u64::max_value(),
2911			..ProbabilisticScoringFeeParameters::zero_penalty()
2912		};
2913		let decay_params = ProbabilisticScoringDecayParameters {
2914			liquidity_offset_half_life: Duration::from_secs(10),
2915			..ProbabilisticScoringDecayParameters::zero_penalty()
2916		};
2917		let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
2918		let source = source_node_id();
2919
2920		let usage = ChannelUsage {
2921			amount_msat: 0,
2922			inflight_htlc_msat: 0,
2923			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
2924		};
2925		let channel = network_graph.read_only().channel(42).unwrap().to_owned();
2926		let (info, _) = channel.as_directed_from(&source).unwrap();
2927		let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
2928			info,
2929			short_channel_id: 42,
2930		});
2931		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
2932		let usage = ChannelUsage { amount_msat: 1_023, ..usage };
2933		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2_000);
2934
2935		scorer.payment_path_failed(&payment_path_for_amount(768), 42, Duration::ZERO);
2936		scorer.payment_path_failed(&payment_path_for_amount(128), 43, Duration::ZERO);
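		// The two failures establish bounds of roughly [128, 768] for channel 42: the downstream
		// failure raises the lower bound and the direct failure lowers the upper bound. With a
		// 10 second half-life the offsets then decay back toward [0, capacity] below.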
2937
2938		// Initial penalties
2939		let usage = ChannelUsage { amount_msat: 128, ..usage };
2940		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
2941		let usage = ChannelUsage { amount_msat: 256, ..usage };
2942		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 93);
2943		let usage = ChannelUsage { amount_msat: 768, ..usage };
2944		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 1_479);
2945		let usage = ChannelUsage { amount_msat: 896, ..usage };
2946		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
2947
2948		// Half decay (i.e., three-quarter life)
2949		scorer.time_passed(Duration::from_secs(5));
2950		let usage = ChannelUsage { amount_msat: 128, ..usage };
2951		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 22);
2952		let usage = ChannelUsage { amount_msat: 256, ..usage };
2953		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 106);
2954		let usage = ChannelUsage { amount_msat: 768, ..usage };
2955		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 921);
2956		let usage = ChannelUsage { amount_msat: 896, ..usage };
2957		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
2958
2959		// One decay (i.e., half life)
2960		scorer.time_passed(Duration::from_secs(10));
2961		let usage = ChannelUsage { amount_msat: 64, ..usage };
2962		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
2963		let usage = ChannelUsage { amount_msat: 128, ..usage };
2964		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 34);
2965		let usage = ChannelUsage { amount_msat: 896, ..usage };
2966		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 1_970);
2967		let usage = ChannelUsage { amount_msat: 960, ..usage };
2968		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
2969
2970		// Fully decay liquidity lower bound.
2971		scorer.time_passed(Duration::from_secs(10 * 8));
2972		let usage = ChannelUsage { amount_msat: 0, ..usage };
2973		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
2974		let usage = ChannelUsage { amount_msat: 1, ..usage };
2975		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
2976		let usage = ChannelUsage { amount_msat: 1_023, ..usage };
2977		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2_000);
2978		let usage = ChannelUsage { amount_msat: 1_024, ..usage };
2979		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
2980
2981		// Fully decay liquidity upper bound.
2982		scorer.time_passed(Duration::from_secs(10 * 9));
2983		let usage = ChannelUsage { amount_msat: 0, ..usage };
2984		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
2985		let usage = ChannelUsage { amount_msat: 1_024, ..usage };
2986		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
2987
2988		scorer.time_passed(Duration::from_secs(10 * 10));
2989		let usage = ChannelUsage { amount_msat: 0, ..usage };
2990		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
2991		let usage = ChannelUsage { amount_msat: 1_024, ..usage };
2992		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
2993	}
2994
2995	#[test]
2996	fn restricts_liquidity_bounds_after_decay() {
2997		let logger = TestLogger::new();
2998		let network_graph = network_graph(&logger);
2999		let params = ProbabilisticScoringFeeParameters {
3000			liquidity_penalty_multiplier_msat: 1_000,
3001			..ProbabilisticScoringFeeParameters::zero_penalty()
3002		};
3003		let decay_params = ProbabilisticScoringDecayParameters {
3004			liquidity_offset_half_life: Duration::from_secs(10),
3005			..ProbabilisticScoringDecayParameters::default()
3006		};
3007		let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
3008		let source = source_node_id();
3009		let usage = ChannelUsage {
3010			amount_msat: 512,
3011			inflight_htlc_msat: 0,
3012			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
3013		};
3014		let channel = network_graph.read_only().channel(42).unwrap().to_owned();
3015		let (info, _) = channel.as_directed_from(&source).unwrap();
3016		let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
3017			info,
3018			short_channel_id: 42,
3019		});
3020
3021		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 300);
3022
3023		// More knowledge gives higher confidence (256, 768), meaning a lower penalty.
3024		scorer.payment_path_failed(&payment_path_for_amount(768), 42, Duration::ZERO);
3025		scorer.payment_path_failed(&payment_path_for_amount(256), 43, Duration::ZERO);
3026		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 281);
3027
3028		// Decaying knowledge gives less confidence (128, 896), meaning a higher penalty.
3029		scorer.time_passed(Duration::from_secs(10));
3030		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 291);
3031
3032		// Reducing the upper bound gives more confidence (128, 832) that the payment amount (512)
3033		// is closer to the upper bound, meaning a higher penalty.
3034		scorer.payment_path_successful(&payment_path_for_amount(64), Duration::from_secs(10));
3035		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 331);
3036
3037		// Increasing the lower bound gives more confidence (256, 832) that the payment amount (512)
3038		// is closer to the lower bound, meaning a lower penalty.
3039		scorer.payment_path_failed(&payment_path_for_amount(256), 43, Duration::from_secs(10));
3040		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 245);
3041
3042		// Further decaying affects the lower bound more than the upper bound (128, 928).
3043		scorer.time_passed(Duration::from_secs(20));
3044		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 280);
3045	}
3046
3047	#[test]
3048	fn restores_persisted_liquidity_bounds() {
3049		let logger = TestLogger::new();
3050		let network_graph = network_graph(&logger);
3051		let params = ProbabilisticScoringFeeParameters {
3052			liquidity_penalty_multiplier_msat: 1_000,
3053			considered_impossible_penalty_msat: u64::max_value(),
3054			..ProbabilisticScoringFeeParameters::zero_penalty()
3055		};
3056		let decay_params = ProbabilisticScoringDecayParameters {
3057			liquidity_offset_half_life: Duration::from_secs(10),
3058			..ProbabilisticScoringDecayParameters::default()
3059		};
3060		let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
3061		let source = source_node_id();
3062		let usage = ChannelUsage {
3063			amount_msat: 500,
3064			inflight_htlc_msat: 0,
3065			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
3066		};
3067
3068		scorer.payment_path_failed(&payment_path_for_amount(500), 42, Duration::ZERO);
3069		let channel = network_graph.read_only().channel(42).unwrap().to_owned();
3070		let (info, _) = channel.as_directed_from(&source).unwrap();
3071		let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
3072			info,
3073			short_channel_id: 42,
3074		});
3075		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
3076
3077		scorer.time_passed(Duration::from_secs(10));
3078		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 473);
3079
3080		scorer.payment_path_failed(&payment_path_for_amount(250), 43, Duration::from_secs(10));
3081		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 300);
3082
3083		let mut serialized_scorer = Vec::new();
3084		scorer.write(&mut serialized_scorer).unwrap();
3085
3086		let mut serialized_scorer = io::Cursor::new(&serialized_scorer);
3087		let deserialized_scorer =
3088			<ProbabilisticScorer<_, _>>::read(&mut serialized_scorer, (decay_params, &network_graph, &logger)).unwrap();
3089		assert_eq!(deserialized_scorer.channel_penalty_msat(&candidate, usage, &params), 300);
3090	}
3091
3092	fn do_decays_persisted_liquidity_bounds(decay_before_reload: bool) {
3093		let logger = TestLogger::new();
3094		let network_graph = network_graph(&logger);
3095		let params = ProbabilisticScoringFeeParameters {
3096			liquidity_penalty_multiplier_msat: 1_000,
3097			considered_impossible_penalty_msat: u64::max_value(),
3098			..ProbabilisticScoringFeeParameters::zero_penalty()
3099		};
3100		let decay_params = ProbabilisticScoringDecayParameters {
3101			liquidity_offset_half_life: Duration::from_secs(10),
3102			..ProbabilisticScoringDecayParameters::zero_penalty()
3103		};
3104		let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
3105		let source = source_node_id();
3106		let usage = ChannelUsage {
3107			amount_msat: 500,
3108			inflight_htlc_msat: 0,
3109			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
3110		};
3111
3112		scorer.payment_path_failed(&payment_path_for_amount(500), 42, Duration::ZERO);
3113		let channel = network_graph.read_only().channel(42).unwrap().to_owned();
3114		let (info, _) = channel.as_directed_from(&source).unwrap();
3115		let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
3116			info,
3117			short_channel_id: 42,
3118		});
3119		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
3120
3121		if decay_before_reload {
3122			scorer.time_passed(Duration::from_secs(10));
3123		}
3124
3125		let mut serialized_scorer = Vec::new();
3126		scorer.write(&mut serialized_scorer).unwrap();
3127
3128		let mut serialized_scorer = io::Cursor::new(&serialized_scorer);
3129		let mut deserialized_scorer =
3130			<ProbabilisticScorer<_, _>>::read(&mut serialized_scorer, (decay_params, &network_graph, &logger)).unwrap();
3131		if !decay_before_reload {
3132			scorer.time_passed(Duration::from_secs(10));
3133			deserialized_scorer.time_passed(Duration::from_secs(10));
3134		}
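		// Whether the decay ran before or after serialization, the deserialized scorer should
		// report the same decayed penalty here.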
3135		assert_eq!(deserialized_scorer.channel_penalty_msat(&candidate, usage, &params), 473);
3136
3137		scorer.payment_path_failed(&payment_path_for_amount(250), 43, Duration::from_secs(10));
3138		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 300);
3139
3140		deserialized_scorer.time_passed(Duration::from_secs(20));
3141		assert_eq!(deserialized_scorer.channel_penalty_msat(&candidate, usage, &params), 370);
3142	}
3143
3144	#[test]
3145	fn decays_persisted_liquidity_bounds() {
3146		do_decays_persisted_liquidity_bounds(false);
3147		do_decays_persisted_liquidity_bounds(true);
3148	}
3149
3150	#[test]
3151	fn scores_realistic_payments() {
3152		// Shows the scores of "realistic" sends of 100k sats over channels of 1-10m sats (with a
3153		// 50k sat reserve).
3154		let logger = TestLogger::new();
3155		let network_graph = network_graph(&logger);
3156		let params = ProbabilisticScoringFeeParameters::default();
3157		let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
3158		let source = source_node_id();
3159
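		// With default parameters the penalty falls as the channel grows relative to the 100k sat
		// payment, from ~42 sats against a ~1M sat channel down to ~27 sats against a ~10M sat one.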
3160		let usage = ChannelUsage {
3161			amount_msat: 100_000_000,
3162			inflight_htlc_msat: 0,
3163			effective_capacity: EffectiveCapacity::Total { capacity_msat: 950_000_000, htlc_maximum_msat: 1_000 },
3164		};
3165		let channel = network_graph.read_only().channel(42).unwrap().to_owned();
3166		let (info, _) = channel.as_directed_from(&source).unwrap();
3167		let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
3168			info,
3169			short_channel_id: 42,
3170		});
3171		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 42_252);
3172		let usage = ChannelUsage {
3173			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
3174		};
3175		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 36_005);
3176		let usage = ChannelUsage {
3177			effective_capacity: EffectiveCapacity::Total { capacity_msat: 2_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
3178		};
3179		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 32_851);
3180		let usage = ChannelUsage {
3181			effective_capacity: EffectiveCapacity::Total { capacity_msat: 3_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
3182		};
3183		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 30_832);
3184		let usage = ChannelUsage {
3185			effective_capacity: EffectiveCapacity::Total { capacity_msat: 4_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
3186		};
3187		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 29_886);
3188		let usage = ChannelUsage {
3189			effective_capacity: EffectiveCapacity::Total { capacity_msat: 5_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
3190		};
3191		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 28_939);
3192		let usage = ChannelUsage {
3193			effective_capacity: EffectiveCapacity::Total { capacity_msat: 6_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
3194		};
3195		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 28_435);
3196		let usage = ChannelUsage {
3197			effective_capacity: EffectiveCapacity::Total { capacity_msat: 7_450_000_000, htlc_maximum_msat: 1_000 }, ..usage
3198		};
3199		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 27_993);
3200		let usage = ChannelUsage {
3201			effective_capacity: EffectiveCapacity::Total { capacity_msat: 7_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
3202		};
3203		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 27_993);
3204		let usage = ChannelUsage {
3205			effective_capacity: EffectiveCapacity::Total { capacity_msat: 8_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
3206		};
3207		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 27_488);
3208		let usage = ChannelUsage {
3209			effective_capacity: EffectiveCapacity::Total { capacity_msat: 9_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
3210		};
3211		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 27_047);
3212	}
3213
3214	#[test]
3215	fn adds_base_penalty_to_liquidity_penalty() {
3216		let logger = TestLogger::new();
3217		let network_graph = network_graph(&logger);
3218		let source = source_node_id();
3219		let usage = ChannelUsage {
3220			amount_msat: 128,
3221			inflight_htlc_msat: 0,
3222			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
3223		};
3224
3225		let params = ProbabilisticScoringFeeParameters {
3226			liquidity_penalty_multiplier_msat: 1_000,
3227			..ProbabilisticScoringFeeParameters::zero_penalty()
3228		};
3229		let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
3230		let channel = network_graph.read_only().channel(42).unwrap().to_owned();
3231		let (info, _) = channel.as_directed_from(&source).unwrap();
3232		let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
3233			info,
3234			short_channel_id: 42,
3235		});
3236		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 58);
3237
3238		let params = ProbabilisticScoringFeeParameters {
3239			base_penalty_msat: 500, liquidity_penalty_multiplier_msat: 1_000,
3240			anti_probing_penalty_msat: 0, ..ProbabilisticScoringFeeParameters::zero_penalty()
3241		};
3242		let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
3243		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 558);
3244
3245		let params = ProbabilisticScoringFeeParameters {
3246			base_penalty_msat: 500, liquidity_penalty_multiplier_msat: 1_000,
3247			base_penalty_amount_multiplier_msat: (1 << 30),
3248			anti_probing_penalty_msat: 0, ..ProbabilisticScoringFeeParameters::zero_penalty()
3249		};
3250
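		// base_penalty_amount_multiplier_msat appears to be applied as multiplier * amount / 2^30,
		// so a multiplier of 1 << 30 adds the full 128 msat amount on top of the 558 msat above.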
3251		let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
3252		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 558 + 128);
3253	}
3254
3255	#[test]
3256	fn adds_amount_penalty_to_liquidity_penalty() {
3257		let logger = TestLogger::new();
3258		let network_graph = network_graph(&logger);
3259		let source = source_node_id();
3260		let usage = ChannelUsage {
3261			amount_msat: 512_000,
3262			inflight_htlc_msat: 0,
3263			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 1_000 },
3264		};
3265
3266		let params = ProbabilisticScoringFeeParameters {
3267			liquidity_penalty_multiplier_msat: 1_000,
3268			liquidity_penalty_amount_multiplier_msat: 0,
3269			..ProbabilisticScoringFeeParameters::zero_penalty()
3270		};
3271		let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
3272		let channel = network_graph.read_only().channel(42).unwrap().to_owned();
3273		let (info, _) = channel.as_directed_from(&source).unwrap();
3274		let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
3275			info,
3276			short_channel_id: 42,
3277		});
3278		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 300);
3279
3280		let params = ProbabilisticScoringFeeParameters {
3281			liquidity_penalty_multiplier_msat: 1_000,
3282			liquidity_penalty_amount_multiplier_msat: 256,
3283			..ProbabilisticScoringFeeParameters::zero_penalty()
3284		};
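		// liquidity_penalty_amount_multiplier_msat scales with the amount and the -log10 success
		// probability term (roughly multiplier * amount * 0.3 / 2^20 ≈ 37 msat here), which is
		// added to the 300 msat liquidity penalty above.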
3285		let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
3286		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 337);
3287	}
3288
3289	#[test]
3290	fn calculates_log10_without_overflowing_u64_max_value() {
3291		let logger = TestLogger::new();
3292		let network_graph = network_graph(&logger);
3293		let source = source_node_id();
3294		let usage = ChannelUsage {
3295			amount_msat: u64::max_value(),
3296			inflight_htlc_msat: 0,
3297			effective_capacity: EffectiveCapacity::Infinite,
3298		};
3299		let params = ProbabilisticScoringFeeParameters {
3300			liquidity_penalty_multiplier_msat: 40_000,
3301			..ProbabilisticScoringFeeParameters::zero_penalty()
3302		};
3303		let decay_params = ProbabilisticScoringDecayParameters::zero_penalty();
3304		let channel = network_graph.read_only().channel(42).unwrap().to_owned();
3305		let (info, _) = channel.as_directed_from(&source).unwrap();
3306		let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
3307			info,
3308			short_channel_id: 42,
3309		});
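		// Even with a u64::max_value() amount the -log10 term saturates rather than overflowing,
		// giving 2 * 40_000 = 80_000 msat.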
3310		let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
3311		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 80_000);
3312	}
3313
3314	#[test]
3315	fn accounts_for_inflight_htlc_usage() {
3316		let logger = TestLogger::new();
3317		let network_graph = network_graph(&logger);
3318		let params = ProbabilisticScoringFeeParameters {
3319			considered_impossible_penalty_msat: u64::max_value(),
3320			..ProbabilisticScoringFeeParameters::zero_penalty()
3321		};
3322		let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
3323		let source = source_node_id();
3324
3325		let usage = ChannelUsage {
3326			amount_msat: 750,
3327			inflight_htlc_msat: 0,
3328			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
3329		};
3330		let network_graph = network_graph.read_only();
3331		let channel = network_graph.channel(42).unwrap();
3332		let (info, _) = channel.as_directed_from(&source).unwrap();
3333		let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
3334			info,
3335			short_channel_id: 42,
3336		});
3337		assert_ne!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
3338
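		// Adding 251 msat of in-flight HTLCs pushes the effective amount to 1_001 msat, beyond
		// the 1_000 msat capacity, so the channel is considered impossible to route over.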
3339		let usage = ChannelUsage { inflight_htlc_msat: 251, ..usage };
3340		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
3341	}
3342
3343	#[test]
3344	fn removes_uncertainty_when_exact_liquidity_known() {
3345		let logger = TestLogger::new();
3346		let network_graph = network_graph(&logger);
3347		let params = ProbabilisticScoringFeeParameters::default();
3348		let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
3349		let source = source_node_id();
3350
3351		let base_penalty_msat = params.base_penalty_msat;
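		// When the exact liquidity is known the probabilistic model is bypassed: only the base
		// penalty applies while the amount fits, and anything above the known liquidity is
		// considered impossible.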
3352		let usage = ChannelUsage {
3353			amount_msat: 750,
3354			inflight_htlc_msat: 0,
3355			effective_capacity: EffectiveCapacity::ExactLiquidity { liquidity_msat: 1_000 },
3356		};
3357		let network_graph = network_graph.read_only();
3358		let channel = network_graph.channel(42).unwrap();
3359		let (info, _) = channel.as_directed_from(&source).unwrap();
3360		let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
3361			info,
3362			short_channel_id: 42,
3363		});
3364		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), base_penalty_msat);
3365
3366		let usage = ChannelUsage { amount_msat: 1_000, ..usage };
3367		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), base_penalty_msat);
3368
3369		let usage = ChannelUsage { amount_msat: 1_001, ..usage };
3370		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
3371	}
3372
3373	#[test]
3374	fn remembers_historical_failures() {
3375		let logger = TestLogger::new();
3376		let network_graph = network_graph(&logger);
3377		let params = ProbabilisticScoringFeeParameters {
3378			historical_liquidity_penalty_multiplier_msat: 1024,
3379			historical_liquidity_penalty_amount_multiplier_msat: 1024,
3380			..ProbabilisticScoringFeeParameters::zero_penalty()
3381		};
3382		let decay_params = ProbabilisticScoringDecayParameters {
3383			liquidity_offset_half_life: Duration::from_secs(60 * 60),
3384			historical_no_updates_half_life: Duration::from_secs(10),
3385		};
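		// Historical outcomes are tracked in 32 min- and 32 max-liquidity buckets per direction;
		// the bucket arrays asserted below expose that tracker directly.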
3386		let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
3387		let source = source_node_id();
3388		let target = target_node_id();
3389
3390		let usage = ChannelUsage {
3391			amount_msat: 100,
3392			inflight_htlc_msat: 0,
3393			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
3394		};
3395		let usage_1 = ChannelUsage {
3396			amount_msat: 1,
3397			inflight_htlc_msat: 0,
3398			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
3399		};
3400
3401		{
3402			let network_graph = network_graph.read_only();
3403			let channel = network_graph.channel(42).unwrap();
3404			let (info, _) = channel.as_directed_from(&source).unwrap();
3405			let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
3406				info,
3407				short_channel_id: 42,
3408			});
3409
3410			// With no historical data the normal liquidity penalty calculation is used.
3411			assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 135);
3412		}
3413		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
3414			None);
3415		assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 42, &params, false),
3416			None);
3417
3418		scorer.payment_path_failed(&payment_path_for_amount(1), 42, Duration::ZERO);
3419		{
3420			let network_graph = network_graph.read_only();
3421			let channel = network_graph.channel(42).unwrap();
3422			let (info, _) = channel.as_directed_from(&source).unwrap();
3423			let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
3424				info,
3425				short_channel_id: 42,
3426			});
3427
3428			assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2048);
3429			assert_eq!(scorer.channel_penalty_msat(&candidate, usage_1, &params), 220);
3430		}
3431		// The "it failed" increment is 32, and the resulting probability should lie several buckets into
3432		// the first octile.
3433		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
3434			Some(([32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
3435				[0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])));
3436		assert!(scorer.historical_estimated_payment_success_probability(42, &target, 1, &params, false)
3437			.unwrap() > 0.35);
3438		assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 500, &params, false),
3439			Some(0.0));
3440
3441		// Even after we tell the scorer we definitely have enough available liquidity, it will
3442		// still remember that there was some failure in the past, and assign a non-0 penalty.
3443		scorer.payment_path_failed(&payment_path_for_amount(1000), 43, Duration::ZERO);
3444		{
3445			let network_graph = network_graph.read_only();
3446			let channel = network_graph.channel(42).unwrap();
3447			let (info, _) = channel.as_directed_from(&source).unwrap();
3448			let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
3449				info,
3450				short_channel_id: 42,
3451			});
3452
3453			assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 83);
3454		}
3455		// The first points should be decayed just slightly and the last bucket has a new point.
3456		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
3457			Some(([31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0],
3458				[0, 0, 0, 0, 0, 0, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32])));
3459
3460		// The exact success probability is a bit complicated and involves integer rounding, so we
3461		// simply check bounds here.
3462		let five_hundred_prob =
3463			scorer.historical_estimated_payment_success_probability(42, &target, 500, &params, false).unwrap();
3464		assert!(five_hundred_prob > 0.61, "{}", five_hundred_prob);
3465		assert!(five_hundred_prob < 0.62, "{}", five_hundred_prob);
3466		let one_prob =
3467			scorer.historical_estimated_payment_success_probability(42, &target, 1, &params, false).unwrap();
3468		assert!(one_prob < 0.89, "{}", one_prob);
3469		assert!(one_prob > 0.88, "{}", one_prob);
3470
3471		// Advance the time forward 16 half-lives (which the docs claim will ensure all data is
3472		// gone), and check that we're back to where we started.
3473		scorer.time_passed(Duration::from_secs(10 * 16));
3474		{
3475			let network_graph = network_graph.read_only();
3476			let channel = network_graph.channel(42).unwrap();
3477			let (info, _) = channel.as_directed_from(&source).unwrap();
3478			let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
3479				info,
3480				short_channel_id: 42,
3481			});
3482
3483			assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 135);
3484		}
3485		// Once fully decayed we still have data, but it's all-0s. In the future we may remove the
3486		// data entirely instead.
3487		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
3488			Some(([0; 32], [0; 32])));
3489		assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 1, &params, false), None);
3490
3491		let usage = ChannelUsage {
3492			amount_msat: 100,
3493			inflight_htlc_msat: 1024,
3494			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
3495		};
3496		scorer.payment_path_failed(&payment_path_for_amount(1), 42, Duration::from_secs(10 * 16));
3497		{
3498			let network_graph = network_graph.read_only();
3499			let channel = network_graph.channel(42).unwrap();
3500			let (info, _) = channel.as_directed_from(&source).unwrap();
3501			let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
3502				info,
3503				short_channel_id: 42,
3504			});
3505
3506			assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2048);
3507
3508			let usage = ChannelUsage {
3509				amount_msat: 1,
3510				inflight_htlc_msat: 0,
3511				effective_capacity: EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: 0 },
3512			};
3513			assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2048);
3514		}
3515
3516		// Advance to decay all liquidity offsets to zero.
3517		scorer.time_passed(Duration::from_secs(10 * (16 + 60 * 60)));
3518
3519		// Once even the bounds have decayed, information about the channel should be removed
3520		// entirely.
3521		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
3522			None);
3523
3524		// Use a path in the opposite direction, which has zero for htlc_maximum_msat. This will
3525		// ensure that the effective capacity is zero to test division-by-zero edge cases.
3526		let path = vec![
3527			path_hop(target_pubkey(), 43, 2),
3528			path_hop(source_pubkey(), 42, 1),
3529			path_hop(sender_pubkey(), 41, 0),
3530		];
3531		scorer.payment_path_failed(&Path { hops: path, blinded_tail: None }, 42, Duration::from_secs(10 * (16 + 60 * 60)));
3532	}
3533
3534	#[test]
3535	fn adds_anti_probing_penalty() {
3536		let logger = TestLogger::new();
3537		let network_graph = network_graph(&logger);
3538		let source = source_node_id();
3539		let params = ProbabilisticScoringFeeParameters {
3540			anti_probing_penalty_msat: 500,
3541			..ProbabilisticScoringFeeParameters::zero_penalty()
3542		};
3543		let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
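		// As the cases below show, the anti-probing penalty applies whenever the advertised
		// htlc_maximum_msat is at least half of the channel capacity.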
3544
3545		// Check we receive no penalty for a low htlc_maximum_msat.
3546		let usage = ChannelUsage {
3547			amount_msat: 512_000,
3548			inflight_htlc_msat: 0,
3549			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 1_000 },
3550		};
3551		let network_graph = network_graph.read_only();
3552		let channel = network_graph.channel(42).unwrap();
3553		let (info, _) = channel.as_directed_from(&source).unwrap();
3554		let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
3555			info,
3556			short_channel_id: 42,
3557		});
3558		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
3559
3560		// Check we receive anti-probing penalty for htlc_maximum_msat == channel_capacity.
3561		let usage = ChannelUsage {
3562			amount_msat: 512_000,
3563			inflight_htlc_msat: 0,
3564			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 1_024_000 },
3565		};
3566		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 500);
3567
3568		// Check we receive anti-probing penalty for htlc_maximum_msat == channel_capacity/2.
3569		let usage = ChannelUsage {
3570			amount_msat: 512_000,
3571			inflight_htlc_msat: 0,
3572			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 512_000 },
3573		};
3574		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 500);
3575
3576		// Check we receive no anti-probing penalty for htlc_maximum_msat == channel_capacity/2 - 1.
3577		let usage = ChannelUsage {
3578			amount_msat: 512_000,
3579			inflight_htlc_msat: 0,
3580			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 511_999 },
3581		};
3582		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 0);
3583	}
3584
3585	#[test]
3586	fn scores_with_blinded_path() {
3587		// Make sure we'll account for a blinded path's final_value_msat in scoring
3588		let logger = TestLogger::new();
3589		let network_graph = network_graph(&logger);
3590		let params = ProbabilisticScoringFeeParameters {
3591			liquidity_penalty_multiplier_msat: 1_000,
3592			..ProbabilisticScoringFeeParameters::zero_penalty()
3593		};
3594		let decay_params = ProbabilisticScoringDecayParameters::default();
3595		let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
3596		let source = source_node_id();
3597		let usage = ChannelUsage {
3598			amount_msat: 512,
3599			inflight_htlc_msat: 0,
3600			effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
3601		};
3602		let channel = network_graph.read_only().channel(42).unwrap().to_owned();
3603		let (info, target) = channel.as_directed_from(&source).unwrap();
3604		let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
3605			info,
3606			short_channel_id: 42,
3607		});
3608		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 300);
3609
3610		let mut path = payment_path_for_amount(768);
3611		let recipient_hop = path.hops.pop().unwrap();
3612		path.blinded_tail = Some(BlindedTail {
3613			hops: vec![BlindedHop { blinded_node_id: test_utils::pubkey(44), encrypted_payload: Vec::new() }],
3614			blinding_point: test_utils::pubkey(42),
3615			excess_final_cltv_expiry_delta: recipient_hop.cltv_expiry_delta,
3616			final_value_msat: recipient_hop.fee_msat,
3617		});
3618
3619		// Check the liquidity before and after scoring payment failures to ensure the blinded path's
3620		// final value is taken into account.
3621		assert!(scorer.channel_liquidities.get(&42).is_none());
3622
3623		scorer.payment_path_failed(&path, 42, Duration::ZERO);
3624		path.blinded_tail.as_mut().unwrap().final_value_msat = 256;
3625		scorer.payment_path_failed(&path, 43, Duration::ZERO);
3626
3627		let liquidity = scorer.channel_liquidities.get(&42).unwrap()
3628			.as_directed(&source, &target, 1_000);
3629		assert_eq!(liquidity.min_liquidity_msat(), 256);
3630		assert_eq!(liquidity.max_liquidity_msat(), 768);
3631	}
3632
3633	#[test]
3634	fn realistic_historical_failures() {
3635		// The motivation for the unequal-sized buckets came largely from attempting to pay 10k
3636		// sats over a one bitcoin channel. This tests that case explicitly, ensuring that we score
3637		// properly.
3638		let logger = TestLogger::new();
3639		let mut network_graph = network_graph(&logger);
3640		let params = ProbabilisticScoringFeeParameters {
3641			historical_liquidity_penalty_multiplier_msat: 1024,
3642			historical_liquidity_penalty_amount_multiplier_msat: 1024,
3643			..ProbabilisticScoringFeeParameters::zero_penalty()
3644		};
3645		let decay_params = ProbabilisticScoringDecayParameters {
3646			liquidity_offset_half_life: Duration::from_secs(60 * 60),
3647			historical_no_updates_half_life: Duration::from_secs(10),
3648			..ProbabilisticScoringDecayParameters::default()
3649		};
3650
3651		let capacity_msat = 100_000_000_000;
3652		update_channel(&mut network_graph, 42, source_privkey(), 0, capacity_msat, 200);
3653		update_channel(&mut network_graph, 42, target_privkey(), 1, capacity_msat, 200);
3654
3655		let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
3656		let source = source_node_id();
3657
3658		let mut amount_msat = 10_000_000;
3659		let usage = ChannelUsage {
3660			amount_msat,
3661			inflight_htlc_msat: 0,
3662			effective_capacity: EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat: capacity_msat },
3663		};
3664		let channel = network_graph.read_only().channel(42).unwrap().to_owned();
3665		let (info, target) = channel.as_directed_from(&source).unwrap();
3666		let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate {
3667			info,
3668			short_channel_id: 42,
3669		});
3670		// With no historical data the normal liquidity penalty calculation is used, which results
3671		// in a success probability of ~82%.
3672		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 910);
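		// A rough sketch of that figure (assuming the amount component divides by 2^20, per
		// `AMOUNT_PENALTY_DIVISOR`): -log10(0.82) ≈ 0.086, giving roughly 0.086 * 1_024 ≈ 88 msat
		// of base penalty plus 0.086 * 1_024 * 10_000_000 / 2^20 ≈ 840 msat of amount-proportional
		// penalty, in the same ballpark as the asserted 910 msat.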
3673		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
3674			None);
3675		assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 42, &params, false),
3676			None);
3677
3678		// Fail to pay once, and then check the buckets and penalty.
3679		scorer.payment_path_failed(&payment_path_for_amount(amount_msat), 42, Duration::ZERO);
3680		// The penalty should be the maximum penalty, as the payment we're scoring now falls in
3681		// the same max-liquidity bucket as the only recorded datapoint.
3682		assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params),
3683			2048 + 2048 * amount_msat / super::AMOUNT_PENALTY_DIVISOR);
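		// Concretely (a sketch assuming `AMOUNT_PENALTY_DIVISOR` is 2^20): the capped two decades
		// of log-loss give 2 * 1_024 = 2_048 msat of base penalty plus
		// 2_048 * 10_000_000 / 1_048_576 ≈ 19_531 msat of amount-proportional penalty.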
3684		// The "it failed" increment is 32, applied to the zero-offset min-liquidity bucket and to
3685		// the max-liquidity bucket covering the failed amount (the second bucket, roughly 6k to 12k sats).
3686		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
3687			Some(([32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
3688				[0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])));
3689		// The success probability estimate itself should be zero.
3690		assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, amount_msat, &params, false),
3691			Some(0.0));
3692
3693		// Now test again with the amount in the bottom bucket.
3694		amount_msat /= 2;
3695		// The new amount lies entirely within the only min-liquidity bucket that has any weight,
3696		// so the probability we assign is 1/2.
3697		assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, amount_msat, &params, false),
3698			Some(0.5));
3699
3700		// ...but once we see a failure, we consider the payment to be substantially less likely,
3701		// even though not with a probability of zero, as we still look at the second max bucket,
3702		// which now shows 31.
3703		scorer.payment_path_failed(&payment_path_for_amount(amount_msat), 42, Duration::ZERO);
3704		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
3705			Some(([63, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
3706				[32, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])));
3707		assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, amount_msat, &params, false),
3708			Some(0.0));
3709	}
3710}
3711
3712#[cfg(ldk_bench)]
3713pub mod benches {
3714	use super::*;
3715	use criterion::Criterion;
3716	use crate::routing::router::{bench_utils, RouteHop};
3717	use crate::util::test_utils::TestLogger;
3718	use crate::types::features::{ChannelFeatures, NodeFeatures};
3719
3720	pub fn decay_100k_channel_bounds(bench: &mut Criterion) {
3721		let logger = TestLogger::new();
3722		let (network_graph, mut scorer) = bench_utils::read_graph_scorer(&logger).unwrap();
3723		let mut cur_time = Duration::ZERO;
3724		cur_time += Duration::from_millis(1);
3725		scorer.time_passed(cur_time);
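		// A single warm-up tick before the measured loop keeps the first (potentially more
		// expensive) `time_passed` call out of the timed iterations.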
3726		bench.bench_function("decay_100k_channel_bounds", |b| b.iter(|| {
3727			cur_time += Duration::from_millis(1);
3728			scorer.time_passed(cur_time);
3729		}));
3730	}
3731}