@@ -939,6 +939,9 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerU
 	///
 	/// Because the datapoints are decayed slowly over time, values will eventually return to
 	/// `Some(([0; 8], [0; 8]))`.
+	///
+	/// In order to fetch a single success probability from the buckets provided here, as used in
+	/// the scoring model, see [`Self::historical_estimated_payment_success_probability`].
 	pub fn historical_estimated_channel_liquidity_probabilities(&self, scid: u64, target: &NodeId)
 	-> Option<([u16; 8], [u16; 8])> {
 		let graph = self.network_graph.read_only();
@@ -953,7 +956,7 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerU
 						min_liquidity_offset_history: &dir_liq.min_liquidity_offset_history,
 						max_liquidity_offset_history: &dir_liq.max_liquidity_offset_history,
 					};
-					let (min_buckets, mut max_buckets, _) = buckets.get_decayed_buckets(T::now(),
+					let (min_buckets, mut max_buckets, _) = buckets.get_decayed_buckets(dir_liq.now,
 						*dir_liq.last_updated, self.decay_params.historical_no_updates_half_life);
 					// Note that the liquidity buckets are an offset from the edge, so we inverse
 					// the max order to get the probabilities from zero.
@@ -964,6 +967,39 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerU
 		}
 		None
 	}
+
+	/// Query the probability of payment success sending the given `amount_msat` over the channel
+	/// with `scid` towards the given `target` node, based on the historical estimated liquidity
+	/// bounds.
+	///
+	/// These are the same bounds as returned by
+	/// [`Self::historical_estimated_channel_liquidity_probabilities`] (but not those returned by
+	/// [`Self::estimated_channel_liquidity_range`]).
+	pub fn historical_estimated_payment_success_probability(
+		&self, scid: u64, target: &NodeId, amount_msat: u64)
+	-> Option<f64> {
+		let graph = self.network_graph.read_only();
+
+		if let Some(chan) = graph.channels().get(&scid) {
+			if let Some(liq) = self.channel_liquidities.get(&scid) {
+				if let Some((directed_info, source)) = chan.as_directed_to(target) {
+					let capacity_msat = directed_info.effective_capacity().as_msat();
+					let dir_liq = liq.as_directed(source, target, 0, capacity_msat, self.decay_params);
+
+					let buckets = HistoricalMinMaxBuckets {
+						min_liquidity_offset_history: &dir_liq.min_liquidity_offset_history,
+						max_liquidity_offset_history: &dir_liq.max_liquidity_offset_history,
+					};
+
+					return buckets.calculate_success_probability_times_billion(dir_liq.now,
+						*dir_liq.last_updated, self.decay_params.historical_no_updates_half_life,
+						amount_msat, capacity_msat
+					).map(|p| p as f64 / (1024 * 1024 * 1024) as f64);
+				}
+			}
+		}
+		None
+	}
 }
 
 impl<T: Time> ChannelLiquidity<T> {
@@ -2847,13 +2883,19 @@ mod tests {
 		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 47);
 		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
 			None);
+		assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 42),
+			None);
 
 		scorer.payment_path_failed(&payment_path_for_amount(1), 42);
 		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 2048);
 		// The "it failed" increment is 32, where the probability should lie fully in the first
 		// octile.
 		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
 			Some(([32, 0, 0, 0, 0, 0, 0, 0], [32, 0, 0, 0, 0, 0, 0, 0])));
+		assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 1),
+			Some(1.0));
+		assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 500),
+			Some(0.0));
 
 		// Even after we tell the scorer we definitely have enough available liquidity, it will
 		// still remember that there was some failure in the past, and assign a non-0 penalty.
@@ -2863,6 +2905,17 @@ mod tests {
 		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
 			Some(([31, 0, 0, 0, 0, 0, 0, 32], [31, 0, 0, 0, 0, 0, 0, 32])));
 
+		// The exact success probability is a bit complicated and involves integer rounding, so we
+		// simply check bounds here.
+		let five_hundred_prob =
+			scorer.historical_estimated_payment_success_probability(42, &target, 500).unwrap();
+		assert!(five_hundred_prob > 0.5);
+		assert!(five_hundred_prob < 0.52);
+		let one_prob =
+			scorer.historical_estimated_payment_success_probability(42, &target, 1).unwrap();
+		assert!(one_prob < 1.0);
+		assert!(one_prob > 0.99);
+
 		// Advance the time forward 16 half-lives (which the docs claim will ensure all data is
 		// gone), and check that we're back to where we started.
 		SinceEpoch::advance(Duration::from_secs(10 * 16));
@@ -2871,6 +2924,7 @@ mod tests {
 		// data entirely instead.
 		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
 			Some(([0; 8], [0; 8])));
+		assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 1), None);
 
 		let mut usage = ChannelUsage {
 			amount_msat: 100,
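For context, a minimal caller-side sketch (illustrative only, not part of the diff) of how the new `historical_estimated_payment_success_probability` accessor might be consumed; the `scorer`, `scid`, `target`, and `amount_msat` bindings are assumed to already exist in the caller's scope. Internally, `calculate_success_probability_times_billion` returns a fixed-point value scaled by 1024 * 1024 * 1024 (2^30), and the accessor maps it back into a plain `f64` between 0.0 and 1.0:

// Hypothetical routing-time check; `scorer`, `scid`, `target: NodeId`, and
// `amount_msat` are assumed bindings, not part of this PR.
match scorer.historical_estimated_payment_success_probability(scid, &target, amount_msat) {
	// Historical bounds suggest this hop is unlikely to carry the amount, so a
	// caller might deprioritize it when assembling candidate paths.
	Some(prob) if prob < 0.25 =>
		println!("channel {} looks unlikely to carry {} msat (p = {:.2})", scid, amount_msat, prob),
	Some(prob) => println!("channel {} historical success probability: {:.2}", scid, prob),
	// None: the channel is unknown, or all historical data has fully decayed.
	None => println!("no historical liquidity data for channel {}", scid),
}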