Skip to content

Commit eb20aab

Browse files
committed
Special-case the 0th minimum bucket in historical scoring
Points in the 0th minimum bucket either indicate we sent a payment that was less than 1/16,384th of the channel's capacity or, more likely, that we failed to send a payment. In either case, averaging the success probability across the full range of upper bounds makes little sense — if we've never managed to send a "real" payment over a channel, we should consider it a poor candidate. To address this, we special-case the 0th minimum bucket and only look at the largest-offset max bucket when calculating the success probability.
1 parent 30bf20b commit eb20aab

File tree

1 file changed

+38
-11
lines changed

1 file changed

+38
-11
lines changed

lightning/src/routing/scoring.rs

Lines changed: 38 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1787,7 +1787,36 @@ mod bucketed_history {
17871787
}
17881788

17891789
let mut cumulative_success_prob_times_billion = 0;
1790-
for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
1790+
// Special-case the 0th min bucket - it generally means we failed a payment, so only
1791+
// consider the highest (i.e. largest-offset-from-max-capacity) max bucket for all
1792+
// points against the 0th min bucket. This avoids the case where we fail to route
1793+
// increasingly lower values over a channel, but treat each failure as a separate
1794+
// datapoint, many of which may have relatively high maximum-available-liquidity
1795+
// values, which will result in us thinking we have some nontrivial probability of
1796+
// routing up to that amount.
1797+
if self.min_liquidity_offset_history.buckets[0] != 0 {
1798+
let mut highest_max_bucket_with_points = 0;
1799+
let mut total_max_points = 0;
1800+
for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate() {
1801+
if *max_bucket >= 32 {
1802+
highest_max_bucket_with_points = cmp::max(highest_max_bucket_with_points, max_idx);
1803+
}
1804+
total_max_points += *max_bucket as u64;
1805+
}
1806+
let max_bucket_end_pos = BUCKET_START_POS[32 - highest_max_bucket_with_points] - 1;
1807+
if payment_pos < max_bucket_end_pos {
1808+
let bucket_prob_times_billion =
1809+
(self.min_liquidity_offset_history.buckets[0] as u64) * total_max_points
1810+
* 1024 * 1024 * 1024 / total_valid_points_tracked;
1811+
cumulative_success_prob_times_billion += bucket_prob_times_billion *
1812+
((max_bucket_end_pos - payment_pos) as u64) /
1813+
// Add an additional one in the divisor as the payment bucket has been
1814+
// rounded down.
1815+
(max_bucket_end_pos + 1) as u64;
1816+
}
1817+
}
1818+
1819+
for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate().skip(1) {
17911820
let min_bucket_start_pos = BUCKET_START_POS[min_idx];
17921821
for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate().take(32 - min_idx) {
17931822
let max_bucket_end_pos = BUCKET_START_POS[32 - max_idx] - 1;
@@ -3054,7 +3083,7 @@ mod tests {
30543083
// Even after we tell the scorer we definitely have enough available liquidity, it will
30553084
// still remember that there was some failure in the past, and assign a non-0 penalty.
30563085
scorer.payment_path_failed(&payment_path_for_amount(1000), 43);
3057-
assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 198);
3086+
assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 32);
30583087
// The first points should be decayed just slightly and the last bucket has a new point.
30593088
assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
30603089
Some(([31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0],
@@ -3064,12 +3093,12 @@ mod tests {
30643093
// simply check bounds here.
30653094
let five_hundred_prob =
30663095
scorer.historical_estimated_payment_success_probability(42, &target, 500).unwrap();
3067-
assert!(five_hundred_prob > 0.5);
3068-
assert!(five_hundred_prob < 0.52);
3096+
assert!(five_hundred_prob > 0.66);
3097+
assert!(five_hundred_prob < 0.68);
30693098
let one_prob =
30703099
scorer.historical_estimated_payment_success_probability(42, &target, 1).unwrap();
3071-
assert!(one_prob < 0.95);
3072-
assert!(one_prob > 0.90);
3100+
assert!(one_prob < 1.0);
3101+
assert!(one_prob > 0.95);
30733102

30743103
// Advance the time forward 16 half-lives (which the docs claim will ensure all data is
30753104
// gone), and check that we're back to where we started.
@@ -3089,7 +3118,7 @@ mod tests {
30893118
scorer.payment_path_failed(&payment_path_for_amount(1), 42);
30903119
assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 2048);
30913120
usage.inflight_htlc_msat = 0;
3092-
assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 409);
3121+
assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 866);
30933122

30943123
let usage = ChannelUsage {
30953124
amount_msat: 1,
@@ -3275,9 +3304,7 @@ mod tests {
32753304
assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
32763305
Some(([63, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
32773306
[32, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])));
3278-
assert!(scorer.historical_estimated_payment_success_probability(42, &target, amount_msat)
3279-
.unwrap() > 0.24);
3280-
assert!(scorer.historical_estimated_payment_success_probability(42, &target, amount_msat)
3281-
.unwrap() < 0.25);
3307+
assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, amount_msat),
3308+
Some(0.0));
32823309
}
32833310
}

0 commit comments

Comments
 (0)