Commit 5a31e5a

Special-case the 0th minimum bucket in historical scoring
Points in the 0th minimum bucket indicate either that we sent a payment which is less than 1/16,384th of the channel's capacity or, more likely, that we failed to send a payment. In either case, averaging the success probability across the full range of upper bounds doesn't make much sense: if we've never managed to send a "real" payment over a channel, we should consider it quite poor.

To address this, we special-case the 0th minimum bucket and only look at the largest-offset max bucket when calculating the success probability.
1 parent 1344104 commit 5a31e5a
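
For illustration, here is a minimal, self-contained sketch of the special case this commit describes. It is not the LDK code: the eight-bucket layout, the BUCKET_START_POS values, and the numbers in main() are assumptions chosen to keep the arithmetic small. Only the shape of the computation follows the change below: pick the largest-offset max bucket holding at least 32 points, weight the 0th min bucket's points against the full max-point total, and scale by how far the payment sits below that bucket's upper edge.

// Sketch only (not the LDK implementation). The bucket count, BUCKET_START_POS values,
// and the example numbers in main() are illustrative assumptions.
use std::cmp;

// Hypothetical bucket boundaries on a 0..=64 position scale (9 entries for 8 buckets).
const BUCKET_START_POS: [u32; 9] = [0, 1, 2, 4, 8, 16, 32, 48, 64];

// Success probability (times a billion) contributed by points in the 0th min bucket,
// considering only the largest-offset max bucket that holds a meaningful number of points.
fn zeroth_min_bucket_prob_times_billion(
	min_buckets: &[u16; 8], max_buckets: &[u16; 8],
	payment_pos: u32, total_valid_points_tracked: u64,
) -> u64 {
	if min_buckets[0] == 0 { return 0; }
	// Pick the max bucket furthest from the channel's capacity that has "real" weight
	// (>= 32 points), but keep the full weight of all max points for the cell itself.
	let mut highest_max_bucket_with_points = 0;
	let mut total_max_points = 0u64;
	for (max_idx, max_bucket) in max_buckets.iter().enumerate() {
		if *max_bucket >= 32 {
			highest_max_bucket_with_points = cmp::max(highest_max_bucket_with_points, max_idx);
		}
		total_max_points += *max_bucket as u64;
	}
	// Upper edge of the selected max bucket, on the same position scale as payment_pos.
	let max_bucket_end_pos = BUCKET_START_POS[8 - highest_max_bucket_with_points] - 1;
	if payment_pos >= max_bucket_end_pos { return 0; }
	// Weight of the (min = 0, max = selected) cell relative to all tracked points,
	// expressed in billionths (1024^3) to keep the arithmetic in integers.
	let bucket_prob_times_billion = (min_buckets[0] as u64) * total_max_points
		* 1024 * 1024 * 1024 / total_valid_points_tracked;
	// Scale by how far below the bucket's upper edge the payment sits; the + 1 in the
	// divisor compensates for payment_pos having been rounded down to a bucket boundary.
	bucket_prob_times_billion * ((max_bucket_end_pos - payment_pos) as u64)
		/ (max_bucket_end_pos + 1) as u64
}

fn main() {
	// All minimum-liquidity points sit in the 0th bucket (i.e. repeated payment failures).
	let min_buckets: [u16; 8] = [64, 0, 0, 0, 0, 0, 0, 0];
	// Two max buckets have points, but only bucket 3 clears the 32-point threshold.
	let max_buckets: [u16; 8] = [0, 0, 0, 32, 0, 0, 31, 0];
	// With a single populated min bucket, the total is simply 64 * (32 + 31).
	let total_valid_points_tracked = 64u64 * 63;
	let prob = zeroth_min_bucket_prob_times_billion(&min_buckets, &max_buckets, 4, total_valid_points_tracked);
	println!("success probability: {}", prob as f64 / 1_073_741_824.0);
}

With these made-up numbers the program prints a success probability of 0.6875: the payment at position 4 sits well below the selected bucket's upper edge at position 15, so the channel still gets some credit, while any payment at or above that edge scores zero.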

1 file changed (+38, -11)

lightning/src/routing/scoring.rs

Lines changed: 38 additions & 11 deletions
@@ -1780,7 +1780,36 @@ mod bucketed_history {
 			}
 
 			let mut cumulative_success_prob_times_billion = 0;
-			for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
+			// Special-case the 0th min bucket - it generally means we failed a payment, so only
+			// consider the highest (i.e. largest-offset-from-max-capacity) max bucket for all
+			// points against the 0th min bucket. This avoids the case where we fail to route
+			// increasingly lower values over a channel, but treat each failure as a separate
+			// datapoint, many of which may have relatively high maximum-available-liquidity
+			// values, which will result in us thinking we have some nontrivial probability of
+			// routing up to that amount.
+			if self.min_liquidity_offset_history.buckets[0] != 0 {
+				let mut highest_max_bucket_with_points = 0;
+				let mut total_max_points = 0;
+				for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate() {
+					if *max_bucket >= 32 {
+						highest_max_bucket_with_points = cmp::max(highest_max_bucket_with_points, max_idx);
+					}
+					total_max_points += *max_bucket as u64;
+				}
+				let max_bucket_end_pos = BUCKET_START_POS[32 - highest_max_bucket_with_points] - 1;
+				if payment_pos < max_bucket_end_pos {
+					let bucket_prob_times_billion =
+						(self.min_liquidity_offset_history.buckets[0] as u64) * total_max_points
+							* 1024 * 1024 * 1024 / total_valid_points_tracked;
+					cumulative_success_prob_times_billion += bucket_prob_times_billion *
+						((max_bucket_end_pos - payment_pos) as u64) /
+						// Add an additional one in the divisor as the payment bucket has been
+						// rounded down.
+						(max_bucket_end_pos + 1) as u64;
+				}
+			}
+
+			for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate().skip(1) {
 				let min_bucket_start_pos = BUCKET_START_POS[min_idx];
 				for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate().take(32 - min_idx) {
 					let max_bucket_end_pos = BUCKET_START_POS[32 - max_idx] - 1;
@@ -3047,7 +3076,7 @@ mod tests {
 		// Even after we tell the scorer we definitely have enough available liquidity, it will
 		// still remember that there was some failure in the past, and assign a non-0 penalty.
 		scorer.payment_path_failed(&payment_path_for_amount(1000), 43);
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 198);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 32);
 		// The first points should be decayed just slightly and the last bucket has a new point.
 		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
 			Some(([31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0],
@@ -3057,12 +3086,12 @@ mod tests {
 		// simply check bounds here.
 		let five_hundred_prob =
 			scorer.historical_estimated_payment_success_probability(42, &target, 500).unwrap();
-		assert!(five_hundred_prob > 0.5);
-		assert!(five_hundred_prob < 0.52);
+		assert!(five_hundred_prob > 0.66);
+		assert!(five_hundred_prob < 0.68);
 		let one_prob =
 			scorer.historical_estimated_payment_success_probability(42, &target, 1).unwrap();
-		assert!(one_prob < 0.95);
-		assert!(one_prob > 0.90);
+		assert!(one_prob < 1.0);
+		assert!(one_prob > 0.95);
 
 		// Advance the time forward 16 half-lives (which the docs claim will ensure all data is
 		// gone), and check that we're back to where we started.
@@ -3082,7 +3111,7 @@ mod tests {
 		scorer.payment_path_failed(&payment_path_for_amount(1), 42);
 		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 2048);
 		usage.inflight_htlc_msat = 0;
-		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 409);
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 866);
 
 		let usage = ChannelUsage {
 			amount_msat: 1,
@@ -3268,9 +3297,7 @@ mod tests {
 		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
 			Some(([63, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
 				[32, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])));
-		assert!(scorer.historical_estimated_payment_success_probability(42, &target, amount_msat)
-			.unwrap() > 0.24);
-		assert!(scorer.historical_estimated_payment_success_probability(42, &target, amount_msat)
-			.unwrap() < 0.25);
+		assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, amount_msat),
+			Some(0.0));
 	}
 }
