Commit 2a1dff4

Move the bucketed history tracking logic into a scoring submodule

1 parent 534d731 commit 2a1dff4

1 file changed: lightning/src/routing/scoring.rs (160 additions, 155 deletions)
@@ -649,161 +649,6 @@ impl ProbabilisticScoringDecayParameters {
 	}
 }
 
-/// Tracks the historical state of a distribution as a weighted average of how much time was spent
-/// in each of 8 buckets.
-#[derive(Clone, Copy)]
-struct HistoricalBucketRangeTracker {
-	buckets: [u16; 8],
-}
-
-impl HistoricalBucketRangeTracker {
-	fn new() -> Self { Self { buckets: [0; 8] } }
-	fn track_datapoint(&mut self, liquidity_offset_msat: u64, capacity_msat: u64) {
-		// We have 8 leaky buckets for min and max liquidity. Each bucket tracks the amount of time
-		// we spend in each bucket as a 16-bit fixed-point number with a 5 bit fractional part.
-		//
-		// Each time we update our liquidity estimate, we add 32 (1.0 in our fixed-point system) to
-		// the buckets for the current min and max liquidity offset positions.
-		//
-		// We then decay each bucket by multiplying by 2047/2048 (avoiding dividing by a
-		// non-power-of-two). This ensures we can't actually overflow the u16 - when we get to
-		// 63,457 adding 32 and decaying by 2047/2048 leaves us back at 63,457.
-		//
-		// In total, this allows us to track data for the last 8,000 or so payments across a given
-		// channel.
-		//
-		// These constants are a balance - we try to fit in 2 bytes per bucket to reduce overhead,
-		// and need to balance having more bits in the decimal part (to ensure decay isn't too
-		// non-linear) with having too few bits in the mantissa, causing us to not store very many
-		// datapoints.
-		//
-		// The constants were picked experimentally, selecting a decay amount that restricts us
-		// from overflowing buckets without having to cap them manually.
-
-		// Ensure the bucket index is in the range [0, 7], even if the liquidity offset is zero or
-		// the channel's capacity, though the second should generally never happen.
-		debug_assert!(liquidity_offset_msat <= capacity_msat);
-		let bucket_idx: u8 = (liquidity_offset_msat * 8 / capacity_msat.saturating_add(1))
-			.try_into().unwrap_or(32); // 32 is bogus for 8 buckets, and will be ignored
-		debug_assert!(bucket_idx < 8);
-		if bucket_idx < 8 {
-			for e in self.buckets.iter_mut() {
-				*e = ((*e as u32) * 2047 / 2048) as u16;
-			}
-			self.buckets[bucket_idx as usize] = self.buckets[bucket_idx as usize].saturating_add(32);
-		}
-	}
-	/// Decay all buckets by the given number of half-lives. Used to more aggressively remove old
-	/// datapoints as we receive newer information.
-	fn time_decay_data(&mut self, half_lives: u32) {
-		for e in self.buckets.iter_mut() {
-			*e = e.checked_shr(half_lives).unwrap_or(0);
-		}
-	}
-}
-
-impl_writeable_tlv_based!(HistoricalBucketRangeTracker, { (0, buckets, required) });
-
-struct HistoricalMinMaxBuckets<'a> {
-	min_liquidity_offset_history: &'a HistoricalBucketRangeTracker,
-	max_liquidity_offset_history: &'a HistoricalBucketRangeTracker,
-}
-
-impl HistoricalMinMaxBuckets<'_> {
-	#[inline]
-	fn get_decayed_buckets<T: Time>(&self, now: T, last_updated: T, half_life: Duration)
-	-> ([u16; 8], [u16; 8], u32) {
-		let required_decays = now.duration_since(last_updated).as_secs()
-			.checked_div(half_life.as_secs())
-			.map_or(u32::max_value(), |decays| cmp::min(decays, u32::max_value() as u64) as u32);
-		let mut min_buckets = *self.min_liquidity_offset_history;
-		min_buckets.time_decay_data(required_decays);
-		let mut max_buckets = *self.max_liquidity_offset_history;
-		max_buckets.time_decay_data(required_decays);
-		(min_buckets.buckets, max_buckets.buckets, required_decays)
-	}
-
-	#[inline]
-	fn calculate_success_probability_times_billion<T: Time>(
-		&self, now: T, last_updated: T, half_life: Duration, amount_msat: u64, capacity_msat: u64)
-	-> Option<u64> {
-		// If historical penalties are enabled, calculate the penalty by walking the set of
-		// historical liquidity bucket (min, max) combinations (where min_idx < max_idx) and, for
-		// each, calculate the probability of success given our payment amount, then total the
-		// weighted average probability of success.
-		//
-		// We use a sliding scale to decide which point within a given bucket will be compared to
-		// the amount being sent - for lower-bounds, the amount being sent is compared to the lower
-		// edge of the first bucket (i.e. zero), but compared to the upper 7/8ths of the last
-		// bucket (i.e. 9 times the index, or 63), with each bucket in between increasing the
-		// comparison point by 1/64th. For upper-bounds, the same applies, however with an offset
-		// of 1/64th (i.e. starting at one and ending at 64). This avoids failing to assign
-		// penalties to channels at the edges.
-		//
-		// If we used the bottom edge of buckets, we'd end up never assigning any penalty at all to
-		// such a channel when sending less than ~0.19% of the channel's capacity (e.g. ~200k sats
-		// for a 1 BTC channel!).
-		//
-		// If we used the middle of each bucket we'd never assign any penalty at all when sending
-		// less than 1/16th of a channel's capacity, or 1/8th if we used the top of the bucket.
-		let mut total_valid_points_tracked = 0;
-
-		let payment_amt_64th_bucket: u8 = if amount_msat < u64::max_value() / 64 {
-			(amount_msat * 64 / capacity_msat.saturating_add(1))
-				.try_into().unwrap_or(65)
-		} else {
-			// Only use 128-bit arithmetic when multiplication will overflow to avoid 128-bit
-			// division. This branch should only be hit in fuzz testing since the amount would
-			// need to be over 2.88 million BTC in practice.
-			((amount_msat as u128) * 64 / (capacity_msat as u128).saturating_add(1))
-				.try_into().unwrap_or(65)
-		};
-		#[cfg(not(fuzzing))]
-		debug_assert!(payment_amt_64th_bucket <= 64);
-		if payment_amt_64th_bucket >= 64 { return None; }
-
-		// Check if all our buckets are zero, once decayed and treat it as if we had no data. We
-		// don't actually use the decayed buckets, though, as that would lose precision.
-		let (decayed_min_buckets, decayed_max_buckets, required_decays) =
-			self.get_decayed_buckets(now, last_updated, half_life);
-		if decayed_min_buckets.iter().all(|v| *v == 0) || decayed_max_buckets.iter().all(|v| *v == 0) {
-			return None;
-		}
-
-		for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
-			for max_bucket in self.max_liquidity_offset_history.buckets.iter().take(8 - min_idx) {
-				total_valid_points_tracked += (*min_bucket as u64) * (*max_bucket as u64);
-			}
-		}
-		// If the total valid points is smaller than 1.0 (i.e. 32 in our fixed-point scheme), treat
-		// it as if we were fully decayed.
-		if total_valid_points_tracked.checked_shr(required_decays).unwrap_or(0) < 32*32 {
-			return None;
-		}
-
-		let mut cumulative_success_prob_times_billion = 0;
-		for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
-			for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate().take(8 - min_idx) {
-				let bucket_prob_times_million = (*min_bucket as u64) * (*max_bucket as u64)
-					* 1024 * 1024 / total_valid_points_tracked;
-				let min_64th_bucket = min_idx as u8 * 9;
-				let max_64th_bucket = (7 - max_idx as u8) * 9 + 1;
-				if payment_amt_64th_bucket > max_64th_bucket {
-					// Success probability 0, the payment amount is above the max liquidity
-				} else if payment_amt_64th_bucket <= min_64th_bucket {
-					cumulative_success_prob_times_billion += bucket_prob_times_million * 1024;
-				} else {
-					cumulative_success_prob_times_billion += bucket_prob_times_million *
-						((max_64th_bucket - payment_amt_64th_bucket) as u64) * 1024 /
-						((max_64th_bucket - min_64th_bucket) as u64);
-				}
-			}
-		}
-
-		Some(cumulative_success_prob_times_billion)
-	}
-}
-
 /// Accounting for channel liquidity balance uncertainty.
 ///
 /// Direction is defined in terms of [`NodeId`] partial ordering, where the source node is the
@@ -1704,6 +1549,166 @@ mod approx {
 	}
 }
 
+mod bucketed_history {
+	use super::*;
+
+	/// Tracks the historical state of a distribution as a weighted average of how much time was spent
+	/// in each of 8 buckets.
+	#[derive(Clone, Copy)]
+	pub(super) struct HistoricalBucketRangeTracker {
+		buckets: [u16; 8],
+	}
+
+	impl HistoricalBucketRangeTracker {
+		pub(super) fn new() -> Self { Self { buckets: [0; 8] } }
+		pub(super) fn track_datapoint(&mut self, liquidity_offset_msat: u64, capacity_msat: u64) {
+			// We have 8 leaky buckets for min and max liquidity. Each bucket tracks the amount of time
+			// we spend in each bucket as a 16-bit fixed-point number with a 5 bit fractional part.
+			//
+			// Each time we update our liquidity estimate, we add 32 (1.0 in our fixed-point system) to
+			// the buckets for the current min and max liquidity offset positions.
+			//
+			// We then decay each bucket by multiplying by 2047/2048 (avoiding dividing by a
+			// non-power-of-two). This ensures we can't actually overflow the u16 - when we get to
+			// 63,457 adding 32 and decaying by 2047/2048 leaves us back at 63,457.
+			//
+			// In total, this allows us to track data for the last 8,000 or so payments across a given
+			// channel.
+			//
+			// These constants are a balance - we try to fit in 2 bytes per bucket to reduce overhead,
+			// and need to balance having more bits in the decimal part (to ensure decay isn't too
+			// non-linear) with having too few bits in the mantissa, causing us to not store very many
+			// datapoints.
+			//
+			// The constants were picked experimentally, selecting a decay amount that restricts us
+			// from overflowing buckets without having to cap them manually.
+
+			// Ensure the bucket index is in the range [0, 7], even if the liquidity offset is zero or
+			// the channel's capacity, though the second should generally never happen.
+			debug_assert!(liquidity_offset_msat <= capacity_msat);
+			let bucket_idx: u8 = (liquidity_offset_msat * 8 / capacity_msat.saturating_add(1))
+				.try_into().unwrap_or(32); // 32 is bogus for 8 buckets, and will be ignored
+			debug_assert!(bucket_idx < 8);
+			if bucket_idx < 8 {
+				for e in self.buckets.iter_mut() {
+					*e = ((*e as u32) * 2047 / 2048) as u16;
+				}
+				self.buckets[bucket_idx as usize] = self.buckets[bucket_idx as usize].saturating_add(32);
+			}
+		}
+		/// Decay all buckets by the given number of half-lives. Used to more aggressively remove old
+		/// datapoints as we receive newer information.
+		pub(super) fn time_decay_data(&mut self, half_lives: u32) {
+			for e in self.buckets.iter_mut() {
+				*e = e.checked_shr(half_lives).unwrap_or(0);
+			}
+		}
+	}
+
+	impl_writeable_tlv_based!(HistoricalBucketRangeTracker, { (0, buckets, required) });
+
+	pub(super) struct HistoricalMinMaxBuckets<'a> {
+		pub(super) min_liquidity_offset_history: &'a HistoricalBucketRangeTracker,
+		pub(super) max_liquidity_offset_history: &'a HistoricalBucketRangeTracker,
+	}
+
+	impl HistoricalMinMaxBuckets<'_> {
+		#[inline]
+		pub(super) fn get_decayed_buckets<T: Time>(&self, now: T, last_updated: T, half_life: Duration)
+		-> ([u16; 8], [u16; 8], u32) {
+			let required_decays = now.duration_since(last_updated).as_secs()
+				.checked_div(half_life.as_secs())
+				.map_or(u32::max_value(), |decays| cmp::min(decays, u32::max_value() as u64) as u32);
+			let mut min_buckets = *self.min_liquidity_offset_history;
+			min_buckets.time_decay_data(required_decays);
+			let mut max_buckets = *self.max_liquidity_offset_history;
+			max_buckets.time_decay_data(required_decays);
+			(min_buckets.buckets, max_buckets.buckets, required_decays)
+		}
+
+		#[inline]
+		pub(super) fn calculate_success_probability_times_billion<T: Time>(
+			&self, now: T, last_updated: T, half_life: Duration, amount_msat: u64, capacity_msat: u64)
+		-> Option<u64> {
+			// If historical penalties are enabled, calculate the penalty by walking the set of
+			// historical liquidity bucket (min, max) combinations (where min_idx < max_idx) and, for
+			// each, calculate the probability of success given our payment amount, then total the
+			// weighted average probability of success.
+			//
+			// We use a sliding scale to decide which point within a given bucket will be compared to
+			// the amount being sent - for lower-bounds, the amount being sent is compared to the lower
+			// edge of the first bucket (i.e. zero), but compared to the upper 7/8ths of the last
+			// bucket (i.e. 9 times the index, or 63), with each bucket in between increasing the
+			// comparison point by 1/64th. For upper-bounds, the same applies, however with an offset
+			// of 1/64th (i.e. starting at one and ending at 64). This avoids failing to assign
+			// penalties to channels at the edges.
+			//
+			// If we used the bottom edge of buckets, we'd end up never assigning any penalty at all to
+			// such a channel when sending less than ~0.19% of the channel's capacity (e.g. ~200k sats
+			// for a 1 BTC channel!).
+			//
+			// If we used the middle of each bucket we'd never assign any penalty at all when sending
+			// less than 1/16th of a channel's capacity, or 1/8th if we used the top of the bucket.
+			let mut total_valid_points_tracked = 0;
+
+			let payment_amt_64th_bucket: u8 = if amount_msat < u64::max_value() / 64 {
+				(amount_msat * 64 / capacity_msat.saturating_add(1))
+					.try_into().unwrap_or(65)
+			} else {
+				// Only use 128-bit arithmetic when multiplication will overflow to avoid 128-bit
+				// division. This branch should only be hit in fuzz testing since the amount would
+				// need to be over 2.88 million BTC in practice.
+				((amount_msat as u128) * 64 / (capacity_msat as u128).saturating_add(1))
+					.try_into().unwrap_or(65)
+			};
+			#[cfg(not(fuzzing))]
+			debug_assert!(payment_amt_64th_bucket <= 64);
+			if payment_amt_64th_bucket >= 64 { return None; }
+
+			// Check if all our buckets are zero, once decayed and treat it as if we had no data. We
+			// don't actually use the decayed buckets, though, as that would lose precision.
+			let (decayed_min_buckets, decayed_max_buckets, required_decays) =
+				self.get_decayed_buckets(now, last_updated, half_life);
+			if decayed_min_buckets.iter().all(|v| *v == 0) || decayed_max_buckets.iter().all(|v| *v == 0) {
+				return None;
+			}
+
+			for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
+				for max_bucket in self.max_liquidity_offset_history.buckets.iter().take(8 - min_idx) {
+					total_valid_points_tracked += (*min_bucket as u64) * (*max_bucket as u64);
+				}
+			}
+			// If the total valid points is smaller than 1.0 (i.e. 32 in our fixed-point scheme), treat
+			// it as if we were fully decayed.
+			if total_valid_points_tracked.checked_shr(required_decays).unwrap_or(0) < 32*32 {
+				return None;
+			}
+
+			let mut cumulative_success_prob_times_billion = 0;
+			for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
+				for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate().take(8 - min_idx) {
+					let bucket_prob_times_million = (*min_bucket as u64) * (*max_bucket as u64)
+						* 1024 * 1024 / total_valid_points_tracked;
+					let min_64th_bucket = min_idx as u8 * 9;
+					let max_64th_bucket = (7 - max_idx as u8) * 9 + 1;
+					if payment_amt_64th_bucket > max_64th_bucket {
+						// Success probability 0, the payment amount is above the max liquidity
+					} else if payment_amt_64th_bucket <= min_64th_bucket {
+						cumulative_success_prob_times_billion += bucket_prob_times_million * 1024;
+					} else {
+						cumulative_success_prob_times_billion += bucket_prob_times_million *
+							((max_64th_bucket - payment_amt_64th_bucket) as u64) * 1024 /
+							((max_64th_bucket - min_64th_bucket) as u64);
+					}
+				}
+			}
+
+			Some(cumulative_success_prob_times_billion)
+		}
+	}
+}
+use bucketed_history::{HistoricalBucketRangeTracker, HistoricalMinMaxBuckets};
+
 impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Writeable for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
 	#[inline]
 	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {

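As an aside (not part of the commit itself), the "can't actually overflow the u16" claim in the track_datapoint comments is easy to check with a small standalone sketch. The loop below applies the same decay-then-credit step to a single bucket; the iteration count and the printed output are illustrative only.

```rust
// Standalone sketch: verify the fixed point of the leaky-bucket update described
// in the comments above. Each step decays the bucket by 2047/2048 and then credits
// it with 32 (1.0 in the 5-bit-fraction fixed-point scheme), in the same order as
// HistoricalBucketRangeTracker::track_datapoint.
fn main() {
	let mut bucket: u16 = 0;
	for _ in 0..20_000 {
		bucket = ((bucket as u32) * 2047 / 2048) as u16; // decay every update
		bucket = bucket.saturating_add(32);              // credit the active bucket
	}
	// Steady state after the credit step is 63,489; one more decay brings it back to
	// the 63,457 quoted in the comments. Both stay well below u16::MAX (65,535).
	assert_eq!(bucket, 63_489);
	assert_eq!(((bucket as u32) * 2047 / 2048) as u16, 63_457);
	println!("steady-state bucket value: {}", bucket);
}
```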
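The sliding-scale comparison in calculate_success_probability_times_billion can likewise be illustrated in isolation. The sketch below is a hypothetical, simplified restatement for a single (min_idx, max_idx) bucket pair, using floating point for readability; the actual code stays in integer fixed point, weights each pair by its tracked points, and sums over all pairs. Function and variable names here are invented for the example.

```rust
// Simplified per-pair success probability on the 0..=64 scale described above.
// Assumes amount_msat * 64 fits in a u64 and that both indices are in 0..8.
fn pair_success_prob(min_idx: u8, max_idx: u8, amount_msat: u64, capacity_msat: u64) -> f64 {
	let payment_64th = (amount_msat * 64 / capacity_msat.saturating_add(1)) as u8;
	let min_64th = min_idx * 9;           // lower-bound comparison point: 0, 9, ..., 63
	let max_64th = (7 - max_idx) * 9 + 1; // upper-bound comparison point: 64, 55, ..., 1
	if payment_64th > max_64th {
		0.0 // the amount exceeds the maximum liquidity this pair represents
	} else if payment_64th <= min_64th {
		1.0 // the amount fits below the minimum liquidity this pair represents
	} else {
		// Linear interpolation between the two comparison points.
		(max_64th - payment_64th) as f64 / (max_64th - min_64th) as f64
	}
}

fn main() {
	// Sending 25% of capacity against the widest pair (min_idx = 0, max_idx = 0,
	// i.e. the liquidity could be anywhere) gives about 0.766 - close to the 0.75
	// one would expect for a uniformly distributed balance, with the small offset
	// coming from the integer bucketing.
	println!("{:.3}", pair_success_prob(0, 0, 250_000, 1_000_000));
}
```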