@@ -1499,6 +1499,83 @@ enum CandidateHopId {
	Blinded(usize),
}

+/// To avoid doing [`PublicKey`] -> [`PathBuildingHop`] hashtable lookups, we assign each
+/// [`PublicKey`]/node a `usize` index and simply keep a `Vec` of values.
+///
+/// While this is easy for gossip-originating nodes (the [`DirectedChannelInfo`] exposes "counters"
+/// for us for this purpose) we have to have our own indexes for nodes originating from invoice
+/// hints, local channels, or blinded path fake nodes.
+///
+/// This wrapper handles all this for us, allowing look-up of counters from the various contexts.
+///
+/// It is first built by passing all [`NodeId`]s that we'll ever care about (which are not in our
+/// [`NetworkGraph`], e.g. those from first- and last-hop hints and blinded path introduction
+/// points) either through [`NodeCountersBuilder::node_counter_from_pubkey`] or
+/// [`NodeCountersBuilder::node_counter_from_id`], then calling [`NodeCountersBuilder::build`] and
+/// using the resulting [`NodeCounters`] to look up any counters.
+///
+/// [`NodeCounters::private_node_counter_from_pubkey`], specifically, will return `Some` iff
+/// [`NodeCountersBuilder::node_counter_from_pubkey`] was called on the same key (not
+/// [`NodeCountersBuilder::node_counter_from_id`]). It will also return a cached copy of the
+/// [`PublicKey`] -> [`NodeId`] conversion.
+struct NodeCounters<'a> {
+	network_graph: &'a ReadOnlyNetworkGraph<'a>,
+	private_node_id_to_node_counter: HashMap<NodeId, u32>,
+	private_hop_key_cache: HashMap<PublicKey, (NodeId, u32)>,
+}
+
+struct NodeCountersBuilder<'a>(NodeCounters<'a>);
+
+impl<'a> NodeCountersBuilder<'a> {
+	fn new(network_graph: &'a ReadOnlyNetworkGraph) -> Self {
+		Self(NodeCounters {
+			network_graph,
+			private_node_id_to_node_counter: new_hash_map(),
+			private_hop_key_cache: new_hash_map(),
+		})
+	}
+
+	fn node_counter_from_pubkey(&mut self, pubkey: PublicKey) -> u32 {
+		let id = NodeId::from_pubkey(&pubkey);
+		let counter = self.node_counter_from_id(id);
+		self.0.private_hop_key_cache.insert(pubkey, (id, counter));
+		counter
+	}
+
+	fn node_counter_from_id(&mut self, node_id: NodeId) -> u32 {
+		// For any node_id, we first check whether it's already in the network graph and reuse its
+		// counter if so; otherwise we look up (or assign) a counter in our internal map.
+		self.0.network_graph.nodes().get(&node_id)
+			.map(|node| node.node_counter)
+			.unwrap_or_else(|| {
+				let next_node_counter = self.0.network_graph.max_node_counter() + 1 +
+					self.0.private_node_id_to_node_counter.len() as u32;
+				*self.0.private_node_id_to_node_counter.entry(node_id).or_insert(next_node_counter)
+			})
+	}
+
+	fn build(self) -> NodeCounters<'a> { self.0 }
+}
+
+impl<'a> NodeCounters<'a> {
+	fn max_counter(&self) -> u32 {
+		self.network_graph.max_node_counter() +
+			self.private_node_id_to_node_counter.len() as u32
+	}
+
+	fn private_node_counter_from_pubkey(&self, pubkey: &PublicKey) -> Option<&(NodeId, u32)> {
+		self.private_hop_key_cache.get(pubkey)
+	}
+
+	fn node_counter_from_id(&self, node_id: &NodeId) -> Option<(&NodeId, u32)> {
+		self.private_node_id_to_node_counter.get_key_value(node_id).map(|(a, b)| (a, *b))
+			.or_else(|| {
+				self.network_graph.nodes().get_key_value(node_id)
+					.map(|(node_id, node)| (node_id, node.node_counter))
+			})
+	}
+}
+
#[inline]
fn max_htlc_from_capacity(capacity: EffectiveCapacity, max_channel_saturation_power_of_half: u8) -> u64 {
	let saturation_shift: u32 = max_channel_saturation_power_of_half as u32;
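Taken together, the builder keeps the combined counter space dense: nodes already in the [`NetworkGraph`] keep their gossip-assigned counters (up to `max_node_counter()`), and each private node (hint source, first-hop counterparty, blinded-path introduction point) is appended directly after them, so per-node state can live in a single `Vec` indexed by counter instead of a `PublicKey`-keyed hashtable. Below is a minimal standalone sketch of that allocation scheme; `GraphStub`, `ToyCounters`, and the `[u8; 33]` keys are illustrative stand-ins, not LDK APIs.

```rust
use std::collections::HashMap;

/// Hypothetical stand-in for the read-only graph: public nodes already carry dense counters.
struct GraphStub {
	counters: HashMap<[u8; 33], u32>,
	max_counter: u32,
}

/// Mirrors the NodeCountersBuilder idea: graph nodes keep their counters, while unknown
/// ("private") nodes are appended after `max_counter` so the whole index space stays dense.
struct ToyCounters<'a> {
	graph: &'a GraphStub,
	private: HashMap<[u8; 33], u32>,
}

impl<'a> ToyCounters<'a> {
	fn new(graph: &'a GraphStub) -> Self {
		Self { graph, private: HashMap::new() }
	}

	/// Returns the node's counter, assigning the next free private counter if needed.
	fn counter(&mut self, key: [u8; 33]) -> u32 {
		if let Some(c) = self.graph.counters.get(&key) {
			return *c;
		}
		let next = self.graph.max_counter + 1 + self.private.len() as u32;
		*self.private.entry(key).or_insert(next)
	}

	/// Highest counter handed out so far; useful for sizing a dense per-node `Vec` once.
	fn max_counter(&self) -> u32 {
		self.graph.max_counter + self.private.len() as u32
	}
}

fn main() {
	let graph = GraphStub {
		counters: HashMap::from([([1u8; 33], 0), ([2u8; 33], 1)]),
		max_counter: 1,
	};
	let mut counters = ToyCounters::new(&graph);

	assert_eq!(counters.counter([2u8; 33]), 1); // known graph node keeps its gossip counter
	assert_eq!(counters.counter([9u8; 33]), 2); // first private node gets max_counter + 1
	assert_eq!(counters.counter([9u8; 33]), 2); // re-registering is idempotent

	// Per-node state can now live in a Vec indexed by counter rather than a pubkey-keyed map.
	let per_node_state = vec![0u64; counters.max_counter() as usize + 1];
	assert_eq!(per_node_state.len(), 3);
}
```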
@@ -2051,6 +2128,17 @@ where L::Target: Logger {
		}
	}

+	let mut node_counters = NodeCountersBuilder::new(&network_graph);
+
+	let payer_node_counter = node_counters.node_counter_from_pubkey(*our_node_pubkey);
+	let payee_node_counter = node_counters.node_counter_from_pubkey(maybe_dummy_payee_pk);
+
+	for route in payment_params.payee.unblinded_route_hints().iter() {
+		for hop in route.0.iter() {
+			node_counters.node_counter_from_pubkey(hop.src_node_id);
+		}
+	}
+
	// Step (1).
	// Prepare the data we'll use for payee-to-payer search by
	// inserting first hops suggested by the caller as targets.
@@ -2065,9 +2153,14 @@ where L::Target: Logger {
			if chan.counterparty.node_id == *our_node_pubkey {
				return Err(LightningError { err: "First hop cannot have our_node_pubkey as a destination.".to_owned(), action: ErrorAction::IgnoreError });
			}
+			let counterparty_id = NodeId::from_pubkey(&chan.counterparty.node_id);
			first_hop_targets
-				.entry(NodeId::from_pubkey(&chan.counterparty.node_id))
-				.or_insert(Vec::new())
+				.entry(counterparty_id)
+				.or_insert_with(|| {
+					// Make sure there's a counter assigned for the counterparty
+					node_counters.node_counter_from_id(counterparty_id);
+					Vec::new()
+				})
				.push(chan);
		}
		if first_hop_targets.is_empty() {
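The switch from `.or_insert(Vec::new())` to `.or_insert_with(..)` above matters because the closure passed to `or_insert_with` runs only when the key is not yet present, which makes it a convenient one-time hook for registering the counterparty's node counter per first-hop peer. A small generic illustration of that pattern follows; the peer names and channel numbers are made up and unrelated to LDK types.

```rust
use std::collections::HashMap;

fn main() {
	let mut registered = Vec::new();
	let mut targets: HashMap<&str, Vec<u32>> = HashMap::new();

	for (peer, chan) in [("alice", 1), ("alice", 2), ("bob", 3)] {
		targets
			.entry(peer)
			// The closure only runs when `peer` has no entry yet, so the side effect
			// (here: recording the peer; in the patch: assigning a node counter)
			// happens exactly once per key, not once per channel.
			.or_insert_with(|| {
				registered.push(peer);
				Vec::new()
			})
			.push(chan);
	}

	assert_eq!(registered, ["alice", "bob"]);
	assert_eq!(targets["alice"], [1, 2]);
	assert_eq!(targets["bob"], [3]);
}
```

Since `node_counter_from_id` is itself idempotent (it reuses an existing entry), running it inside the closure mainly avoids repeating the lookup for counterparties with multiple channels.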
@@ -2089,6 +2182,8 @@ where L::Target: Logger {
		}
	}

+	let node_counters = node_counters.build();
+
	// The main heap containing all candidate next-hops sorted by their score (max(fee,
	// htlc_minimum)). Ideally this would be a heap which allowed cheap score reduction instead of
	// adding duplicate entries when we find a better path to a given node.