|
47 | 47 |
|
48 | 48 | pub(super) const MAX_TIMER_TICKS: usize = 2;
|
49 | 49 |
|
| 50 | +/// A trivial trait which describes any [`OnionMessenger`]. |
| 51 | +/// |
| 52 | +/// This is not exported to bindings users as general cover traits aren't useful in other |
| 53 | +/// languages. |
| 54 | +pub trait AOnionMessenger { |
| 55 | + /// A type implementing [`EntropySource`] |
| 56 | + type EntropySource: EntropySource + ?Sized; |
| 57 | + /// A type that may be dereferenced to [`Self::EntropySource`] |
| 58 | + type ES: Deref<Target = Self::EntropySource>; |
| 59 | + /// A type implementing [`NodeSigner`] |
| 60 | + type NodeSigner: NodeSigner + ?Sized; |
| 61 | + /// A type that may be dereferenced to [`Self::NodeSigner`] |
| 62 | + type NS: Deref<Target = Self::NodeSigner>; |
| 63 | + /// A type implementing [`Logger`] |
| 64 | + type Logger: Logger + ?Sized; |
| 65 | + /// A type that may be dereferenced to [`Self::Logger`] |
| 66 | + type L: Deref<Target = Self::Logger>; |
| 67 | + /// A type implementing [`NodeIdLookUp`] |
| 68 | + type NodeIdLookUp: NodeIdLookUp + ?Sized; |
| 69 | + /// A type that may be dereferenced to [`Self::NodeIdLookUp`] |
| 70 | + type NL: Deref<Target = Self::NodeIdLookUp>; |
| 71 | + /// A type implementing [`MessageRouter`] |
| 72 | + type MessageRouter: MessageRouter + ?Sized; |
| 73 | + /// A type that may be dereferenced to [`Self::MessageRouter`] |
| 74 | + type MR: Deref<Target = Self::MessageRouter>; |
| 75 | + /// A type implementing [`OffersMessageHandler`] |
| 76 | + type OffersMessageHandler: OffersMessageHandler + ?Sized; |
| 77 | + /// A type that may be dereferenced to [`Self::OffersMessageHandler`] |
| 78 | + type OMH: Deref<Target = Self::OffersMessageHandler>; |
| 79 | + /// A type implementing [`CustomOnionMessageHandler`] |
| 80 | + type CustomOnionMessageHandler: CustomOnionMessageHandler + ?Sized; |
| 81 | + /// A type that may be dereferenced to [`Self::CustomOnionMessageHandler`] |
| 82 | + type CMH: Deref<Target = Self::CustomOnionMessageHandler>; |
| 83 | + /// Returns a reference to the actual [`OnionMessenger`] object. |
| 84 | + fn get_om(&self) -> &OnionMessenger<Self::ES, Self::NS, Self::L, Self::NL, Self::MR, Self::OMH, Self::CMH>; |
| 85 | +} |
| 86 | + |
| 87 | +impl<ES: Deref, NS: Deref, L: Deref, NL: Deref, MR: Deref, OMH: Deref, CMH: Deref> AOnionMessenger |
| 88 | +for OnionMessenger<ES, NS, L, NL, MR, OMH, CMH> where |
| 89 | + ES::Target: EntropySource, |
| 90 | + NS::Target: NodeSigner, |
| 91 | + L::Target: Logger, |
| 92 | + NL::Target: NodeIdLookUp, |
| 93 | + MR::Target: MessageRouter, |
| 94 | + OMH::Target: OffersMessageHandler, |
| 95 | + CMH::Target: CustomOnionMessageHandler, |
| 96 | +{ |
| 97 | + type EntropySource = ES::Target; |
| 98 | + type ES = ES; |
| 99 | + type NodeSigner = NS::Target; |
| 100 | + type NS = NS; |
| 101 | + type Logger = L::Target; |
| 102 | + type L = L; |
| 103 | + type NodeIdLookUp = NL::Target; |
| 104 | + type NL = NL; |
| 105 | + type MessageRouter = MR::Target; |
| 106 | + type MR = MR; |
| 107 | + type OffersMessageHandler = OMH::Target; |
| 108 | + type OMH = OMH; |
| 109 | + type CustomOnionMessageHandler = CMH::Target; |
| 110 | + type CMH = CMH; |
| 111 | + fn get_om(&self) -> &OnionMessenger<ES, NS, L, NL, MR, OMH, CMH> { self } |
| 112 | +} |
| 113 | + |
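The point of this cover trait is that downstream code can accept any `OnionMessenger` behind a single generic parameter instead of repeating all seven `Deref` bounds. A minimal sketch of a hypothetical caller (names here are illustrative, not from this diff):

    // One `AOnionMessenger` bound replaces the seven `Deref` bounds a
    // function would otherwise need to name a concrete `OnionMessenger`.
    fn with_messenger<OM: AOnionMessenger>(om: &OM) {
        // `get_om` recovers the concrete `OnionMessenger`, so its inherent
        // methods are callable here.
        let _messenger = om.get_om();
    }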
50 | 114 | /// A sender, receiver and forwarder of [`OnionMessage`]s.
|
51 | 115 | ///
|
52 | 116 | /// # Handling Messages
|
@@ -181,7 +245,12 @@ where
|
181 | 245 | offers_handler: OMH,
|
182 | 246 | custom_handler: CMH,
|
183 | 247 | intercept_messages_for_offline_peers: bool,
|
184 |
| - pending_events: Mutex<Vec<Event>>, |
| 248 | + pending_events: Mutex<PendingEvents>, |
| 249 | +} |
| 250 | + |
| 251 | +struct PendingEvents { |
| 252 | + intercepted_msgs: Vec<Event>, |
| 253 | + peer_connecteds: Vec<Event>, |
185 | 254 | }
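Splitting the old single `Vec<Event>` into two queues lets the messenger size-cap only the intercepted-message buffer while still draining events in a fixed order: every `OnionMessageIntercepted` event is handled before any `OnionMessagePeerConnected`, so a handler never learns of a reconnection before seeing the messages it should replay to that peer. A sketch of that drain order (illustrative only; it mirrors the sync path further down):

    // Hypothetical helper: intercepted messages first, then peer-connected
    // notifications, matching the order process_pending_events uses below.
    fn drain_in_order(pending: &mut PendingEvents) -> Vec<Event> {
        let mut events = core::mem::take(&mut pending.intercepted_msgs);
        events.append(&mut pending.peer_connecteds);
        events
    }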
|
186 | 255 |
|
187 | 256 | /// [`OnionMessage`]s buffered to be sent.
|
@@ -929,7 +998,10 @@ where
|
929 | 998 | offers_handler,
|
930 | 999 | custom_handler,
|
931 | 1000 | intercept_messages_for_offline_peers,
|
932 |
| - pending_events: Mutex::new(Vec::new()), |
| 1001 | + pending_events: Mutex::new(PendingEvents { |
| 1002 | + intercepted_msgs: Vec::new(), |
| 1003 | + peer_connecteds: Vec::new(), |
| 1004 | + }), |
933 | 1005 | }
|
934 | 1006 | }
|
935 | 1007 |
|
@@ -1150,18 +1222,61 @@ where
|
1150 | 1222 | msgs
|
1151 | 1223 | }
|
1152 | 1224 |
|
1153 |
| - fn enqueue_event(&self, event: Event) { |
| 1225 | + fn enqueue_intercepted_event(&self, event: Event) { |
1154 | 1226 | const MAX_EVENTS_BUFFER_SIZE: usize = (1 << 10) * 256;
|
1155 | 1227 | let mut pending_events = self.pending_events.lock().unwrap();
|
1156 |
| - let total_buffered_bytes: usize = pending_events |
1157 |
| - .iter() |
1158 |
| - .map(|ev| ev.serialized_length()) |
1159 |
| - .sum(); |
| 1228 | + let total_buffered_bytes: usize = |
| 1229 | + pending_events.intercepted_msgs.iter().map(|ev| ev.serialized_length()).sum(); |
1160 | 1230 | if total_buffered_bytes >= MAX_EVENTS_BUFFER_SIZE {
|
1161 | 1231 | log_trace!(self.logger, "Dropping event {:?}: buffer full", event);
|
1162 | 1232 | return
|
1163 | 1233 | }
|
1164 |
| - pending_events.push(event); |
| 1234 | + pending_events.intercepted_msgs.push(event); |
| 1235 | + } |
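For reference, the cap works out to (1 << 10) * 256 = 262,144 bytes, i.e. 256 KiB of serialized events; once the intercepted-message buffer reaches that size, new events are logged and dropped rather than queued.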
| 1236 | + |
| 1237 | + /// Processes any events asynchronously using the given handler. |
| 1238 | + /// |
| 1239 | + /// Note that the event handler is called in the order each event was generated; however, |
| 1240 | + /// futures are polled in parallel for events that have no ordering requirement, allowing |
| 1241 | + /// for some parallelism in handling. |
| 1242 | + /// |
| 1243 | + /// See the trait-level documentation of [`EventsProvider`] for requirements. |
| 1244 | + pub async fn process_pending_events_async<Future: core::future::Future<Output = ()> + core::marker::Unpin, H: Fn(Event) -> Future>( |
| 1245 | + &self, handler: H |
| 1246 | + ) { |
| 1247 | + let mut intercepted_msgs = Vec::new(); |
| 1248 | + let mut peer_connecteds = Vec::new(); |
| 1249 | + { |
| 1250 | + let mut pending_events = self.pending_events.lock().unwrap(); |
| 1251 | + core::mem::swap(&mut pending_events.intercepted_msgs, &mut intercepted_msgs); |
| 1252 | + core::mem::swap(&mut pending_events.peer_connecteds, &mut peer_connecteds); |
| 1253 | + } |
| 1254 | + |
| 1255 | + let mut futures = Vec::with_capacity(intercepted_msgs.len()); |
| 1256 | + for (node_id, recipient) in self.message_recipients.lock().unwrap().iter_mut() { |
| 1257 | + if let OnionMessageRecipient::PendingConnection(_, addresses, _) = recipient { |
| 1258 | + if let Some(addresses) = addresses.take() { |
| 1259 | + futures.push(Some(handler(Event::ConnectionNeeded { node_id: *node_id, addresses }))); |
| 1260 | + } |
| 1261 | + } |
| 1262 | + } |
| 1263 | + |
| 1264 | + for ev in intercepted_msgs { |
| 1265 | + if let Event::OnionMessageIntercepted { .. } = ev {} else { debug_assert!(false); } |
| 1266 | + futures.push(Some(handler(ev))); |
| 1267 | + } |
| 1268 | + // Let the `OnionMessageIntercepted` events finish before moving on to peer_connecteds |
| 1269 | + crate::util::async_poll::MultiFuturePoller(futures).await; |
| 1270 | + |
| 1271 | + if peer_connecteds.len() <= 1 { |
| 1272 | + for event in peer_connecteds { handler(event).await; } |
| 1273 | + } else { |
| 1274 | + let mut futures = Vec::new(); |
| 1275 | + for event in peer_connecteds { |
| 1276 | + futures.push(Some(handler(event))); |
| 1277 | + } |
| 1278 | + crate::util::async_poll::MultiFuturePoller(futures).await; |
| 1279 | + } |
1165 | 1280 | }
|
1166 | 1281 | }
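A usage sketch for the new async entry point, assuming a `messenger` whose `intercept_messages_for_offline_peers` flag is set and some hypothetical persistence layer (nothing here is from this diff): the handler must return an `Unpin` future, which `Box::pin` provides.

    messenger.process_pending_events_async(|event| Box::pin(async move {
        match event {
            Event::OnionMessageIntercepted { peer_node_id, message } => {
                // Store `message` for replay once `peer_node_id` reconnects.
                let _ = (peer_node_id, message);
            },
            Event::OnionMessagePeerConnected { peer_node_id } => {
                // Fetch and re-send any messages stored for `peer_node_id`.
                let _ = peer_node_id;
            },
            _ => {}, // e.g. Event::ConnectionNeeded for pending connections
        }
    })).await;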
|
1167 | 1282 |
|
@@ -1208,7 +1323,20 @@ where
|
1208 | 1323 | }
|
1209 | 1324 | }
|
1210 | 1325 | let mut events = Vec::new();
|
1211 |
| - core::mem::swap(&mut *self.pending_events.lock().unwrap(), &mut events); |
| 1326 | + { |
| 1327 | + let mut pending_events = self.pending_events.lock().unwrap(); |
| 1328 | + #[cfg(debug_assertions)] { |
| 1329 | + for ev in pending_events.intercepted_msgs.iter() { |
| 1330 | + if let Event::OnionMessageIntercepted { .. } = ev {} else { panic!(); } |
| 1331 | + } |
| 1332 | + for ev in pending_events.peer_connecteds.iter() { |
| 1333 | + if let Event::OnionMessagePeerConnected { .. } = ev {} else { panic!(); } |
| 1334 | + } |
| 1335 | + } |
| 1336 | + core::mem::swap(&mut pending_events.intercepted_msgs, &mut events); |
| 1337 | + events.append(&mut pending_events.peer_connecteds); |
| 1338 | + pending_events.peer_connecteds.shrink_to(10); // Limit total heap usage |
| 1339 | + } |
1212 | 1340 | for ev in events {
|
1213 | 1341 | handler.handle_event(ev);
|
1214 | 1342 | }
|
@@ -1286,7 +1414,7 @@ where
|
1286 | 1414 | log_trace!(logger, "Forwarding an onion message to peer {}", next_node_id);
|
1287 | 1415 | },
|
1288 | 1416 | _ if self.intercept_messages_for_offline_peers => {
|
1289 |
| - self.enqueue_event( |
| 1417 | + self.enqueue_intercepted_event( |
1290 | 1418 | Event::OnionMessageIntercepted {
|
1291 | 1419 | peer_node_id: next_node_id, message: onion_message
|
1292 | 1420 | }
|
@@ -1314,7 +1442,7 @@ where
|
1314 | 1442 | .or_insert_with(|| OnionMessageRecipient::ConnectedPeer(VecDeque::new()))
|
1315 | 1443 | .mark_connected();
|
1316 | 1444 | if self.intercept_messages_for_offline_peers {
|
1317 |
| - self.enqueue_event( |
| 1445 | + self.pending_events.lock().unwrap().peer_connecteds.push( |
1318 | 1446 | Event::OnionMessagePeerConnected { peer_node_id: *their_node_id }
|
1319 | 1447 | );
|
1320 | 1448 | }
|