Commit 96a738a

Merge pull request #961 from TheBlueMatt/2021-06-workaround-broken-cln
Use the query start block for ReplyChannelRange response messages
2 parents 9993845 + 42f6a8f commit 96a738a

File tree: 1 file changed, +60 -30 lines changed

lightning/src/routing/network_graph.rs

Lines changed: 60 additions & 30 deletions
@@ -377,23 +377,39 @@ impl<C: Deref , L: Deref > RoutingMessageHandler for NetGraphMsgHandler<C, L> wh
 
         let mut pending_events = self.pending_events.lock().unwrap();
         let batch_count = batches.len();
+        let mut prev_batch_endblock = msg.first_blocknum;
         for (batch_index, batch) in batches.into_iter().enumerate() {
-            // Per spec, the initial first_blocknum needs to be <= the query's first_blocknum and subsequent
-            // must be >= the prior reply. We'll simplify this by using zero since its still spec compliant and
-            // sequence completion is now explicitly.
-            let first_blocknum = 0;
-
-            // Per spec, the final end_blocknum needs to be >= the query's end_blocknum, so we'll use the
-            // query's value. Prior batches must use the number of blocks that fit into the message. We'll
-            // base this off the last SCID in the batch since we've somewhat abusing first_blocknum.
-            let number_of_blocks = if batch_index == batch_count-1 {
-                msg.end_blocknum()
-            } else {
-                block_from_scid(batch.last().unwrap()) + 1
+            // Per spec, the initial `first_blocknum` needs to be <= the query's `first_blocknum`
+            // and subsequent `first_blocknum`s must be >= the prior reply's `first_blocknum`.
+            //
+            // Additionally, c-lightning versions < 0.10 require that the `first_blocknum` of each
+            // reply is >= the previous reply's `first_blocknum` and either exactly the previous
+            // reply's `first_blocknum + number_of_blocks` or exactly one greater. This is a
+            // significant diversion from the requirements set by the spec, and, in case of blocks
+            // with no channel opens (e.g. empty blocks), requires that we use the previous value
+            // and *not* derive the first_blocknum from the actual first block of the reply.
+            let first_blocknum = prev_batch_endblock;
+
+            // Each message carries the number of blocks (from the `first_blocknum`) its contents
+            // fit in. Though there is no requirement that we use exactly the number of blocks its
+            // contents are from, except for the bogus requirements c-lightning enforces, above.
+            //
+            // Per spec, the last end block (ie `first_blocknum + number_of_blocks`) needs to be
+            // >= the query's end block. Thus, for the last reply, we calculate the difference
+            // between the query's end block and the start of the reply.
+            //
+            // Overflow safe since end_blocknum=msg.first_block_num+msg.number_of_blocks and
+            // first_blocknum will be either msg.first_blocknum or a higher block height.
+            let (sync_complete, number_of_blocks) = if batch_index == batch_count-1 {
+                (true, msg.end_blocknum() - first_blocknum)
+            }
+            // Prior replies should use the number of blocks that fit into the reply. Overflow
+            // safe since first_blocknum is always <= last SCID's block.
+            else {
+                (false, block_from_scid(batch.last().unwrap()) - first_blocknum)
             };
 
-            // Only true for the last message in a sequence
-            let sync_complete = batch_index == batch_count - 1;
+            prev_batch_endblock = first_blocknum + number_of_blocks;
 
             pending_events.push(MessageSendEvent::SendReplyChannelRange {
                 node_id: their_node_id.clone(),
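
For context, here is a minimal standalone sketch of the per-batch arithmetic introduced above: each reply starts at the block where the previous reply ended, and only the final reply stretches to the query's end block. The `ReplyRange` struct, the `reply_ranges` helper, and the local `block_from_scid` below are illustrative stand-ins rather than rust-lightning API, and the batches are built by hand to mirror the two-reply expectation in the tests further down.

// Illustrative sketch only; the names and the local block_from_scid here are not rust-lightning API.

/// Stand-in for the crate's helper: the block height occupies the upper 24 bits of an SCID.
fn block_from_scid(scid: u64) -> u32 {
    (scid >> 40) as u32
}

#[derive(Debug, PartialEq)]
struct ReplyRange {
    first_blocknum: u32,
    number_of_blocks: u32,
    sync_complete: bool,
}

/// Compute the (first_blocknum, number_of_blocks, sync_complete) triple for each batch of
/// SCIDs, mirroring the diff: every reply begins where the previous one ended, and only the
/// last reply covers through the query's end block.
fn reply_ranges(query_first_blocknum: u32, query_end_blocknum: u32, batches: &[Vec<u64>]) -> Vec<ReplyRange> {
    let mut prev_batch_endblock = query_first_blocknum;
    let batch_count = batches.len();
    batches.iter().enumerate().map(|(batch_index, batch)| {
        let first_blocknum = prev_batch_endblock;
        let (sync_complete, number_of_blocks) = if batch_index == batch_count - 1 {
            (true, query_end_blocknum - first_blocknum)
        } else {
            (false, block_from_scid(*batch.last().unwrap()) - first_blocknum)
        };
        prev_batch_endblock = first_blocknum + number_of_blocks;
        ReplyRange { first_blocknum, number_of_blocks, sync_complete }
    }).collect()
}

fn main() {
    // Two hand-built batches covering blocks 100000..=107999 and 108000..=108001,
    // answering a query with first_blocknum=100000 and number_of_blocks=8001.
    let scid = |block: u64| block << 40;
    let batches = vec![
        (100000u64..=107999).map(scid).collect::<Vec<_>>(),
        vec![scid(108000), scid(108001)],
    ];
    let ranges = reply_ranges(100000, 100000 + 8001, &batches);
    assert_eq!(ranges[0], ReplyRange { first_blocknum: 100000, number_of_blocks: 7999, sync_complete: false });
    assert_eq!(ranges[1], ReplyRange { first_blocknum: 107999, number_of_blocks: 2, sync_complete: true });
}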
@@ -2239,8 +2255,8 @@ mod tests {
             vec![
                 ReplyChannelRange {
                     chain_hash: chain_hash.clone(),
-                    first_blocknum: 0,
-                    number_of_blocks: 0x01000000,
+                    first_blocknum: 0xffffff,
+                    number_of_blocks: 1,
                     sync_complete: true,
                     short_channel_ids: vec![]
                 },
@@ -2260,8 +2276,8 @@ mod tests {
             vec![
                 ReplyChannelRange {
                     chain_hash: chain_hash.clone(),
-                    first_blocknum: 0,
-                    number_of_blocks: 2000,
+                    first_blocknum: 1000,
+                    number_of_blocks: 1000,
                     sync_complete: true,
                     short_channel_ids: vec![],
                 }
@@ -2281,8 +2297,8 @@ mod tests {
             vec![
                 ReplyChannelRange {
                     chain_hash: chain_hash.clone(),
-                    first_blocknum: 0,
-                    number_of_blocks: 0xffffffff,
+                    first_blocknum: 0xfe0000,
+                    number_of_blocks: 0xffffffff - 0xfe0000,
                     sync_complete: true,
                     short_channel_ids: vec![
                         0xfffffe_ffffff_ffff, // max
@@ -2304,8 +2320,8 @@ mod tests {
             vec![
                 ReplyChannelRange {
                     chain_hash: chain_hash.clone(),
-                    first_blocknum: 0,
-                    number_of_blocks: 108000,
+                    first_blocknum: 100000,
+                    number_of_blocks: 8000,
                     sync_complete: true,
                     short_channel_ids: (100000..=107999)
                         .map(|block| scid_from_parts(block, 0, 0).unwrap())
@@ -2327,17 +2343,17 @@ mod tests {
             vec![
                 ReplyChannelRange {
                     chain_hash: chain_hash.clone(),
-                    first_blocknum: 0,
-                    number_of_blocks: 108000,
+                    first_blocknum: 100000,
+                    number_of_blocks: 7999,
                     sync_complete: false,
                     short_channel_ids: (100000..=107999)
                         .map(|block| scid_from_parts(block, 0, 0).unwrap())
                         .collect(),
                 },
                 ReplyChannelRange {
                     chain_hash: chain_hash.clone(),
-                    first_blocknum: 0,
-                    number_of_blocks: 108001,
+                    first_blocknum: 107999,
+                    number_of_blocks: 2,
                     sync_complete: true,
                     short_channel_ids: vec![
                         scid_from_parts(108000, 0, 0).unwrap(),
@@ -2359,17 +2375,17 @@ mod tests {
             vec![
                 ReplyChannelRange {
                     chain_hash: chain_hash.clone(),
-                    first_blocknum: 0,
-                    number_of_blocks: 108002,
+                    first_blocknum: 100002,
+                    number_of_blocks: 7999,
                     sync_complete: false,
                     short_channel_ids: (100002..=108001)
                         .map(|block| scid_from_parts(block, 0, 0).unwrap())
                         .collect(),
                 },
                 ReplyChannelRange {
                     chain_hash: chain_hash.clone(),
-                    first_blocknum: 0,
-                    number_of_blocks: 108002,
+                    first_blocknum: 108001,
+                    number_of_blocks: 1,
                     sync_complete: true,
                     short_channel_ids: vec![
                         scid_from_parts(108001, 1, 0).unwrap(),
@@ -2386,6 +2402,9 @@ mod tests {
         expected_ok: bool,
         expected_replies: Vec<ReplyChannelRange>
     ) {
+        let mut max_firstblocknum = msg.first_blocknum.saturating_sub(1);
+        let mut c_lightning_0_9_prev_end_blocknum = max_firstblocknum;
+        let query_end_blocknum = msg.end_blocknum();
         let result = net_graph_msg_handler.handle_query_channel_range(test_node_id, msg);
 
         if expected_ok {
@@ -2407,6 +2426,17 @@ mod tests {
                 assert_eq!(msg.number_of_blocks, expected_reply.number_of_blocks);
                 assert_eq!(msg.sync_complete, expected_reply.sync_complete);
                 assert_eq!(msg.short_channel_ids, expected_reply.short_channel_ids);
+
+                // Enforce exactly the sequencing requirements present on c-lightning v0.9.3
+                assert!(msg.first_blocknum == c_lightning_0_9_prev_end_blocknum || msg.first_blocknum == c_lightning_0_9_prev_end_blocknum.saturating_add(1));
+                assert!(msg.first_blocknum >= max_firstblocknum);
+                max_firstblocknum = msg.first_blocknum;
+                c_lightning_0_9_prev_end_blocknum = msg.first_blocknum.saturating_add(msg.number_of_blocks);
+
+                // Check that the last block count is >= the query's end_blocknum
+                if i == events.len() - 1 {
+                    assert!(msg.first_blocknum.saturating_add(msg.number_of_blocks) >= query_end_blocknum);
+                }
             },
             _ => panic!("expected MessageSendEvent::SendReplyChannelRange"),
         }
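
The sequencing rules enforced by the test harness above can also be stated as a small standalone check. The following is a hypothetical sketch (the `check_reply_sequence` helper and its tuple input are not rust-lightning API) of the same ordering constraints, fed with the two-reply expectation from the tests above.

// Hypothetical checker, not rust-lightning API; mirrors the assertions in the test harness above.

/// Verify a sequence of (first_blocknum, number_of_blocks, sync_complete) replies against a
/// query's start and end blocks, using saturating arithmetic to stay overflow-safe near u32::MAX.
fn check_reply_sequence(query_first_blocknum: u32, query_end_blocknum: u32, replies: &[(u32, u32, bool)]) {
    let mut max_firstblocknum = query_first_blocknum.saturating_sub(1);
    let mut prev_end_blocknum = max_firstblocknum;
    for (i, &(first_blocknum, number_of_blocks, sync_complete)) in replies.iter().enumerate() {
        // c-lightning < 0.10: each reply must start exactly at, or one block past, the previous end.
        assert!(first_blocknum == prev_end_blocknum || first_blocknum == prev_end_blocknum.saturating_add(1));
        // Spec: first_blocknum values must be non-decreasing across replies.
        assert!(first_blocknum >= max_firstblocknum);
        max_firstblocknum = first_blocknum;
        prev_end_blocknum = first_blocknum.saturating_add(number_of_blocks);
        // Only the final reply signals completion, and its end block must reach the query's end block.
        assert_eq!(sync_complete, i == replies.len() - 1);
        if sync_complete {
            assert!(prev_end_blocknum >= query_end_blocknum);
        }
    }
}

fn main() {
    // The two-reply case from the expectations above: a query covering blocks 100000..108001.
    check_reply_sequence(100000, 108001, &[(100000, 7999, false), (107999, 2, true)]);
}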
