@@ -570,6 +570,90 @@ pub struct ChannelUpdate {
570
570
pub contents : UnsignedChannelUpdate ,
571
571
}
572
572
573
+ /// A query_channel_range message is used to query a peer for channel
574
+ /// UTXOs in a range of blocks. The recipient of a query makes a best
575
+ /// effort to reply to the query using one or more reply_channel_range
576
+ /// messages.
577
+ #[ derive( Clone , Debug ) ]
578
+ pub struct QueryChannelRange {
579
+ /// The genesis hash of the blockchain being queried
580
+ pub chain_hash : BlockHash ,
581
+ /// The height of the first block for the channel UTXOs being queried
582
+ pub first_blocknum : u32 ,
583
+ /// The number of blocks to include in the query results
584
+ pub number_of_blocks : u32 ,
585
+ }
586
+
587
+ /// A reply_channel_range message is a reply to a query_channel_range
588
+ /// message. Multiple reply_channel_range messages can be sent in reply
589
+ /// to a single query_channel_range message. The query recipient makes a
590
+ /// best effort to respond based on their local network view which may
591
+ /// not be a perfect view of the network. The short_channel_ids in the
592
+ /// reply are encoded. We only support encoding_type=0 uncompressed
593
+ /// serialization and do not support encoding_type=1 zlib serialization.
594
+ #[ derive( Clone , Debug ) ]
595
+ pub struct ReplyChannelRange {
596
+ /// The genesis hash of the blockchain being queried
597
+ pub chain_hash : BlockHash ,
598
+ /// The height of the first block in the range of the reply
599
+ pub first_blocknum : u32 ,
600
+ /// The number of blocks included in the range of the reply
601
+ pub number_of_blocks : u32 ,
602
+ /// Indicates if the query recipient maintains up-to-date channel
603
+ /// information for the chain_hash
604
+ pub full_information : bool ,
605
+ /// The short_channel_ids in the channel range
606
+ pub short_channel_ids : Vec < u64 > ,
607
+ }
608
+
609
+ /// A query_short_channel_ids message is used to query a peer for
610
+ /// routing gossip messages related to one or more short_channel_ids.
611
+ /// The query recipient will reply with the latest, if available,
612
+ /// channel_announcement, channel_update and node_announcement messages
613
+ /// it maintains for the requested short_channel_ids followed by a
614
+ /// reply_short_channel_ids_end message. The short_channel_ids sent in
615
+ /// this query are encoded. We only support encoding_type=0 uncompressed
616
+ /// serialization and do not support encoding_type=1 zlib serialization.
617
+ #[ derive( Clone , Debug ) ]
618
+ pub struct QueryShortChannelIds {
619
+ /// The genesis hash of the blockchain being queried
620
+ pub chain_hash : BlockHash ,
621
+ /// The short_channel_ids that are being queried
622
+ pub short_channel_ids : Vec < u64 > ,
623
+ }
624
+
625
+ /// A reply_short_channel_ids_end message is sent as a reply to a
626
+ /// query_short_channel_ids message. The query recipient makes a best
627
+ /// effort to respond based on their local network view which may not be
628
+ /// a perfect view of the network.
629
+ #[ derive( Clone , Debug ) ]
630
+ pub struct ReplyShortChannelIdsEnd {
631
+ /// The genesis hash of the blockchain that was queried
632
+ pub chain_hash : BlockHash ,
633
+ /// Indicates if the query recipient maintains up-to-date channel
634
+ /// information for the chain_hash
635
+ pub full_information : bool ,
636
+ }
637
+
638
+ /// A gossip_timestamp_filter message is used by a node to request
639
+ /// gossip relay for messages in the requested time range when the
640
+ /// gossip_queries feature has been negotiated.
641
+ #[ derive( Clone , Debug ) ]
642
+ pub struct GossipTimestampFilter {
643
+ /// The genesis hash of the blockchain for channel and node information
644
+ pub chain_hash : BlockHash ,
645
+ /// The starting unix timestamp
646
+ pub first_timestamp : u32 ,
647
+ /// The range of information in seconds
648
+ pub timestamp_range : u32 ,
649
+ }
650
+
651
/// Encoding type for data compression of collections in gossip queries.
/// Only encoding_type=0 (uncompressed) is supported; the zlib variant
/// (encoding_type=1) defined in BOLT #7 is not.
enum EncodingType {
    Uncompressed = 0x00,
}
656
+
573
657
/// Used to put an error message in a LightningError
574
658
#[ derive( Clone ) ]
575
659
pub enum ErrorAction {
@@ -1515,6 +1599,184 @@ impl_writeable_len_match!(NodeAnnouncement, {
1515
1599
contents
1516
1600
} ) ;
1517
1601
1602
+ impl Readable for QueryShortChannelIds {
1603
+ fn read < R : Read > ( r : & mut R ) -> Result < Self , DecodeError > {
1604
+ let chain_hash: BlockHash = Readable :: read ( r) ?;
1605
+
1606
+ // We expect the encoding_len to always includes the 1-byte
1607
+ // encoding_type and that short_channel_ids are 8-bytes each
1608
+ let encoding_len: u16 = Readable :: read ( r) ?;
1609
+ if encoding_len == 0 || ( encoding_len - 1 ) % 8 != 0 {
1610
+ return Err ( DecodeError :: InvalidValue ) ;
1611
+ }
1612
+
1613
+ // Must be encoding_type=0 uncompressed serialization. We do not
1614
+ // support encoding_type=1 zlib serialization.
1615
+ let encoding_type: u8 = Readable :: read ( r) ?;
1616
+ if encoding_type != EncodingType :: Uncompressed as u8 {
1617
+ return Err ( DecodeError :: InvalidValue ) ;
1618
+ }
1619
+
1620
+ // Read short_channel_ids (8-bytes each), for the u16 encoding_len
1621
+ // less the 1-byte encoding_type
1622
+ let short_channel_id_count: u16 = ( encoding_len - 1 ) /8 ;
1623
+ let mut short_channel_ids = Vec :: with_capacity ( short_channel_id_count as usize ) ;
1624
+ for _ in 0 ..short_channel_id_count {
1625
+ short_channel_ids. push ( Readable :: read ( r) ?) ;
1626
+ }
1627
+
1628
+ Ok ( QueryShortChannelIds {
1629
+ chain_hash,
1630
+ short_channel_ids,
1631
+ } )
1632
+ }
1633
+ }
1634
+
1635
+ impl Writeable for QueryShortChannelIds {
1636
+ fn write < W : Writer > ( & self , w : & mut W ) -> Result < ( ) , :: std:: io:: Error > {
1637
+ // Calculated from 1-byte encoding_type plus 8-bytes per short_channel_id
1638
+ let encoding_len: u16 = 1 + self . short_channel_ids . len ( ) as u16 * 8 ;
1639
+
1640
+ w. size_hint ( 32 + 2 + encoding_len as usize ) ;
1641
+ self . chain_hash . write ( w) ?;
1642
+ encoding_len. write ( w) ?;
1643
+
1644
+ // We only support type=0 uncompressed serialization
1645
+ ( EncodingType :: Uncompressed as u8 ) . write ( w) ?;
1646
+
1647
+ for scid in self . short_channel_ids . iter ( ) {
1648
+ scid. write ( w) ?;
1649
+ }
1650
+
1651
+ Ok ( ( ) )
1652
+ }
1653
+ }
1654
+
1655
+ impl Readable for ReplyShortChannelIdsEnd {
1656
+ fn read < R : Read > ( r : & mut R ) -> Result < Self , DecodeError > {
1657
+ let chain_hash: BlockHash = Readable :: read ( r) ?;
1658
+ let full_information: bool = Readable :: read ( r) ?;
1659
+ Ok ( ReplyShortChannelIdsEnd {
1660
+ chain_hash,
1661
+ full_information,
1662
+ } )
1663
+ }
1664
+ }
1665
+
1666
+ impl Writeable for ReplyShortChannelIdsEnd {
1667
+ fn write < W : Writer > ( & self , w : & mut W ) -> Result < ( ) , :: std:: io:: Error > {
1668
+ w. size_hint ( 32 + 1 ) ;
1669
+ self . chain_hash . write ( w) ?;
1670
+ self . full_information . write ( w) ?;
1671
+ Ok ( ( ) )
1672
+ }
1673
+ }
1674
+
1675
+ impl Readable for QueryChannelRange {
1676
+ fn read < R : Read > ( r : & mut R ) -> Result < Self , DecodeError > {
1677
+ let chain_hash: BlockHash = Readable :: read ( r) ?;
1678
+ let first_blocknum: u32 = Readable :: read ( r) ?;
1679
+ let number_of_blocks: u32 = Readable :: read ( r) ?;
1680
+ Ok ( QueryChannelRange {
1681
+ chain_hash,
1682
+ first_blocknum,
1683
+ number_of_blocks
1684
+ } )
1685
+ }
1686
+ }
1687
+
1688
+ impl Writeable for QueryChannelRange {
1689
+ fn write < W : Writer > ( & self , w : & mut W ) -> Result < ( ) , :: std:: io:: Error > {
1690
+ w. size_hint ( 32 + 4 + 4 ) ;
1691
+ self . chain_hash . write ( w) ?;
1692
+ self . first_blocknum . write ( w) ?;
1693
+ self . number_of_blocks . write ( w) ?;
1694
+ Ok ( ( ) )
1695
+ }
1696
+ }
1697
+
1698
+ impl Readable for ReplyChannelRange {
1699
+ fn read < R : Read > ( r : & mut R ) -> Result < Self , DecodeError > {
1700
+ let chain_hash: BlockHash = Readable :: read ( r) ?;
1701
+ let first_blocknum: u32 = Readable :: read ( r) ?;
1702
+ let number_of_blocks: u32 = Readable :: read ( r) ?;
1703
+ let full_information: bool = Readable :: read ( r) ?;
1704
+
1705
+ // We expect the encoding_len to always includes the 1-byte
1706
+ // encoding_type and that short_channel_ids are 8-bytes each
1707
+ let encoding_len: u16 = Readable :: read ( r) ?;
1708
+ if encoding_len == 0 || ( encoding_len - 1 ) % 8 != 0 {
1709
+ return Err ( DecodeError :: InvalidValue ) ;
1710
+ }
1711
+
1712
+ // Must be encoding_type=0 uncompressed serialization. We do not
1713
+ // support encoding_type=1 zlib serialization.
1714
+ let encoding_type: u8 = Readable :: read ( r) ?;
1715
+ if encoding_type != EncodingType :: Uncompressed as u8 {
1716
+ return Err ( DecodeError :: InvalidValue ) ;
1717
+ }
1718
+
1719
+ // Read short_channel_ids (8-bytes each), for the u16 encoding_len
1720
+ // less the 1-byte encoding_type
1721
+ let short_channel_id_count: u16 = ( encoding_len - 1 ) /8 ;
1722
+ let mut short_channel_ids = Vec :: with_capacity ( short_channel_id_count as usize ) ;
1723
+ for _ in 0 ..short_channel_id_count {
1724
+ short_channel_ids. push ( Readable :: read ( r) ?) ;
1725
+ }
1726
+
1727
+ Ok ( ReplyChannelRange {
1728
+ chain_hash,
1729
+ first_blocknum,
1730
+ number_of_blocks,
1731
+ full_information,
1732
+ short_channel_ids
1733
+ } )
1734
+ }
1735
+ }
1736
+
1737
+ impl Writeable for ReplyChannelRange {
1738
+ fn write < W : Writer > ( & self , w : & mut W ) -> Result < ( ) , :: std:: io:: Error > {
1739
+ let encoding_len: u16 = 1 + self . short_channel_ids . len ( ) as u16 * 8 ;
1740
+ w. size_hint ( 32 + 4 + 4 + 1 + 2 + encoding_len as usize ) ;
1741
+ self . chain_hash . write ( w) ?;
1742
+ self . first_blocknum . write ( w) ?;
1743
+ self . number_of_blocks . write ( w) ?;
1744
+ self . full_information . write ( w) ?;
1745
+
1746
+ encoding_len. write ( w) ?;
1747
+ ( EncodingType :: Uncompressed as u8 ) . write ( w) ?;
1748
+ for scid in self . short_channel_ids . iter ( ) {
1749
+ scid. write ( w) ?;
1750
+ }
1751
+
1752
+ Ok ( ( ) )
1753
+ }
1754
+ }
1755
+
1756
+ impl Readable for GossipTimestampFilter {
1757
+ fn read < R : Read > ( r : & mut R ) -> Result < Self , DecodeError > {
1758
+ let chain_hash: BlockHash = Readable :: read ( r) ?;
1759
+ let first_timestamp: u32 = Readable :: read ( r) ?;
1760
+ let timestamp_range: u32 = Readable :: read ( r) ?;
1761
+ Ok ( GossipTimestampFilter {
1762
+ chain_hash,
1763
+ first_timestamp,
1764
+ timestamp_range,
1765
+ } )
1766
+ }
1767
+ }
1768
+
1769
+ impl Writeable for GossipTimestampFilter {
1770
+ fn write < W : Writer > ( & self , w : & mut W ) -> Result < ( ) , :: std:: io:: Error > {
1771
+ w. size_hint ( 32 + 4 + 4 ) ;
1772
+ self . chain_hash . write ( w) ?;
1773
+ self . first_timestamp . write ( w) ?;
1774
+ self . timestamp_range . write ( w) ?;
1775
+ Ok ( ( ) )
1776
+ }
1777
+ }
1778
+
1779
+
1518
1780
#[ cfg( test) ]
1519
1781
mod tests {
1520
1782
use hex;
@@ -2246,4 +2508,122 @@ mod tests {
2246
2508
assert_eq ! ( msg. amt_to_forward, 0x0badf00d01020304 ) ;
2247
2509
assert_eq ! ( msg. outgoing_cltv_value, 0xffffffff ) ;
2248
2510
}
2511
+
2512
#[test]
fn encoding_query_channel_range() {
    // Round-trip a query_channel_range against its known wire encoding.
    let mut query_channel_range = msgs::QueryChannelRange {
        chain_hash: BlockHash::from_hex("06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f").unwrap(),
        first_blocknum: 100000,
        number_of_blocks: 1500,
    };
    let target_value = hex::decode("0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206000186a0000005dc").unwrap();
    assert_eq!(query_channel_range.encode(), target_value);

    query_channel_range = Readable::read(&mut Cursor::new(&target_value[..])).unwrap();
    assert_eq!(query_channel_range.first_blocknum, 100000);
    assert_eq!(query_channel_range.number_of_blocks, 1500);
}
2527
+
2528
+ #[ test]
2529
+ fn encoding_reply_channel_range ( ) {
2530
+ do_encoding_reply_channel_range ( 0 ) ;
2531
+ do_encoding_reply_channel_range ( 1 ) ;
2532
+ }
2533
+
2534
+ fn do_encoding_reply_channel_range ( encoding_type : u8 ) {
2535
+ let mut target_value = hex:: decode ( "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206000b8a06000005dc01" ) . unwrap ( ) ;
2536
+ let expected_chain_hash = BlockHash :: from_hex ( "06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f" ) . unwrap ( ) ;
2537
+ let mut reply_channel_range = msgs:: ReplyChannelRange {
2538
+ chain_hash : expected_chain_hash,
2539
+ first_blocknum : 756230 ,
2540
+ number_of_blocks : 1500 ,
2541
+ full_information : true ,
2542
+ short_channel_ids : vec ! [ 0x000000000000008e , 0x0000000000003c69 , 0x000000000045a6c4 ] ,
2543
+ } ;
2544
+
2545
+ if encoding_type == 0 {
2546
+ target_value. append ( & mut hex:: decode ( "001900000000000000008e0000000000003c69000000000045a6c4" ) . unwrap ( ) ) ;
2547
+ let encoded_value = reply_channel_range. encode ( ) ;
2548
+ assert_eq ! ( encoded_value, target_value) ;
2549
+
2550
+ reply_channel_range = Readable :: read ( & mut Cursor :: new ( & target_value[ ..] ) ) . unwrap ( ) ;
2551
+ assert_eq ! ( reply_channel_range. chain_hash, expected_chain_hash) ;
2552
+ assert_eq ! ( reply_channel_range. first_blocknum, 756230 ) ;
2553
+ assert_eq ! ( reply_channel_range. number_of_blocks, 1500 ) ;
2554
+ assert_eq ! ( reply_channel_range. full_information, true ) ;
2555
+ assert_eq ! ( reply_channel_range. short_channel_ids[ 0 ] , 0x000000000000008e ) ;
2556
+ assert_eq ! ( reply_channel_range. short_channel_ids[ 1 ] , 0x0000000000003c69 ) ;
2557
+ assert_eq ! ( reply_channel_range. short_channel_ids[ 2 ] , 0x000000000045a6c4 ) ;
2558
+ } else {
2559
+ target_value. append ( & mut hex:: decode ( "001601789c636000833e08659309a65878be010010a9023a" ) . unwrap ( ) ) ;
2560
+ let result: Result < msgs:: ReplyChannelRange , msgs:: DecodeError > = Readable :: read ( & mut Cursor :: new ( & target_value[ ..] ) ) ;
2561
+ assert ! ( result. is_err( ) , "Expected decode failure with unsupported zlib encoding" ) ;
2562
+ }
2563
+ }
2564
+
2565
+ #[ test]
2566
+ fn encoding_query_short_channel_ids ( ) {
2567
+ do_encoding_query_short_channel_ids ( 0 ) ;
2568
+ do_encoding_query_short_channel_ids ( 1 ) ;
2569
+ }
2570
+
2571
+ fn do_encoding_query_short_channel_ids ( encoding_type : u8 ) {
2572
+ let mut target_value = hex:: decode ( "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206" ) . unwrap ( ) ;
2573
+ let expected_chain_hash = BlockHash :: from_hex ( "06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f" ) . unwrap ( ) ;
2574
+ let mut query_short_channel_ids = msgs:: QueryShortChannelIds {
2575
+ chain_hash : expected_chain_hash,
2576
+ short_channel_ids : vec ! [ 0x0000000000008e , 0x0000000000003c69 , 0x000000000045a6c4 ] ,
2577
+ } ;
2578
+
2579
+ if encoding_type == 0 {
2580
+ target_value. append ( & mut hex:: decode ( "001900000000000000008e0000000000003c69000000000045a6c4" ) . unwrap ( ) ) ;
2581
+ let encoded_value = query_short_channel_ids. encode ( ) ;
2582
+ assert_eq ! ( encoded_value, target_value) ;
2583
+
2584
+ query_short_channel_ids = Readable :: read ( & mut Cursor :: new ( & target_value[ ..] ) ) . unwrap ( ) ;
2585
+ assert_eq ! ( query_short_channel_ids. chain_hash, expected_chain_hash) ;
2586
+ assert_eq ! ( query_short_channel_ids. short_channel_ids[ 0 ] , 0x000000000000008e ) ;
2587
+ assert_eq ! ( query_short_channel_ids. short_channel_ids[ 1 ] , 0x0000000000003c69 ) ;
2588
+ assert_eq ! ( query_short_channel_ids. short_channel_ids[ 2 ] , 0x000000000045a6c4 ) ;
2589
+ } else {
2590
+ target_value. append ( & mut hex:: decode ( "001601789c636000833e08659309a65878be010010a9023a" ) . unwrap ( ) ) ;
2591
+ let result: Result < msgs:: QueryShortChannelIds , msgs:: DecodeError > = Readable :: read ( & mut Cursor :: new ( & target_value[ ..] ) ) ;
2592
+ assert ! ( result. is_err( ) , "Expected decode failure with unsupported zlib encoding" ) ;
2593
+ }
2594
+ }
2595
+
2596
+ #[ test]
2597
+ fn encoding_reply_short_channel_ids_end ( ) {
2598
+ let expected_chain_hash = BlockHash :: from_hex ( "06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f" ) . unwrap ( ) ;
2599
+ let mut reply_short_channel_ids_end = msgs:: ReplyShortChannelIdsEnd {
2600
+ chain_hash : expected_chain_hash,
2601
+ full_information : true ,
2602
+ } ;
2603
+ let encoded_value = reply_short_channel_ids_end. encode ( ) ;
2604
+ let target_value = hex:: decode ( "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e220601" ) . unwrap ( ) ;
2605
+ assert_eq ! ( encoded_value, target_value) ;
2606
+
2607
+ reply_short_channel_ids_end = Readable :: read ( & mut Cursor :: new ( & target_value[ ..] ) ) . unwrap ( ) ;
2608
+ assert_eq ! ( reply_short_channel_ids_end. chain_hash, expected_chain_hash) ;
2609
+ assert_eq ! ( reply_short_channel_ids_end. full_information, true ) ;
2610
+ }
2611
+
2612
+ #[ test]
2613
+ fn encoding_gossip_timestamp_filter ( ) {
2614
+ let expected_chain_hash = BlockHash :: from_hex ( "06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f" ) . unwrap ( ) ;
2615
+ let mut gossip_timestamp_filter = msgs:: GossipTimestampFilter {
2616
+ chain_hash : expected_chain_hash,
2617
+ first_timestamp : 1590000000 ,
2618
+ timestamp_range : 0xffff_ffff ,
2619
+ } ;
2620
+ let encoded_value = gossip_timestamp_filter. encode ( ) ;
2621
+ let target_value = hex:: decode ( "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e22065ec57980ffffffff" ) . unwrap ( ) ;
2622
+ assert_eq ! ( encoded_value, target_value) ;
2623
+
2624
+ gossip_timestamp_filter = Readable :: read ( & mut Cursor :: new ( & target_value[ ..] ) ) . unwrap ( ) ;
2625
+ assert_eq ! ( gossip_timestamp_filter. chain_hash, expected_chain_hash) ;
2626
+ assert_eq ! ( gossip_timestamp_filter. first_timestamp, 1590000000 ) ;
2627
+ assert_eq ! ( gossip_timestamp_filter. timestamp_range, 0xffff_ffff ) ;
2628
+ }
2249
2629
}
0 commit comments