@@ -6,6 +6,7 @@ use std::io::{Read, Write};
use std::collections::HashMap;
use std::hash::Hash;
use std::sync::Mutex;
+use std::cmp;

use secp256k1::Signature;
use secp256k1::key::{PublicKey, SecretKey};
@@ -67,6 +68,85 @@ impl Writer for VecWriter {
	}
}

+/// Writer that only tracks the amount of data written - useful if you need to calculate the length
+/// of some data when serialized but don't yet need the full data.
+pub(crate) struct LengthCalculatingWriter(pub usize);
+impl Writer for LengthCalculatingWriter {
+	#[inline]
+	fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
+		self.0 += buf.len();
+		Ok(())
+	}
+	#[inline]
+	fn size_hint(&mut self, _size: usize) { }
+}
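An illustrative sketch (outside the diff) of how this might be used, assuming some hypothetical value `msg` that implements the `Writeable` trait defined further down; the serialized length is computed without allocating a buffer:

	let mut len_calc = LengthCalculatingWriter(0);
	msg.write(&mut len_calc).expect("a length calculator cannot hit an IO error");
	let serialized_len: usize = len_calc.0;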
+
+/// Essentially std::io::Take but a bit simpler and with a method to walk the underlying stream
+/// forward to ensure we always consume exactly the fixed length specified.
+pub(crate) struct FixedLengthReader<R: Read> {
+	read: R,
+	bytes_read: u64,
+	total_bytes: u64,
+}
+impl<R: Read> FixedLengthReader<R> {
+	pub fn new(read: R, total_bytes: u64) -> Self {
+		Self { read, bytes_read: 0, total_bytes }
+	}
+
+	pub fn bytes_remain(&mut self) -> bool {
+		self.bytes_read != self.total_bytes
+	}
+
+	pub fn eat_remaining(&mut self) -> Result<(), DecodeError> {
+		::std::io::copy(self, &mut ::std::io::sink()).unwrap();
+		if self.bytes_read != self.total_bytes {
+			Err(DecodeError::ShortRead)
+		} else {
+			Ok(())
+		}
+	}
+}
+impl<R: Read> Read for FixedLengthReader<R> {
+	fn read(&mut self, dest: &mut [u8]) -> Result<usize, ::std::io::Error> {
+		if self.total_bytes == self.bytes_read {
+			Ok(0)
+		} else {
+			let read_len = cmp::min(dest.len() as u64, self.total_bytes - self.bytes_read);
+			match self.read.read(&mut dest[0..(read_len as usize)]) {
+				Ok(v) => {
+					self.bytes_read += v as u64;
+					Ok(v)
+				},
+				Err(e) => Err(e),
+			}
+		}
+	}
+}
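A rough usage sketch (outside the diff); `stream` and `field_len` are hypothetical stand-ins for an underlying reader and a declared field length. The reader never hands out more than `field_len` bytes, and `eat_remaining` drains up to exactly that boundary:

	let mut field_reader = FixedLengthReader::new(&mut stream, field_len);
	let value: BigSize = Readable::read(&mut field_reader).expect("valid BigSize");
	field_reader.eat_remaining().expect("stream contains the full declared length");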
+
+/// A Read which tracks whether any bytes have been read at all. This allows us to distinguish
+/// between "EOF reached before we started" and "EOF reached mid-read".
+pub(crate) struct ReadTrackingReader<R: Read> {
+	read: R,
+	pub have_read: bool,
+}
+impl<R: Read> ReadTrackingReader<R> {
+	pub fn new(read: R) -> Self {
+		Self { read, have_read: false }
+	}
+}
+impl<R: Read> Read for ReadTrackingReader<R> {
+	fn read(&mut self, dest: &mut [u8]) -> Result<usize, ::std::io::Error> {
+		match self.read.read(dest) {
+			Ok(0) => Ok(0),
+			Ok(len) => {
+				self.have_read = true;
+				Ok(len)
+			},
+			Err(e) => Err(e),
+		}
+	}
+}
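Another sketch (outside the diff); `SomeMessage` and `stream` are hypothetical, and the point is only how `have_read` separates the two EOF cases:

	let mut tracking_reader = ReadTrackingReader::new(&mut stream);
	match SomeMessage::read(&mut tracking_reader) {
		Err(DecodeError::ShortRead) if !tracking_reader.have_read => { /* clean EOF: no object was present at all */ },
		Err(DecodeError::ShortRead) => { /* EOF in the middle of the object: truncated data */ },
		other => { /* success or some other decode error */ },
	}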
+
/// A trait that various rust-lightning types implement allowing them to be written out to a Writer
pub trait Writeable {
	/// Writes self out to the given Writer
@@ -125,6 +205,76 @@ impl<R: Read> Readable<R> for U48 {
	}
}

+/// Lightning TLV uses a custom variable-length integer called BigSize. It is similar to Bitcoin's
+/// variable-length integers except that it is serialized in big-endian instead of little-endian.
+///
+/// Like Bitcoin's variable-length integer, it exhibits ambiguity in that certain values can be
+/// encoded in several different ways, which we must check for at deserialization-time. Thus, if
+/// you're looking for an example of a variable-length integer to use for your own project, move
+/// along, this is a rather poor design.
+pub(crate) struct BigSize(pub u64);
+impl Writeable for BigSize {
+	#[inline]
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+		match self.0 {
+			0...0xFC => {
+				(self.0 as u8).write(writer)
+			},
+			0xFD...0xFFFF => {
+				0xFDu8.write(writer)?;
+				(self.0 as u16).write(writer)
+			},
+			0x10000...0xFFFFFFFF => {
+				0xFEu8.write(writer)?;
+				(self.0 as u32).write(writer)
+			},
+			_ => {
+				0xFFu8.write(writer)?;
+				(self.0 as u64).write(writer)
+			},
+		}
+	}
+}
+impl<R: Read> Readable<R> for BigSize {
+	#[inline]
+	fn read(reader: &mut R) -> Result<BigSize, DecodeError> {
+		let n: u8 = Readable::read(reader)?;
+		match n {
+			0xFF => {
+				let x: u64 = Readable::read(reader)?;
+				if x < 0x100000000 {
+					Err(DecodeError::InvalidValue)
+				} else {
+					Ok(BigSize(x))
+				}
+			}
+			0xFE => {
+				let x: u32 = Readable::read(reader)?;
+				if x < 0x10000 {
+					Err(DecodeError::InvalidValue)
+				} else {
+					Ok(BigSize(x as u64))
+				}
+			}
+			0xFD => {
+				let x: u16 = Readable::read(reader)?;
+				if x < 0xFD {
+					Err(DecodeError::InvalidValue)
+				} else {
+					Ok(BigSize(x as u64))
+				}
+			}
+			n => Ok(BigSize(n as u64))
+		}
+	}
+}
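A worked example (outside the diff), assuming the `VecWriter` named in the hunk header above wraps a plain `Vec<u8>`: 0xFFFF falls in the `0xFD...0xFFFF` arm, so it encodes to exactly three bytes, while the longer `0xFE`-prefixed encoding of the same value is rejected on read:

	let mut w = VecWriter(Vec::new());
	BigSize(0xFFFF).write(&mut w).unwrap();
	assert_eq!(w.0, vec![0xFD, 0xFF, 0xFF]); // 0xFD marker followed by a big-endian u16
	// Reading the bytes [0xFE, 0x00, 0x00, 0xFF, 0xFF] instead hits the x < 0x10000
	// check and fails with DecodeError::InvalidValue.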
+
+/// In TLV we occasionally send fields which only consist of, or potentially end with, a
+/// variable-length integer which is simply truncated by skipping high zero bytes. This type
+/// encapsulates such integers implementing Readable/Writeable for them.
+#[cfg_attr(test, derive(PartialEq, Debug))]
+pub(crate) struct HighZeroBytesDroppedVarInt<T>(pub T);
+
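For illustration (outside the diff), using the `Writeable` implementation added in the macro below and again assuming `VecWriter` wraps a `Vec<u8>`: a `u64` holding 258 keeps only its two low bytes, while an encoding that retains a leading zero byte is rejected on read:

	let mut w = VecWriter(Vec::new());
	HighZeroBytesDroppedVarInt(258u64).write(&mut w).unwrap();
	assert_eq!(w.0, vec![0x01, 0x02]); // the six high zero bytes of the big-endian u64 are dropped
	// Decoding [0x00, 0x01, 0x02] back into a HighZeroBytesDroppedVarInt<u64> yields
	// DecodeError::InvalidValue, since the extra zero byte makes the encoding non-canonical.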
macro_rules! impl_writeable_primitive {
	($val_type: ty, $meth_write: ident, $len: expr, $meth_read: ident) => {
		impl Writeable for $val_type {
@@ -133,6 +283,13 @@ macro_rules! impl_writeable_primitive {
				writer.write_all(&$meth_write(*self))
			}
		}
+		impl Writeable for HighZeroBytesDroppedVarInt<$val_type> {
+			#[inline]
+			fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+				// Skip any full leading 0 bytes when writing (in BE):
+				writer.write_all(&$meth_write(self.0)[(self.0.leading_zeros()/8) as usize..$len])
+			}
+		}
		impl<R: Read> Readable<R> for $val_type {
			#[inline]
			fn read(reader: &mut R) -> Result<$val_type, DecodeError> {
@@ -141,6 +298,30 @@ macro_rules! impl_writeable_primitive {
				Ok($meth_read(&buf))
			}
		}
+		impl<R: Read> Readable<R> for HighZeroBytesDroppedVarInt<$val_type> {
+			#[inline]
+			fn read(reader: &mut R) -> Result<HighZeroBytesDroppedVarInt<$val_type>, DecodeError> {
+				// We need to accept short reads (read_len == 0) as "EOF" and handle them as simply
+				// the high bytes being dropped. To do so, we start reading in the middle of buf
+				// and then convert the appropriate number of bytes with extra high bytes out of
+				// buf.
+				let mut buf = [0; $len*2];
+				let mut read_len = reader.read(&mut buf[$len..])?;
+				let mut total_read_len = read_len;
+				while read_len != 0 && total_read_len != $len {
+					read_len = reader.read(&mut buf[($len + total_read_len)..])?;
+					total_read_len += read_len;
+				}
+				if total_read_len == 0 || buf[$len] != 0 {
+					let first_byte = $len - ($len - total_read_len);
+					Ok(HighZeroBytesDroppedVarInt($meth_read(&buf[first_byte..first_byte + $len])))
+				} else {
+					// If the encoding had extra zero bytes, return a failure even though we know
+					// what they meant (as the TLV test vectors require this)
+					Err(DecodeError::InvalidValue)
+				}
+			}
+		}
	}
}
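To make the double-length-buffer trick above concrete (a walkthrough, not part of the patch), take `$val_type = u64`, so `$len = 8`: if the reader yields only the two bytes `01 02` before EOF, they land at `buf[8..10]` and `total_read_len` is 2; `first_byte` evaluates to 2, so `$meth_read(&buf[2..10])` sees six zero padding bytes followed by `01 02` and decodes 258. Had the stream instead contained `00 01 02`, `buf[8]` would be zero with `total_read_len` non-zero, so the read fails with `DecodeError::InvalidValue`, as the TLV test vectors require.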