@@ -15,11 +15,13 @@ use bitcoin::blockdata::locktime::absolute::LockTime;
15
15
use bitcoin:: transaction:: Version ;
16
16
17
17
use crate :: chain:: channelmonitor:: LATENCY_GRACE_PERIOD_BLOCKS ;
18
+ use crate :: chain:: ChannelMonitorUpdateStatus ;
18
19
use crate :: events:: bump_transaction:: WalletSource ;
19
- use crate :: events:: { Event , MessageSendEvent , MessageSendEventsProvider , ClosureReason } ;
20
- use crate :: ln:: functional_test_utils:: * ;
20
+ use crate :: events:: { ClosureReason , Event , MessageSendEvent , MessageSendEventsProvider , PaymentPurpose } ;
21
+ use crate :: ln:: { functional_test_utils:: * , msgs } ;
21
22
use crate :: ln:: msgs:: ChannelMessageHandler ;
22
- use crate :: ln:: channelmanager:: { PaymentId , RecipientOnionFields } ;
23
+ use crate :: ln:: channelmanager:: { PaymentId , PaymentSendFailure , RAACommitmentOrder , RecipientOnionFields } ;
24
+ use crate :: util:: errors:: APIError ;
23
25
use crate :: util:: test_channel_signer:: SignerOp ;
24
26
25
27
#[ test]
@@ -329,6 +331,222 @@ fn test_async_commitment_signature_for_peer_disconnect() {
329
331
}
330
332
}
331
333
334
#[test]
fn test_async_commitment_signature_ordering_reestablish() {
	// Exercise CS-then-RAA ordering purely via peer disconnect/reconnect
	// (no monitor-update failure is simulated).
	do_test_async_commitment_signature_ordering(false);
}
338
+
339
#[test]
fn test_async_commitment_signature_ordering_monitor_restored() {
	// Same ordering check, but additionally simulates an in-progress
	// monitor update that is later restored before the signer is unblocked.
	do_test_async_commitment_signature_ordering(true);
}
343
+
344
/// Verifies that when a `commitment_signed` must be sent before a
/// `revoke_and_ack` (RAACommitmentOrder::CommitmentFirst) and the signer is
/// pending for the commitment signature only, we withhold *both* messages
/// rather than sending the RAA out of order.
///
/// `monitor_update_failure`: when true, additionally simulates a
/// `ChannelMonitorUpdateStatus::InProgress` persist on nodes[0] that is
/// completed just before the signer is re-enabled.
fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) {
	// Across disconnects we may end up in a situation where we need to send a
	// commitment_signed and then revoke_and_ack. We need to make sure that if
	// the signer is pending for commitment_signed but not revoke_and_ack, we don't
	// screw up the order by sending the revoke_and_ack first.
	//
	// We test this for both the case where we send messages after a channel
	// reestablish, as well as restoring a channel after persisting
	// a monitor update.
	//
	// The set up for this test is based on
	// `test_drop_messages_peer_disconnect_dual_htlc`.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let (_, _, chan_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);

	// First payment: fully routed 0 -> 1; its claim will later generate the
	// update_fulfill_htlc + commitment_signed flowing back to nodes[0].
	let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);

	// Start to send the second update_add_htlc + commitment_signed, but don't actually make it
	// to the peer.
	let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	nodes[0].node.send_payment_with_route(&route, payment_hash_2,
		RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
	check_added_monitors!(nodes[0], 1);

	let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_1.len(), 1);
	match events_1[0] {
		// Intentionally dropped: this UpdateHTLCs is never delivered to nodes[1],
		// so it must be retransmitted after reestablish below.
		MessageSendEvent::UpdateHTLCs { .. } => {},
		_ => panic!("Unexpected event"),
	}

	// Send back update_fulfill_htlc + commitment_signed for the first payment.
	nodes[1].node.claim_funds(payment_preimage_1);
	expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
	check_added_monitors!(nodes[1], 1);

	// Handle the update_fulfill_htlc, but fail to persist the monitor update when handling the
	// commitment_signed.
	let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events_2.len(), 1);
	match events_2[0] {
		MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
			// Only the fulfill for payment 1 should be present.
			assert!(update_add_htlcs.is_empty());
			assert_eq!(update_fulfill_htlcs.len(), 1);
			assert!(update_fail_htlcs.is_empty());
			assert!(update_fail_malformed_htlcs.is_empty());
			assert!(update_fee.is_none());

			nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
			let events_3 = nodes[0].node.get_and_clear_pending_events();
			assert_eq!(events_3.len(), 1);
			match events_3[0] {
				Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
					assert_eq!(*payment_preimage, payment_preimage_1);
					assert_eq!(*payment_hash, payment_hash_1);
				},
				_ => panic!("Unexpected event"),
			}

			if monitor_update_failure {
				// Make the monitor persist for the incoming commitment_signed
				// report InProgress, so nodes[0] cannot respond yet.
				chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
			}
			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
			if monitor_update_failure {
				// With the monitor update pending, no RAA may be released.
				assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
			} else {
				let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
			}
			// No commitment_signed so get_event_msg's assert(len == 1) passes
			check_added_monitors!(nodes[0], 1);
		},
		_ => panic!("Unexpected event"),
	}

	// Disconnect and reconnect the peers so that nodes[0] will
	// need to re-send the commitment update *and then* revoke_and_ack.
	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());

	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap();
	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
	assert_eq!(reestablish_1.len(), 1);
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, false).unwrap();
	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
	assert_eq!(reestablish_2.len(), 1);

	// With a fully working signer, here we would send a commitment_signed,
	// and then revoke_and_ack. With commitment_signed disabled, since
	// our ordering is CS then RAA, we should make sure we don't send the RAA.
	nodes[0].disable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
	// as_resp tuple: (.0 channel_ready, .1 revoke_and_ack, .2 commitment update,
	// .3 RAACommitmentOrder) — all messages must be withheld here.
	let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
	assert!(as_resp.0.is_none());
	assert!(as_resp.1.is_none());
	assert!(as_resp.2.is_none());

	if monitor_update_failure {
		// Restore the monitor: complete the pending persist and notify the
		// chain monitor. Even so, the blocked signer must keep everything held.
		chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
		let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
		nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
		check_added_monitors!(nodes[0], 0);
	}

	// Make sure that on signer_unblocked we have the same behavior (even though RAA is ready,
	// we don't send CS yet).
	nodes[0].node.signer_unblocked(Some((nodes[1].node.get_our_node_id(), chan_id)));
	let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
	assert!(as_resp.0.is_none());
	assert!(as_resp.1.is_none());
	assert!(as_resp.2.is_none());

	// Re-enable the commitment signer; now signer_unblocked should release
	// the full CS + RAA pair in the correct order.
	nodes[0].enable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
	nodes[0].node.signer_unblocked(Some((nodes[1].node.get_our_node_id(), chan_id)));

	let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
	let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);

	assert!(as_resp.0.is_none());
	assert!(bs_resp.0.is_none());

	assert!(bs_resp.1.is_none());
	assert!(bs_resp.2.is_none());

	// The core assertion of this test: commitment update comes before the RAA.
	assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);

	// Now that everything is restored, get the CS + RAA and handle them.
	assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1);
	assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty());
	assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
	assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
	assert!(as_resp.2.as_ref().unwrap().update_fee.is_none());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed);
	let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[1], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap());
	let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(bs_second_commitment_signed.update_add_htlcs.is_empty());
	assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty());
	assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty());
	assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty());
	assert!(bs_second_commitment_signed.update_fee.is_none());
	check_added_monitors!(nodes[1], 1);

	// The rest of this is boilerplate for resolving the previous state.

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
	let as_commitment_signed = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	assert!(as_commitment_signed.update_add_htlcs.is_empty());
	assert!(as_commitment_signed.update_fulfill_htlcs.is_empty());
	assert!(as_commitment_signed.update_fail_htlcs.is_empty());
	assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty());
	assert!(as_commitment_signed.update_fee.is_none());
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed);
	let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed);
	let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[1], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	// nodes[1] can now process the second HTLC it received above.
	expect_pending_htlcs_forwardable!(nodes[1]);

	let events_5 = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events_5.len(), 1);
	match events_5[0] {
		Event::PaymentClaimable { ref payment_hash, ref purpose, .. } => {
			assert_eq!(payment_hash_2, *payment_hash);
			match &purpose {
				PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
					// Preimage is unknown until claim; only the secret matches.
					assert!(payment_preimage.is_none());
					assert_eq!(payment_secret_2, *payment_secret);
				},
				_ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	}

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	// Finally settle payment 2 end-to-end so the test ends with no pending state.
	expect_payment_path_successful!(nodes[0]);
	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
}
549
+
332
550
fn do_test_async_holder_signatures ( anchors : bool , remote_commitment : bool ) {
333
551
// Ensures that we can obtain holder signatures for commitment and HTLC transactions
334
552
// asynchronously by allowing their retrieval to fail and retrying via
0 commit comments