diff --git a/lightning-tests/Cargo.toml b/lightning-tests/Cargo.toml index 4e8d330089d..7830792b121 100644 --- a/lightning-tests/Cargo.toml +++ b/lightning-tests/Cargo.toml @@ -15,6 +15,7 @@ lightning-types = { path = "../lightning-types", features = ["_test_utils"] } lightning-invoice = { path = "../lightning-invoice", default-features = false } lightning-macros = { path = "../lightning-macros" } lightning = { path = "../lightning", features = ["_test_utils"] } +lightning_0_3 = { package = "lightning", git = "https://github.com/valentinewallace/rust-lightning", branch = "2026-02-dedup-htlc-fwd-data", features = ["_test_utils"] } lightning_0_2 = { package = "lightning", version = "0.2.0", features = ["_test_utils"] } lightning_0_1 = { package = "lightning", version = "0.1.7", features = ["_test_utils"] } lightning_0_0_125 = { package = "lightning", version = "0.0.125", features = ["_test_utils"] } diff --git a/lightning-tests/src/lib.rs b/lightning-tests/src/lib.rs index c028193d692..4249c957dde 100644 --- a/lightning-tests/src/lib.rs +++ b/lightning-tests/src/lib.rs @@ -1,4 +1,3 @@ -#[cfg_attr(test, macro_use)] extern crate lightning; #[cfg(all(test, not(taproot)))] diff --git a/lightning-tests/src/upgrade_downgrade_tests.rs b/lightning-tests/src/upgrade_downgrade_tests.rs index 8df670321be..e900aeb381b 100644 --- a/lightning-tests/src/upgrade_downgrade_tests.rs +++ b/lightning-tests/src/upgrade_downgrade_tests.rs @@ -45,19 +45,46 @@ use lightning_0_0_125::ln::msgs::ChannelMessageHandler as _; use lightning_0_0_125::routing::router as router_0_0_125; use lightning_0_0_125::util::ser::Writeable as _; +use lightning_0_3::events::bump_transaction::sync::WalletSourceSync as _; +use lightning_0_3::events::{ + Event as Event_0_3, HTLCHandlingFailureType as HTLCHandlingFailureType_0_3, +}; +use lightning_0_3::expect_payment_claimable as expect_payment_claimable_0_3; +use lightning_0_3::get_event_msg as get_event_msg_0_3; +use lightning_0_3::get_monitor as get_monitor_0_3; +use lightning_0_3::ln::channelmanager::PaymentId as PaymentId_0_3; +use lightning_0_3::ln::functional_test_utils as lightning_0_3_utils; +use lightning_0_3::ln::functional_test_utils::{ + PaymentFailedConditions as PaymentFailedConditions_0_3, ReconnectArgs as ReconnectArgs_0_3, + SendEvent as SendEvent_0_3, +}; +use lightning_0_3::ln::funding::SpliceContribution as SpliceContribution_0_3; +use lightning_0_3::ln::msgs::BaseMessageHandler as _; +use lightning_0_3::ln::msgs::ChannelMessageHandler as _; +use lightning_0_3::ln::msgs::MessageSendEvent as MessageSendEvent_0_3; +use lightning_0_3::ln::outbound_payment::RecipientOnionFields as RecipientOnionFields_0_3; +use lightning_0_3::ln::splicing_tests::lock_splice as lock_splice_0_3; +use lightning_0_3::ln::splicing_tests::splice_channel as splice_channel_0_3; +use lightning_0_3::ln::types::ChannelId as ChannelId_0_3; +use lightning_0_3::reload_node as reload_node_0_3; +use lightning_0_3::routing::router as router_0_3; +use lightning_0_3::types::payment::{ + PaymentHash as PaymentHash_0_3, PaymentPreimage as PaymentPreimage_0_3, + PaymentSecret as PaymentSecret_0_3, +}; +use lightning_0_3::util::ser::Writeable as _; + use lightning::chain::channelmonitor::{ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER}; -use lightning::events::bump_transaction::sync::WalletSourceSync; -use lightning::events::{ClosureReason, Event, HTLCHandlingFailureType}; -use lightning::ln::functional_test_utils::*; -use lightning::ln::funding::SpliceContribution; -use lightning::ln::msgs::BaseMessageHandler as _; 
+use lightning::check_spends; +use lightning::events::{ClosureReason, Event}; +use lightning::expect_payment_claimable; +use lightning::ln::functional_test_utils as lightning_local_utils; +use lightning::ln::functional_test_utils::{ReconnectArgs, SendEvent}; use lightning::ln::msgs::ChannelMessageHandler as _; -use lightning::ln::msgs::MessageSendEvent; -use lightning::ln::splicing_tests::*; -use lightning::ln::types::ChannelId; +use lightning::reload_node; use lightning::sign::OutputSpender; - -use lightning_types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; +use lightning::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; +use lightning::util::ser::Writeable as _; use bitcoin::script::Builder; use bitcoin::secp256k1::Secp256k1; @@ -68,7 +95,7 @@ use std::sync::Arc; #[test] fn simple_upgrade() { // Tests a simple case of upgrading from LDK 0.1 with a pending payment - let (node_a_ser, node_b_ser, mon_a_ser, mon_b_ser, preimage); + let (node_a_ser, node_b_ser, mon_a_ser, mon_b_ser, preimage_bytes); { let chanmon_cfgs = lightning_0_1_utils::create_chanmon_cfgs(2); let node_cfgs = lightning_0_1_utils::create_node_cfgs(2, &chanmon_cfgs); @@ -79,7 +106,7 @@ fn simple_upgrade() { let payment_preimage = lightning_0_1_utils::route_payment(&nodes[0], &[&nodes[1]], 1_000_000); - preimage = PaymentPreimage(payment_preimage.0 .0); + preimage_bytes = payment_preimage.0 .0; node_a_ser = nodes[0].node.encode(); node_b_ser = nodes[1].node.encode(); @@ -89,26 +116,43 @@ fn simple_upgrade() { // Create a dummy node to reload over with the 0.1 state - let mut chanmon_cfgs = create_chanmon_cfgs(2); + let mut chanmon_cfgs = lightning_0_3_utils::create_chanmon_cfgs(2); // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_cfgs = lightning_0_3_utils::create_node_cfgs(2, &chanmon_cfgs); let (persister_a, persister_b, chain_mon_a, chain_mon_b); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let node_chanmgrs = lightning_0_3_utils::create_node_chanmgrs(2, &node_cfgs, &[None, None]); let (node_a, node_b); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let mut nodes = lightning_0_3_utils::create_network(2, &node_cfgs, &node_chanmgrs); - let config = test_default_channel_config(); + let config = lightning_0_3_utils::test_default_channel_config(); let a_mons = &[&mon_a_ser[..]]; - reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); - reload_node!(nodes[1], config, &node_b_ser, &[&mon_b_ser], persister_b, chain_mon_b, node_b); - - reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - - claim_payment(&nodes[0], &[&nodes[1]], preimage); + reload_node_0_3!( + nodes[0], + config.clone(), + &node_a_ser, + a_mons, + persister_a, + chain_mon_a, + node_a + ); + reload_node_0_3!( + nodes[1], + config, + &node_b_ser, + &[&mon_b_ser], + persister_b, + chain_mon_b, + node_b + ); + + lightning_0_3_utils::reconnect_nodes(ReconnectArgs_0_3::new(&nodes[0], &nodes[1])); + + let preimage = PaymentPreimage_0_3(preimage_bytes); + lightning_0_3_utils::claim_payment(&nodes[0], &[&nodes[1]], preimage); } #[test] @@ -228,7 +272,7 @@ fn test_125_dangling_post_update_actions() { // Create a dummy node to reload over with the 0.0.125 state - let mut chanmon_cfgs = 
create_chanmon_cfgs(4); + let mut chanmon_cfgs = lightning_local_utils::create_chanmon_cfgs(4); // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; @@ -236,14 +280,15 @@ fn test_125_dangling_post_update_actions() { chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; chanmon_cfgs[3].keys_manager.disable_all_state_policy_checks = true; - let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); + let node_cfgs = lightning_local_utils::create_node_cfgs(4, &chanmon_cfgs); let (persister, chain_mon); - let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); + let node_chanmgrs = + lightning_local_utils::create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let node; - let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let mut nodes = lightning_local_utils::create_network(4, &node_cfgs, &node_chanmgrs); // Finally, reload the node in the latest LDK. This previously failed. - let config = test_default_channel_config(); + let config = lightning_local_utils::test_default_channel_config(); reload_node!(nodes[3], config, &node_d_ser, &[&mon_ser], persister, chain_mon, node); } @@ -283,14 +328,14 @@ fn test_0_1_legacy_remote_key_derivation() { } // Create a dummy node to reload over with the 0.1 state - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let chanmon_cfgs = lightning_local_utils::create_chanmon_cfgs(2); + let node_cfgs = lightning_local_utils::create_node_cfgs(2, &chanmon_cfgs); let (persister_a, persister_b, chain_mon_a, chain_mon_b); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let node_chanmgrs = lightning_local_utils::create_node_chanmgrs(2, &node_cfgs, &[None, None]); let (node_a, node_b); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let mut nodes = lightning_local_utils::create_network(2, &node_cfgs, &node_chanmgrs); - let config = test_default_channel_config(); + let config = lightning_local_utils::test_default_channel_config(); let a_mons = &[&mon_a_ser[..]]; reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); reload_node!(nodes[1], config, &node_b_ser, &[&mon_b_ser], persister_b, chain_mon_b, node_b); @@ -299,13 +344,13 @@ fn test_0_1_legacy_remote_key_derivation() { let node_b_id = nodes[1].node.get_our_node_id(); - mine_transaction(&nodes[0], &commitment_tx[0]); + lightning_local_utils::mine_transaction(&nodes[0], &commitment_tx[0]); let reason = ClosureReason::CommitmentTxConfirmed; - check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100_000); - check_added_monitors(&nodes[0], 1); - check_closed_broadcast(&nodes[0], 1, false); + lightning_local_utils::check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100_000); + lightning_local_utils::check_added_monitors(&nodes[0], 1); + lightning_local_utils::check_closed_broadcast(&nodes[0], 1, false); - connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); + lightning_local_utils::connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); let mut spendable_event = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events(); assert_eq!(spendable_event.len(), 1); if let Event::SpendableOutputs { outputs, channel_id: ev_id } = spendable_event.pop().unwrap() { @@ -420,7 +465,7 @@ fn do_test_0_1_htlc_forward_after_splice(fail_htlc: bool) { } // Create a dummy node to reload over with the 0.1 state - let mut chanmon_cfgs = 
create_chanmon_cfgs(3); + let mut chanmon_cfgs = lightning_0_3_utils::create_chanmon_cfgs(3); // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; @@ -431,73 +476,105 @@ fn do_test_0_1_htlc_forward_after_splice(fail_htlc: bool) { chanmon_cfgs[1].tx_broadcaster.blocks = node_b_blocks; chanmon_cfgs[2].tx_broadcaster.blocks = node_c_blocks; - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_cfgs = lightning_0_3_utils::create_node_cfgs(3, &chanmon_cfgs); let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let node_chanmgrs = + lightning_0_3_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let (node_a, node_b, node_c); - let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let mut nodes = lightning_0_3_utils::create_network(3, &node_cfgs, &node_chanmgrs); - let config = test_default_channel_config(); + let config = lightning_0_3_utils::test_default_channel_config(); let a_mons = &[&mon_a_1_ser[..]]; - reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); + reload_node_0_3!( + nodes[0], + config.clone(), + &node_a_ser, + a_mons, + persister_a, + chain_mon_a, + node_a + ); let b_mons = &[&mon_b_1_ser[..], &mon_b_2_ser[..]]; - reload_node!(nodes[1], config.clone(), &node_b_ser, b_mons, persister_b, chain_mon_b, node_b); + reload_node_0_3!( + nodes[1], + config.clone(), + &node_b_ser, + b_mons, + persister_b, + chain_mon_b, + node_b + ); let c_mons = &[&mon_c_1_ser[..]]; - reload_node!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); + reload_node_0_3!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); - reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - let mut reconnect_b_c_args = ReconnectArgs::new(&nodes[1], &nodes[2]); + lightning_0_3_utils::reconnect_nodes(ReconnectArgs_0_3::new(&nodes[0], &nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs_0_3::new(&nodes[1], &nodes[2]); reconnect_b_c_args.send_channel_ready = (true, true); reconnect_b_c_args.send_announcement_sigs = (true, true); - reconnect_nodes(reconnect_b_c_args); + lightning_0_3_utils::reconnect_nodes(reconnect_b_c_args); - let contribution = SpliceContribution::splice_out(vec![TxOut { + let contribution = SpliceContribution_0_3::splice_out(vec![TxOut { value: Amount::from_sat(1_000), script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), }]); - let splice_tx = splice_channel(&nodes[0], &nodes[1], ChannelId(chan_id_bytes_a), contribution); + let splice_tx = + splice_channel_0_3(&nodes[0], &nodes[1], ChannelId_0_3(chan_id_bytes_a), contribution); for node in nodes.iter() { - mine_transaction(node, &splice_tx); - connect_blocks(node, ANTI_REORG_DELAY - 1); + lightning_0_3_utils::mine_transaction(node, &splice_tx); + lightning_0_3_utils::connect_blocks(node, ANTI_REORG_DELAY - 1); } - let splice_locked = get_event_msg!(nodes[0], MessageSendEvent::SendSpliceLocked, node_b_id); - lock_splice(&nodes[0], &nodes[1], &splice_locked, false); + let splice_locked = + get_event_msg_0_3!(nodes[0], MessageSendEvent_0_3::SendSpliceLocked, node_b_id); + lock_splice_0_3(&nodes[0], &nodes[1], &splice_locked, false); for node in nodes.iter() { - connect_blocks(node, EXTRA_BLOCKS_BEFORE_FAIL - ANTI_REORG_DELAY); + lightning_0_3_utils::connect_blocks(node, EXTRA_BLOCKS_BEFORE_FAIL 
- ANTI_REORG_DELAY); } // Now release the HTLC to be failed back to node A nodes[1].node.process_pending_htlc_forwards(); - let pay_secret = PaymentSecret(payment_secret_bytes); - let pay_hash = PaymentHash(payment_hash_bytes); - let pay_preimage = PaymentPreimage(payment_preimage_bytes); + let pay_secret = PaymentSecret_0_3(payment_secret_bytes); + let pay_hash = PaymentHash_0_3(payment_hash_bytes); + let pay_preimage = PaymentPreimage_0_3(payment_preimage_bytes); if fail_htlc { - let failure = HTLCHandlingFailureType::Forward { + let failure = HTLCHandlingFailureType_0_3::Forward { node_id: Some(node_c_id), - channel_id: ChannelId(chan_id_bytes_b), + channel_id: ChannelId_0_3(chan_id_bytes_b), }; - expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[failure]); - check_added_monitors(&nodes[1], 1); + lightning_0_3_utils::expect_and_process_pending_htlcs_and_htlc_handling_failed( + &nodes[1], + &[failure], + ); + lightning_0_3_utils::check_added_monitors(&nodes[1], 1); - let updates = get_htlc_update_msgs(&nodes[1], &node_a_id); + let updates = lightning_0_3_utils::get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); - do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); - let conditions = PaymentFailedConditions::new(); - expect_payment_failed_conditions(&nodes[0], pay_hash, false, conditions); + lightning_0_3_utils::do_commitment_signed_dance( + &nodes[0], + &nodes[1], + &updates.commitment_signed, + false, + false, + ); + let conditions = PaymentFailedConditions_0_3::new(); + lightning_0_3_utils::expect_payment_failed_conditions( + &nodes[0], pay_hash, false, conditions, + ); } else { - check_added_monitors(&nodes[1], 1); - let forward_event = SendEvent::from_node(&nodes[1]); + lightning_0_3_utils::check_added_monitors(&nodes[1], 1); + let forward_event = SendEvent_0_3::from_node(&nodes[1]); nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); let commitment = &forward_event.commitment_msg; - do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, false); + lightning_0_3_utils::do_commitment_signed_dance( + &nodes[2], &nodes[1], commitment, false, false, + ); - expect_and_process_pending_htlcs(&nodes[2], false); - expect_payment_claimable!(nodes[2], pay_hash, pay_secret, 1_000_000); - claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); + lightning_0_3_utils::expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable_0_3!(nodes[2], pay_hash, pay_secret, 1_000_000); + lightning_0_3_utils::claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); } } @@ -632,32 +709,49 @@ fn do_upgrade_mid_htlc_forward(test: MidHtlcForwardCase) { } // Create a dummy node to reload over with the 0.2 state - let mut chanmon_cfgs = create_chanmon_cfgs(3); + let mut chanmon_cfgs = lightning_0_3_utils::create_chanmon_cfgs(3); // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_cfgs = lightning_0_3_utils::create_node_cfgs(3, &chanmon_cfgs); let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + 
let node_chanmgrs = + lightning_0_3_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let (node_a, node_b, node_c); - let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let mut nodes = lightning_0_3_utils::create_network(3, &node_cfgs, &node_chanmgrs); - let config = test_default_channel_config(); + let config = lightning_0_3_utils::test_default_channel_config(); let a_mons = &[&mon_a_1_ser[..]]; - reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); + reload_node_0_3!( + nodes[0], + config.clone(), + &node_a_ser, + a_mons, + persister_a, + chain_mon_a, + node_a + ); let b_mons = &[&mon_b_1_ser[..], &mon_b_2_ser[..]]; - reload_node!(nodes[1], config.clone(), &node_b_ser, b_mons, persister_b, chain_mon_b, node_b); + reload_node_0_3!( + nodes[1], + config.clone(), + &node_b_ser, + b_mons, + persister_b, + chain_mon_b, + node_b + ); let c_mons = &[&mon_c_1_ser[..]]; - reload_node!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); + reload_node_0_3!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); - reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - let mut reconnect_b_c_args = ReconnectArgs::new(&nodes[1], &nodes[2]); + lightning_0_3_utils::reconnect_nodes(ReconnectArgs_0_3::new(&nodes[0], &nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs_0_3::new(&nodes[1], &nodes[2]); reconnect_b_c_args.send_channel_ready = (true, true); reconnect_b_c_args.send_announcement_sigs = (true, true); - reconnect_nodes(reconnect_b_c_args); + lightning_0_3_utils::reconnect_nodes(reconnect_b_c_args); // Now release the HTLC from node_b to node_c, to be claimed back to node_a nodes[1].node.process_pending_htlc_forwards(); @@ -666,7 +760,7 @@ fn do_upgrade_mid_htlc_forward(test: MidHtlcForwardCase) { let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); let (intercept_id, expected_outbound_amt_msat) = match events[0] { - Event::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. } => { + Event_0_3::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. } => { (intercept_id, expected_outbound_amount_msat) }, _ => panic!(), @@ -675,7 +769,7 @@ fn do_upgrade_mid_htlc_forward(test: MidHtlcForwardCase) { .node .forward_intercepted_htlc( intercept_id, - &ChannelId(chan_id_bytes_b_c), + &ChannelId_0_3(chan_id_bytes_b_c), nodes[2].node.get_our_node_id(), expected_outbound_amt_msat, ) @@ -683,17 +777,647 @@ fn do_upgrade_mid_htlc_forward(test: MidHtlcForwardCase) { nodes[1].node.process_pending_htlc_forwards(); } + let pay_secret = PaymentSecret_0_3(payment_secret_bytes); + let pay_hash = PaymentHash_0_3(payment_hash_bytes); + let pay_preimage = PaymentPreimage_0_3(payment_preimage_bytes); + + lightning_0_3_utils::check_added_monitors(&nodes[1], 1); + let forward_event = SendEvent_0_3::from_node(&nodes[1]); + nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); + let commitment = &forward_event.commitment_msg; + lightning_0_3_utils::do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, false); + + lightning_0_3_utils::expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable_0_3!(nodes[2], pay_hash, pay_secret, 1_000_000); + lightning_0_3_utils::claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); +} + +#[test] +fn test_0_3_pending_forward_upgrade() { + // Tests upgrading from 0.3 with a pending HTLC forward. 
+ // Phase 1: Create state in 0.3 with a pending forward (HTLC locked on inbound edge of node B) + // Phase 2: Reload with local lightning and complete the payment + let (node_a_ser, node_b_ser, node_c_ser, mon_a_1_ser, mon_b_1_ser, mon_b_2_ser, mon_c_1_ser); + let (node_a_id, node_b_id, _node_c_id); + let (payment_secret_bytes, payment_hash_bytes, payment_preimage_bytes); + + { + let chanmon_cfgs = lightning_0_3_utils::create_chanmon_cfgs(3); + let node_cfgs = lightning_0_3_utils::create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = + lightning_0_3_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = lightning_0_3_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + node_a_id = nodes[0].node.get_our_node_id(); + node_b_id = nodes[1].node.get_our_node_id(); + _node_c_id = nodes[2].node.get_our_node_id(); + let chan_id_a = lightning_0_3_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 1, 10_000_000, 0, + ) + .2; + + let chan_id_b = lightning_0_3_utils::create_announced_chan_between_nodes_with_value( + &nodes, 1, 2, 50_000, 0, + ) + .2; + + // Ensure all nodes are at the same initial height. + let node_max_height = nodes.iter().map(|node| node.best_block_info().1).max().unwrap(); + for node in &nodes { + let blocks_to_mine = node_max_height - node.best_block_info().1; + if blocks_to_mine > 0 { + lightning_0_3_utils::connect_blocks(node, blocks_to_mine); + } + } + + // Initiate an HTLC to be sent over node_a -> node_b -> node_c + let (preimage, hash, secret) = + lightning_0_3_utils::get_payment_preimage_hash(&nodes[2], Some(1_000_000), None); + payment_preimage_bytes = preimage.0; + payment_hash_bytes = hash.0; + payment_secret_bytes = secret.0; + + let pay_params = router_0_3::PaymentParameters::from_node_id( + _node_c_id, + lightning_0_3_utils::TEST_FINAL_CLTV, + ) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + + let route_params = + router_0_3::RouteParameters::from_payment_params_and_value(pay_params, 1_000_000); + let route = lightning_0_3_utils::get_route(&nodes[0], &route_params).unwrap(); + + let onion = RecipientOnionFields_0_3::secret_only(secret); + let id = PaymentId_0_3(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + + lightning_0_3_utils::check_added_monitors(&nodes[0], 1); + let send_event = SendEvent_0_3::from_node(&nodes[0]); + + // Lock in the HTLC on the inbound edge of node_b without initiating the outbound edge. 
+ nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); + lightning_0_3_utils::do_commitment_signed_dance( + &nodes[1], + &nodes[0], + &send_event.commitment_msg, + false, + false, + ); + // Process the pending update_add_htlcs but don't forward yet + nodes[1].node.test_process_pending_update_add_htlcs(); + let events = nodes[1].node.get_and_clear_pending_events(); + assert!(events.is_empty()); + + node_a_ser = nodes[0].node.encode(); + node_b_ser = nodes[1].node.encode(); + node_c_ser = nodes[2].node.encode(); + mon_a_1_ser = get_monitor_0_3!(nodes[0], chan_id_a).encode(); + mon_b_1_ser = get_monitor_0_3!(nodes[1], chan_id_a).encode(); + mon_b_2_ser = get_monitor_0_3!(nodes[1], chan_id_b).encode(); + mon_c_1_ser = get_monitor_0_3!(nodes[2], chan_id_b).encode(); + } + + // Create a dummy node to reload over with the 0.3 state + let mut chanmon_cfgs = lightning_local_utils::create_chanmon_cfgs(3); + + // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = lightning_local_utils::create_node_cfgs(3, &chanmon_cfgs); + let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); + let node_chanmgrs = + lightning_local_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let (node_a, node_b, node_c); + let mut nodes = lightning_local_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + let config = lightning_local_utils::test_default_channel_config(); + let a_mons = &[&mon_a_1_ser[..]]; + reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); + let b_mons = &[&mon_b_1_ser[..], &mon_b_2_ser[..]]; + reload_node!(nodes[1], config.clone(), &node_b_ser, b_mons, persister_b, chain_mon_b, node_b); + let c_mons = &[&mon_c_1_ser[..]]; + reload_node!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); + + lightning_local_utils::reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs::new(&nodes[1], &nodes[2]); + reconnect_b_c_args.send_channel_ready = (true, true); + reconnect_b_c_args.send_announcement_sigs = (true, true); + lightning_local_utils::reconnect_nodes(reconnect_b_c_args); + + // Now release the HTLC from node_b to node_c, to be claimed back to node_a + nodes[1].node.process_pending_htlc_forwards(); + let pay_secret = PaymentSecret(payment_secret_bytes); let pay_hash = PaymentHash(payment_hash_bytes); let pay_preimage = PaymentPreimage(payment_preimage_bytes); - check_added_monitors(&nodes[1], 1); + lightning_local_utils::check_added_monitors(&nodes[1], 1); let forward_event = SendEvent::from_node(&nodes[1]); nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); let commitment = &forward_event.commitment_msg; - do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, false); + lightning_local_utils::do_commitment_signed_dance( + &nodes[2], &nodes[1], commitment, false, false, + ); - expect_and_process_pending_htlcs(&nodes[2], false); + lightning_local_utils::expect_and_process_pending_htlcs(&nodes[2], false); expect_payment_claimable!(nodes[2], pay_hash, pay_secret, 1_000_000); - claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); + lightning_local_utils::claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); 
+} + +#[test] +fn test_0_2_pending_forward_upgrade_fails() { + // Tests that upgrading directly from 0.2 to local lightning fails with DecodeError::InvalidValue + // when there's a pending HTLC forward, because in 0.5 we started requiring new pending_htlc + // fields that started being written in 0.3. + // XXX update this + use lightning::ln::channelmanager::ChannelManagerReadArgs; + use lightning::ln::msgs::DecodeError; + use lightning::util::ser::ReadableArgs; + + let (node_b_ser, mon_b_1_ser, mon_b_2_ser); + + { + let chanmon_cfgs = lightning_0_2_utils::create_chanmon_cfgs(3); + let node_cfgs = lightning_0_2_utils::create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = + lightning_0_2_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = lightning_0_2_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + let chan_id_a = lightning_0_2_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 1, 10_000_000, 0, + ) + .2; + + let chan_id_b = lightning_0_2_utils::create_announced_chan_between_nodes_with_value( + &nodes, 1, 2, 50_000, 0, + ) + .2; + + // Send HTLC from node_a to node_c, hold at node_b (don't call process_pending_htlc_forwards) + let node_a_id = nodes[0].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let (_preimage, hash, secret) = + lightning_0_2_utils::get_payment_preimage_hash(&nodes[2], Some(1_000_000), None); + + let pay_params = router_0_2::PaymentParameters::from_node_id( + node_c_id, + lightning_0_2_utils::TEST_FINAL_CLTV, + ) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + + let route_params = + router_0_2::RouteParameters::from_payment_params_and_value(pay_params, 1_000_000); + let route = lightning_0_2_utils::get_route(&nodes[0], &route_params).unwrap(); + + let onion = RecipientOnionFields_0_2::secret_only(secret); + let id = PaymentId_0_2(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + + lightning_0_2_utils::check_added_monitors(&nodes[0], 1); + let send_event = lightning_0_2_utils::SendEvent::from_node(&nodes[0]); + + // Lock in the HTLC on the inbound edge of node_b without initiating the outbound edge. 
+ nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); + commitment_signed_dance_0_2!(nodes[1], nodes[0], send_event.commitment_msg, false); + + // Process the pending HTLC to create a pending forward (but don't actually forward it) + nodes[1].node.test_process_pending_update_add_htlcs(); + + // Serialize node_b with the pending forward + node_b_ser = nodes[1].node.encode(); + mon_b_1_ser = get_monitor_0_2!(nodes[1], chan_id_a).encode(); + mon_b_2_ser = get_monitor_0_2!(nodes[1], chan_id_b).encode(); + } + + // Try to reload using local lightning - this should fail with DecodeError::InvalidValue + let mut chanmon_cfgs = lightning_local_utils::create_chanmon_cfgs(1); + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = lightning_local_utils::create_node_cfgs(1, &chanmon_cfgs); + let node_chanmgrs = lightning_local_utils::create_node_chanmgrs(1, &node_cfgs, &[None]); + let nodes = lightning_local_utils::create_network(1, &node_cfgs, &node_chanmgrs); + + // Read the monitors first + use lightning::util::test_channel_signer::TestChannelSigner; + let mut monitor_read_1 = &mon_b_1_ser[..]; + let (_, monitor_1) = <( + bitcoin::BlockHash, + lightning::chain::channelmonitor::ChannelMonitor<TestChannelSigner>, + )>::read( + &mut monitor_read_1, (nodes[0].keys_manager, nodes[0].keys_manager) + ) + .unwrap(); + + let mut monitor_read_2 = &mon_b_2_ser[..]; + let (_, monitor_2) = <( + bitcoin::BlockHash, + lightning::chain::channelmonitor::ChannelMonitor<TestChannelSigner>, + )>::read( + &mut monitor_read_2, (nodes[0].keys_manager, nodes[0].keys_manager) + ) + .unwrap(); + + // Try to read the ChannelManager - this should fail + use lightning::util::hash_tables::new_hash_map; + let mut channel_monitors = new_hash_map(); + channel_monitors.insert(monitor_1.channel_id(), &monitor_1); + channel_monitors.insert(monitor_2.channel_id(), &monitor_2); + + let config = lightning_local_utils::test_default_channel_config(); + let mut node_read = &node_b_ser[..]; + let read_args = ChannelManagerReadArgs { + config, + entropy_source: nodes[0].keys_manager, + node_signer: nodes[0].keys_manager, + signer_provider: nodes[0].keys_manager, + fee_estimator: nodes[0].fee_estimator, + router: nodes[0].router, + message_router: nodes[0].message_router, + chain_monitor: nodes[0].chain_monitor, + tx_broadcaster: nodes[0].tx_broadcaster, + logger: nodes[0].logger, + channel_monitors, + }; + + let result = + <(bitcoin::BlockHash, lightning::ln::functional_test_utils::TestChannelManager)>::read( + &mut node_read, + read_args, + ); + + assert!(matches!(result, Err(DecodeError::InvalidValue))); +} + +#[test] +fn test_0_2_to_0_3_to_local_pending_forward_upgrade() { + // Tests that upgrading from 0.2 to 0.3 to local with a pending HTLC forward works. + // The key is that 0.3 can read 0.2's format and re-serialize it in the new format + // that local can then read.
+ let (node_a_ser_0_3, node_b_ser_0_3, node_c_ser_0_3); + let (mon_a_1_ser_0_3, mon_b_1_ser_0_3, mon_b_2_ser_0_3, mon_c_1_ser_0_3); + let (node_a_id, node_b_id); + let (payment_secret_bytes, payment_hash_bytes, payment_preimage_bytes); + let (chan_id_a_bytes, chan_id_b_bytes); + + // Phase 1: Create network on 0.2 with pending HTLC forward + let (node_a_ser_0_2, node_b_ser_0_2, node_c_ser_0_2); + let (mon_a_1_ser_0_2, mon_b_1_ser_0_2, mon_b_2_ser_0_2, mon_c_1_ser_0_2); + { + let chanmon_cfgs = lightning_0_2_utils::create_chanmon_cfgs(3); + let node_cfgs = lightning_0_2_utils::create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = + lightning_0_2_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = lightning_0_2_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + node_a_id = nodes[0].node.get_our_node_id(); + node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + let chan_id_a = lightning_0_2_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 1, 10_000_000, 0, + ) + .2; + chan_id_a_bytes = chan_id_a.0; + + let chan_id_b = lightning_0_2_utils::create_announced_chan_between_nodes_with_value( + &nodes, 1, 2, 50_000, 0, + ) + .2; + chan_id_b_bytes = chan_id_b.0; + + // Ensure all nodes are at the same initial height. + let node_max_height = nodes.iter().map(|node| node.best_block_info().1).max().unwrap(); + for node in &nodes { + let blocks_to_mine = node_max_height - node.best_block_info().1; + if blocks_to_mine > 0 { + lightning_0_2_utils::connect_blocks(node, blocks_to_mine); + } + } + + // Initiate an HTLC to be sent over node_a -> node_b -> node_c + let (preimage, hash, secret) = + lightning_0_2_utils::get_payment_preimage_hash(&nodes[2], Some(1_000_000), None); + payment_preimage_bytes = preimage.0; + payment_hash_bytes = hash.0; + payment_secret_bytes = secret.0; + + let pay_params = router_0_2::PaymentParameters::from_node_id( + node_c_id, + lightning_0_2_utils::TEST_FINAL_CLTV, + ) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + + let route_params = + router_0_2::RouteParameters::from_payment_params_and_value(pay_params, 1_000_000); + let route = lightning_0_2_utils::get_route(&nodes[0], &route_params).unwrap(); + + let onion = RecipientOnionFields_0_2::secret_only(secret); + let id = PaymentId_0_2(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + + lightning_0_2_utils::check_added_monitors(&nodes[0], 1); + let send_event = lightning_0_2_utils::SendEvent::from_node(&nodes[0]); + + // Lock in the HTLC on the inbound edge of node_b without initiating the outbound edge. 
+ nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); + commitment_signed_dance_0_2!(nodes[1], nodes[0], send_event.commitment_msg, false); + // Process the pending update_add_htlcs to create pending forward state + nodes[1].node.test_process_pending_update_add_htlcs(); + let events = nodes[1].node.get_and_clear_pending_events(); + assert!(events.is_empty()); + + node_a_ser_0_2 = nodes[0].node.encode(); + node_b_ser_0_2 = nodes[1].node.encode(); + node_c_ser_0_2 = nodes[2].node.encode(); + mon_a_1_ser_0_2 = get_monitor_0_2!(nodes[0], chan_id_a).encode(); + mon_b_1_ser_0_2 = get_monitor_0_2!(nodes[1], chan_id_a).encode(); + mon_b_2_ser_0_2 = get_monitor_0_2!(nodes[1], chan_id_b).encode(); + mon_c_1_ser_0_2 = get_monitor_0_2!(nodes[2], chan_id_b).encode(); + } + + // Phase 2: Reload on 0.3, forward the HTLC (but don't claim), and re-serialize + { + let mut chanmon_cfgs = lightning_0_3_utils::create_chanmon_cfgs(3); + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = lightning_0_3_utils::create_node_cfgs(3, &chanmon_cfgs); + let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); + let node_chanmgrs = + lightning_0_3_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let (node_a, node_b, node_c); + let mut nodes = lightning_0_3_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + let config = lightning_0_3_utils::test_default_channel_config(); + let a_mons = &[&mon_a_1_ser_0_2[..]]; + reload_node_0_3!( + nodes[0], + config.clone(), + &node_a_ser_0_2, + a_mons, + persister_a, + chain_mon_a, + node_a + ); + let b_mons = &[&mon_b_1_ser_0_2[..], &mon_b_2_ser_0_2[..]]; + reload_node_0_3!( + nodes[1], + config.clone(), + &node_b_ser_0_2, + b_mons, + persister_b, + chain_mon_b, + node_b + ); + let c_mons = &[&mon_c_1_ser_0_2[..]]; + reload_node_0_3!( + nodes[2], + config, + &node_c_ser_0_2, + c_mons, + persister_c, + chain_mon_c, + node_c + ); + + // Reconnect nodes after reload + lightning_0_3_utils::reconnect_nodes(ReconnectArgs_0_3::new(&nodes[0], &nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs_0_3::new(&nodes[1], &nodes[2]); + reconnect_b_c_args.send_channel_ready = (true, true); + reconnect_b_c_args.send_announcement_sigs = (true, true); + lightning_0_3_utils::reconnect_nodes(reconnect_b_c_args); + + // Forward the HTLC from node_b to node_c (but don't claim yet) + nodes[1].node.process_pending_htlc_forwards(); + + let pay_hash = PaymentHash_0_3(payment_hash_bytes); + let pay_secret = PaymentSecret_0_3(payment_secret_bytes); + + lightning_0_3_utils::check_added_monitors(&nodes[1], 1); + let forward_event = SendEvent_0_3::from_node(&nodes[1]); + nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); + let commitment = &forward_event.commitment_msg; + lightning_0_3_utils::do_commitment_signed_dance( + &nodes[2], &nodes[1], commitment, false, false, + ); + + lightning_0_3_utils::expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable_0_3!(nodes[2], pay_hash, pay_secret, 1_000_000); + + // Re-serialize in 0.3 format - now the HTLC has been forwarded (not just pending) + node_a_ser_0_3 = nodes[0].node.encode(); + node_b_ser_0_3 = nodes[1].node.encode(); + node_c_ser_0_3 = nodes[2].node.encode(); + + let chan_id_a = ChannelId_0_3(chan_id_a_bytes); + let chan_id_b = ChannelId_0_3(chan_id_b_bytes); + + 
mon_a_1_ser_0_3 = get_monitor_0_3!(nodes[0], chan_id_a).encode(); + mon_b_1_ser_0_3 = get_monitor_0_3!(nodes[1], chan_id_a).encode(); + mon_b_2_ser_0_3 = get_monitor_0_3!(nodes[1], chan_id_b).encode(); + mon_c_1_ser_0_3 = get_monitor_0_3!(nodes[2], chan_id_b).encode(); + } + + // Phase 3: Reload on local and claim the payment (HTLC already forwarded in Phase 2) + let mut chanmon_cfgs = lightning_local_utils::create_chanmon_cfgs(3); + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = lightning_local_utils::create_node_cfgs(3, &chanmon_cfgs); + let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); + let node_chanmgrs = + lightning_local_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let (node_a, node_b, node_c); + let mut nodes = lightning_local_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + let config = lightning_local_utils::test_default_channel_config(); + let a_mons = &[&mon_a_1_ser_0_3[..]]; + reload_node!( + nodes[0], + config.clone(), + &node_a_ser_0_3, + a_mons, + persister_a, + chain_mon_a, + node_a + ); + let b_mons = &[&mon_b_1_ser_0_3[..], &mon_b_2_ser_0_3[..]]; + reload_node!( + nodes[1], + config.clone(), + &node_b_ser_0_3, + b_mons, + persister_b, + chain_mon_b, + node_b + ); + let c_mons = &[&mon_c_1_ser_0_3[..]]; + reload_node!(nodes[2], config, &node_c_ser_0_3, c_mons, persister_c, chain_mon_c, node_c); + + lightning_local_utils::reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); + lightning_local_utils::reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2])); + + // The HTLC was already forwarded and claimable event generated in Phase 2. + // After reload, just claim the payment - the HTLC is already locked on node C. + let pay_preimage = PaymentPreimage(payment_preimage_bytes); + lightning_local_utils::claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); +} + +#[test] +fn test_local_to_0_3_pending_forward_downgrade() { + // Tests that downgrading from local lightning to 0.3 with a pending HTLC forward works.
+ // Phase 1: Create state in local lightning with a pending forward + // Phase 2: Reload with 0.3 and complete the payment + use lightning::ln::types::ChannelId; + + let (node_a_ser, node_b_ser, node_c_ser, mon_a_1_ser, mon_b_1_ser, mon_b_2_ser, mon_c_1_ser); + let (node_a_id, node_b_id); + let (payment_secret_bytes, payment_hash_bytes, payment_preimage_bytes); + let (chan_id_a_bytes, chan_id_b_bytes); + + // Phase 1: Create network on local lightning with pending HTLC forward + { + let chanmon_cfgs = lightning_local_utils::create_chanmon_cfgs(3); + let node_cfgs = lightning_local_utils::create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = + lightning_local_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = lightning_local_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + node_a_id = nodes[0].node.get_our_node_id(); + node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + let chan_id_a = lightning_local_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 1, 10_000_000, 0, + ) + .2; + chan_id_a_bytes = chan_id_a.0; + + let chan_id_b = lightning_local_utils::create_announced_chan_between_nodes_with_value( + &nodes, 1, 2, 50_000, 0, + ) + .2; + chan_id_b_bytes = chan_id_b.0; + + // Ensure all nodes are at the same initial height. + let node_max_height = nodes.iter().map(|node| node.best_block_info().1).max().unwrap(); + for node in &nodes { + let blocks_to_mine = node_max_height - node.best_block_info().1; + if blocks_to_mine > 0 { + lightning_local_utils::connect_blocks(node, blocks_to_mine); + } + } + + // Initiate an HTLC to be sent over node_a -> node_b -> node_c + let (preimage, hash, secret) = + lightning_local_utils::get_payment_preimage_hash(&nodes[2], Some(1_000_000), None); + payment_preimage_bytes = preimage.0; + payment_hash_bytes = hash.0; + payment_secret_bytes = secret.0; + + use lightning::routing::router as local_router; + let pay_params = local_router::PaymentParameters::from_node_id( + node_c_id, + lightning_local_utils::TEST_FINAL_CLTV, + ) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + + let route_params = + local_router::RouteParameters::from_payment_params_and_value(pay_params, 1_000_000); + let route = lightning_local_utils::get_route(&nodes[0], &route_params).unwrap(); + + use lightning::ln::channelmanager::PaymentId as LocalPaymentId; + use lightning::ln::outbound_payment::RecipientOnionFields as LocalRecipientOnionFields; + let onion = LocalRecipientOnionFields::secret_only(secret); + let id = LocalPaymentId(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + + lightning_local_utils::check_added_monitors(&nodes[0], 1); + let send_event = SendEvent::from_node(&nodes[0]); + + // Lock in the HTLC on the inbound edge of node_b without initiating the outbound edge. 
+ nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); + lightning_local_utils::do_commitment_signed_dance( + &nodes[1], + &nodes[0], + &send_event.commitment_msg, + false, + false, + ); + node_a_ser = nodes[0].node.encode(); + node_b_ser = nodes[1].node.encode(); + node_c_ser = nodes[2].node.encode(); + mon_a_1_ser = lightning::get_monitor!(nodes[0], ChannelId(chan_id_a_bytes)).encode(); + mon_b_1_ser = lightning::get_monitor!(nodes[1], ChannelId(chan_id_a_bytes)).encode(); + mon_b_2_ser = lightning::get_monitor!(nodes[1], ChannelId(chan_id_b_bytes)).encode(); + mon_c_1_ser = lightning::get_monitor!(nodes[2], ChannelId(chan_id_b_bytes)).encode(); + } + + // Phase 2: Reload on 0.3 and complete the payment + let mut chanmon_cfgs = lightning_0_3_utils::create_chanmon_cfgs(3); + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = lightning_0_3_utils::create_node_cfgs(3, &chanmon_cfgs); + let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); + let node_chanmgrs = + lightning_0_3_utils::create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let (node_a, node_b, node_c); + let mut nodes = lightning_0_3_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + let config = lightning_0_3_utils::test_default_channel_config(); + let a_mons = &[&mon_a_1_ser[..]]; + reload_node_0_3!( + nodes[0], + config.clone(), + &node_a_ser, + a_mons, + persister_a, + chain_mon_a, + node_a + ); + let b_mons = &[&mon_b_1_ser[..], &mon_b_2_ser[..]]; + reload_node_0_3!( + nodes[1], + config.clone(), + &node_b_ser, + b_mons, + persister_b, + chain_mon_b, + node_b + ); + let c_mons = &[&mon_c_1_ser[..]]; + reload_node_0_3!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); + + lightning_0_3_utils::reconnect_nodes(ReconnectArgs_0_3::new(&nodes[0], &nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs_0_3::new(&nodes[1], &nodes[2]); + reconnect_b_c_args.send_channel_ready = (true, true); + reconnect_b_c_args.send_announcement_sigs = (true, true); + lightning_0_3_utils::reconnect_nodes(reconnect_b_c_args); + + // Now release the HTLC from node_b to node_c, to be claimed back to node_a + nodes[1].node.process_pending_htlc_forwards(); + + let pay_secret = PaymentSecret_0_3(payment_secret_bytes); + let pay_hash = PaymentHash_0_3(payment_hash_bytes); + let pay_preimage = PaymentPreimage_0_3(payment_preimage_bytes); + + lightning_0_3_utils::check_added_monitors(&nodes[1], 1); + let forward_event = SendEvent_0_3::from_node(&nodes[1]); + nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); + let commitment = &forward_event.commitment_msg; + lightning_0_3_utils::do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, false); + + lightning_0_3_utils::expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable_0_3!(nodes[2], pay_hash, pay_secret, 1_000_000); + lightning_0_3_utils::claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); } diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index a537ff55874..3c94b919d97 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -1256,18 +1256,19 @@ pub(crate) struct ChannelMonitorImpl { // deserialization current_holder_commitment_number: u64, - /// The set of payment hashes from inbound 
payments for which we know the preimage. Payment - /// preimages that are not included in any unrevoked local commitment transaction or unrevoked - /// remote commitment transactions are automatically removed when commitment transactions are - /// revoked. Note that this happens one revocation after it theoretically could, leaving - /// preimages present here for the previous state even when the channel is "at rest". This is a - /// good safety buffer, but also is important as it ensures we retain payment preimages for the - /// previous local commitment transaction, which may have been broadcast already when we see - /// the revocation (in setups with redundant monitors). + /// The set of payment hashes from inbound payments and forwards for which we know the preimage. + /// Payment preimages that are not included in any unrevoked local commitment transaction or + /// unrevoked remote commitment transactions are automatically removed when commitment + /// transactions are revoked. Note that this happens one revocation after it theoretically could, + /// leaving preimages present here for the previous state even when the channel is "at rest". + /// This is a good safety buffer, but also is important as it ensures we retain payment preimages + /// for the previous local commitment transaction, which may have been broadcast already when we + /// see the revocation (in setups with redundant monitors). /// /// We also store [`PaymentClaimDetails`] here, tracking the payment information(s) for this /// preimage for inbound payments. This allows us to rebuild the inbound payment information on - /// startup even if we lost our `ChannelManager`. + /// startup even if we lost our `ChannelManager`. For forwarded HTLCs, the list of + /// [`PaymentClaimDetails`] is empty. payment_preimages: HashMap<PaymentHash, (PaymentPreimage, Vec<PaymentClaimDetails>)>, // Note that `MonitorEvent`s MUST NOT be generated during update processing, only generated diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 2762ab63fc1..9199ac83c2d 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -50,10 +50,9 @@ use crate::ln::channel_state::{ OutboundHTLCDetails, OutboundHTLCStateDetails, }; use crate::ln::channelmanager::{ - self, ChannelReadyOrder, FundingConfirmedMessage, HTLCFailureMsg, HTLCSource, - OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, PendingHTLCStatus, - RAACommitmentOrder, SentHTLCId, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT, - MIN_CLTV_EXPIRY_DELTA, + self, ChannelReadyOrder, FundingConfirmedMessage, HTLCPreviousHopData, HTLCSource, + OpenChannelMessage, PaymentClaimDetails, RAACommitmentOrder, SentHTLCId, BREAKDOWN_TIMEOUT, + MAX_LOCAL_BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, }; use crate::ln::funding::{FundingTxInput, SpliceContribution}; use crate::ln::interactivetxs::{ @@ -85,6 +84,7 @@ use crate::util::errors::APIError; use crate::util::logger::{Logger, Record, WithContext}; use crate::util::scid_utils::{block_from_scid, scid_from_parts}; use crate::util::ser::{Readable, ReadableArgs, RequiredWrapper, Writeable, Writer}; +use crate::{impl_readable_for_vec, impl_writeable_for_vec}; use alloc::collections::{btree_map, BTreeMap}; @@ -144,36 +144,11 @@ enum InboundHTLCRemovalReason { Fulfill { preimage: PaymentPreimage, attribution_data: Option<AttributionData> }, } -/// Represents the resolution status of an inbound HTLC.
-#[cfg_attr(test, derive(Debug))] -#[derive(Clone)] -enum InboundHTLCResolution { - /// Resolved implies the action we must take with the inbound HTLC has already been determined, - /// i.e., we already know whether it must be failed back or forwarded. - // - // TODO: Once this variant is removed, we should also clean up - // [`MonitorRestoreUpdates::accepted_htlcs`] as the path will be unreachable. - Resolved { pending_htlc_status: PendingHTLCStatus }, - /// Pending implies we will attempt to resolve the inbound HTLC once it has been fully committed - /// to by both sides of the channel, i.e., once a `revoke_and_ack` has been processed by both - /// nodes for the state update in which it was proposed. - Pending { update_add_htlc: msgs::UpdateAddHTLC }, -} - -impl_writeable_tlv_based_enum!(InboundHTLCResolution, - (0, Resolved) => { - (0, pending_htlc_status, required), - }, - (2, Pending) => { - (0, update_add_htlc, required), - }, -); - #[cfg_attr(test, derive(Debug))] enum InboundHTLCState { /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an /// update_add_htlc message for this HTLC. - RemoteAnnounced(InboundHTLCResolution), + RemoteAnnounced(msgs::UpdateAddHTLC), /// Included in a received commitment_signed message (implying we've /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous /// state (see the example below). We have not yet included this HTLC in a @@ -203,20 +178,20 @@ enum InboundHTLCState { /// Implies AwaitingRemoteRevoke. /// /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md - AwaitingRemoteRevokeToAnnounce(InboundHTLCResolution), + AwaitingRemoteRevokeToAnnounce(msgs::UpdateAddHTLC), /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it). /// We have also included this HTLC in our latest commitment_signed and are now just waiting /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the /// channel (before it can then get forwarded and/or removed). /// Implies AwaitingRemoteRevoke. - AwaitingAnnouncedRemoteRevoke(InboundHTLCResolution), + AwaitingAnnouncedRemoteRevoke(msgs::UpdateAddHTLC), /// An HTLC irrevocably committed in the latest commitment transaction, ready to be forwarded or /// removed. Committed { /// Used to rebuild `ChannelManager` HTLC state on restart. Previously the manager would track /// and persist all HTLC forwards and receives itself, but newer LDK versions avoid relying on /// its persistence and instead reconstruct state based on `Channel` and `ChannelMonitor` data. - update_add_htlc_opt: Option<msgs::UpdateAddHTLC>, + update_add_htlc: InboundUpdateAdd, }, /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we /// created it we would have put it in the holding cell instead). When they next revoke_and_ack @@ -294,19 +269,54 @@ impl InboundHTLCState { /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc fn should_hold_htlc(&self) -> bool { match self { - InboundHTLCState::RemoteAnnounced(res) - | InboundHTLCState::AwaitingRemoteRevokeToAnnounce(res) - | InboundHTLCState::AwaitingAnnouncedRemoteRevoke(res) => match res { - InboundHTLCResolution::Pending { update_add_htlc } => { - update_add_htlc.hold_htlc.is_some() - }, - InboundHTLCResolution::Resolved { ..
} => false, + InboundHTLCState::RemoteAnnounced(update_add_htlc) + | InboundHTLCState::AwaitingRemoteRevokeToAnnounce(update_add_htlc) + | InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add_htlc) => { + update_add_htlc.hold_htlc.is_some() }, InboundHTLCState::Committed { .. } | InboundHTLCState::LocalRemoved(_) => false, } } } +/// A field of `InboundHTLCState::Committed` containing the HTLC's `update_add_htlc` message. If +/// the HTLC is a forward and gets irrevocably committed to the outbound edge, we convert to +/// `InboundUpdateAdd::Forwarded`, thus pruning the onion and not persisting it on every +/// `ChannelManager` persist. +/// +/// Useful for reconstructing the pending HTLC set on startup. +#[derive(Debug, Clone)] +pub(super) enum InboundUpdateAdd { + /// The inbound committed HTLC's update_add_htlc message. + WithOnion { update_add_htlc: msgs::UpdateAddHTLC }, + /// This inbound HTLC is a forward that was irrevocably committed to the outbound edge, allowing + /// its onion to be pruned and no longer persisted. + Forwarded { + /// Useful if we need to fail or claim this HTLC backwards after restart, if it's missing in the + /// outbound edge. + hop_data: HTLCPreviousHopData, + /// Useful if we need to claim this HTLC backwards after a restart and it's missing in the + /// outbound edge, to generate an accurate [`Event::PaymentForwarded`]. + /// + /// [`Event::PaymentForwarded`]: crate::events::Event::PaymentForwarded + outbound_amt_msat: u64, + }, +} + +impl_writeable_tlv_based_enum_upgradable!(InboundUpdateAdd, + (0, WithOnion) => { + (0, update_add_htlc, required), + }, + // 2 was previously used for the ::Legacy variant, used for HTLCs received on LDK 0.1-. + (4, Forwarded) => { + (0, hop_data, required), + (2, outbound_amt_msat, required), + }, +); + +impl_writeable_for_vec!(&InboundUpdateAdd); +impl_readable_for_vec!(InboundUpdateAdd); + #[cfg_attr(test, derive(Debug))] struct InboundHTLCOutput { htlc_id: u64, @@ -1137,17 +1147,23 @@ pub enum UpdateFulfillCommitFetch { /// The return value of `monitor_updating_restored` pub(super) struct MonitorRestoreUpdates { pub raa: Option<msgs::RevokeAndACK>, + /// A `CommitmentUpdate` to be sent to our channel peer. pub commitment_update: Option<msgs::CommitmentUpdate>, pub commitment_order: RAACommitmentOrder, - pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>, pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, pub finalized_claimed_htlcs: Vec<(HTLCSource, Option<AttributionData>)>, + /// Inbound update_adds that are now irrevocably committed to this channel and are ready for the + /// onion to be processed in order to forward or receive the HTLC. pub pending_update_adds: Vec<msgs::UpdateAddHTLC>, pub funding_broadcastable: Option<Transaction>, pub channel_ready: Option<msgs::ChannelReady>, pub channel_ready_order: ChannelReadyOrder, pub announcement_sigs: Option<msgs::AnnouncementSignatures>, pub tx_signatures: Option<msgs::TxSignatures>, + /// The sources of outbound HTLCs that were forwarded and irrevocably committed on this channel + /// (the outbound edge), along with their outbound amounts. Useful to store in the inbound HTLC + /// to ensure it gets resolved. + pub committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, } /// The return value of `signer_maybe_unblocked` @@ -3042,7 +3058,6 @@ pub(super) struct ChannelContext { // responsible for some of the HTLCs here or not - we don't know whether the update in question // completed or not. We currently ignore these fields entirely when force-closing a channel, // but need to handle this somehow or we run the risk of losing HTLCs!
- monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>, monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, monitor_pending_finalized_fulfills: Vec<(HTLCSource, Option)>, monitor_pending_update_adds: Vec, @@ -3746,7 +3761,6 @@ impl ChannelContext { monitor_pending_channel_ready: false, monitor_pending_revoke_and_ack: false, monitor_pending_commitment_signed: false, - monitor_pending_forwards: Vec::new(), monitor_pending_failures: Vec::new(), monitor_pending_finalized_fulfills: Vec::new(), monitor_pending_update_adds: Vec::new(), @@ -3980,7 +3994,6 @@ impl ChannelContext { monitor_pending_channel_ready: false, monitor_pending_revoke_and_ack: false, monitor_pending_commitment_signed: false, - monitor_pending_forwards: Vec::new(), monitor_pending_failures: Vec::new(), monitor_pending_finalized_fulfills: Vec::new(), monitor_pending_update_adds: Vec::new(), @@ -7568,7 +7581,6 @@ where false, Vec::new(), Vec::new(), - Vec::new(), logger, ); UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat } @@ -7828,27 +7840,106 @@ where amount_msat: msg.amount_msat, payment_hash: msg.payment_hash, cltv_expiry: msg.cltv_expiry, - state: InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Pending { - update_add_htlc: msg.clone(), - }), + state: InboundHTLCState::RemoteAnnounced(msg.clone()) }); Ok(()) } /// Useful for reconstructing the set of pending HTLCs when deserializing the `ChannelManager`. - pub(super) fn get_inbound_committed_update_adds(&self) -> Vec { + pub(super) fn inbound_committed_unresolved_htlcs( + &self, + ) -> Vec<(PaymentHash, InboundUpdateAdd)> { + // We don't want to return an HTLC as needing processing if it already has a resolution that's + // pending in the holding cell. + let htlc_resolution_in_holding_cell = |id: u64| -> bool { + self.context.holding_cell_htlc_updates.iter().any(|holding_cell_htlc| { + match holding_cell_htlc { + HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => *htlc_id == id, + HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => *htlc_id == id, + HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => *htlc_id == id, + HTLCUpdateAwaitingACK::AddHTLC { .. } => false, + } + }) + }; + self.context .pending_inbound_htlcs .iter() - .filter_map(|htlc| match htlc.state { - InboundHTLCState::Committed { ref update_add_htlc_opt } => { - update_add_htlc_opt.clone() + .filter_map(|htlc| match &htlc.state { + InboundHTLCState::Committed { update_add_htlc } => { + if htlc_resolution_in_holding_cell(htlc.htlc_id) { + return None; + } + Some((htlc.payment_hash, update_add_htlc.clone())) }, _ => None, }) .collect() } + /// Useful when reconstructing the set of pending HTLC forwards when deserializing the + /// `ChannelManager`. We don't want to cache an HTLC as needing to be forwarded if it's already + /// present in the outbound edge, or else we'll double-forward. + pub(super) fn outbound_htlc_forwards( + &self, + ) -> impl Iterator + '_ { + let holding_cell_outbounds = + self.context.holding_cell_htlc_updates.iter().filter_map(|htlc| match htlc { + HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. 
} => match source { + HTLCSource::PreviousHopData(prev_hop_data) => { + Some((*payment_hash, prev_hop_data.clone())) + }, + _ => None, + }, + _ => None, + }); + let committed_outbounds = + self.context.pending_outbound_htlcs.iter().filter_map(|htlc| match &htlc.source { + HTLCSource::PreviousHopData(prev_hop_data) => { + Some((htlc.payment_hash, prev_hop_data.clone())) + }, + _ => None, + }); + holding_cell_outbounds.chain(committed_outbounds) + } + + #[cfg(test)] + pub(super) fn test_holding_cell_outbound_htlc_forwards_count(&self) -> usize { + self.context + .holding_cell_htlc_updates + .iter() + .filter_map(|htlc| match htlc { + HTLCUpdateAwaitingACK::AddHTLC { source, .. } => match source { + HTLCSource::PreviousHopData(prev_hop_data) => Some(prev_hop_data.clone()), + _ => None, + }, + _ => None, + }) + .count() + } + + /// This inbound HTLC was irrevocably forwarded to the outbound edge, so we no longer need to + /// persist its onion. + pub(super) fn prune_inbound_htlc_onion( + &mut self, htlc_id: u64, hop_data: HTLCPreviousHopData, outbound_amt_msat: u64, + ) { + for htlc in self.context.pending_inbound_htlcs.iter_mut() { + if htlc.htlc_id == htlc_id { + if let InboundHTLCState::Committed { ref mut update_add_htlc } = htlc.state { + *update_add_htlc = InboundUpdateAdd::Forwarded { hop_data, outbound_amt_msat }; + return; + } + } + } + debug_assert!(false, "If we go to prune an inbound HTLC it should be present") + } + + /// Useful for testing crash scenarios where the holding cell is not persisted. + #[cfg(test)] + pub(super) fn test_clear_holding_cell(&mut self) { + self.context.holding_cell_htlc_updates.clear() + } + /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed #[inline] fn mark_outbound_htlc_removed( @@ -7980,15 +8071,7 @@ where &self.context.channel_id() ); - self.monitor_updating_paused( - false, - false, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), logger); self.context .interactive_tx_signing_session .as_mut() @@ -8092,15 +8175,7 @@ where .as_mut() .expect("Signing session must exist for negotiated pending splice") .received_commitment_signed(); - self.monitor_updating_paused( - false, - false, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), logger); self.context.monitor_pending_tx_signatures = true; Ok(self.push_ret_blockable_mon_update(monitor_update)) @@ -8295,11 +8370,10 @@ where } for htlc in self.context.pending_inbound_htlcs.iter_mut() { - if let &InboundHTLCState::RemoteAnnounced(ref htlc_resolution) = &htlc.state { + if let &InboundHTLCState::RemoteAnnounced(ref update_add) = &htlc.state { log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.", &htlc.payment_hash, &self.context.channel_id); - htlc.state = - InboundHTLCState::AwaitingRemoteRevokeToAnnounce(htlc_resolution.clone()); + htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(update_add.clone()); need_commitment = true; } } @@ -8401,7 +8475,6 @@ where false, Vec::new(), Vec::new(), - Vec::new(), logger, ); return Ok(self.push_ret_blockable_mon_update(monitor_update)); @@ -8601,15 +8674,7 @@ where if update_fee.is_some() { "a fee update, " } else { "" }, update_add_count, update_fulfill_count, update_fail_count); - self.monitor_updating_paused( - false, - true, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - 
); + self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), logger); (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail) } else { (None, Vec::new()) @@ -8740,12 +8805,9 @@ where } log_trace!(logger, "Updating HTLCs on receipt of RAA..."); - let mut to_forward_infos = Vec::new(); let mut pending_update_adds = Vec::new(); let mut revoked_htlcs = Vec::new(); let mut finalized_claimed_htlcs = Vec::new(); - let mut update_fail_htlcs = Vec::new(); - let mut update_fail_malformed_htlcs = Vec::new(); let mut static_invoices = Vec::new(); let mut require_commitment = false; let mut value_to_self_msat_diff: i64 = 0; @@ -8809,62 +8871,29 @@ where false }; if swap { - let mut state = InboundHTLCState::Committed { update_add_htlc_opt: None }; + let mut state = + InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed { + sha256_of_onion: [0; 32], + failure_code: 0, + }); mem::swap(&mut state, &mut htlc.state); - if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) = state { + if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(update_add) = state { log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash); - htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution); + htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add); require_commitment = true; - } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) = + } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add_htlc) = state { - match resolution { - InboundHTLCResolution::Resolved { pending_htlc_status } => { - match pending_htlc_status { - PendingHTLCStatus::Fail(fail_msg) => { - log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash); - require_commitment = true; - match fail_msg { - HTLCFailureMsg::Relay(msg) => { - htlc.state = InboundHTLCState::LocalRemoved( - InboundHTLCRemovalReason::FailRelay( - msg.clone().into(), - ), - ); - update_fail_htlcs.push(msg) - }, - HTLCFailureMsg::Malformed(msg) => { - htlc.state = InboundHTLCState::LocalRemoved( - InboundHTLCRemovalReason::FailMalformed { - sha256_of_onion: msg.sha256_of_onion, - failure_code: msg.failure_code, - }, - ); - update_fail_malformed_htlcs.push(msg) - }, - } - }, - PendingHTLCStatus::Forward(forward_info) => { - log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed, attempting to forward", &htlc.payment_hash); - to_forward_infos.push((forward_info, htlc.htlc_id)); - htlc.state = InboundHTLCState::Committed { - // HTLCs will only be in state `InboundHTLCResolution::Resolved` if they were - // received on an old pre-0.0.123 version of LDK. In this case, the HTLC is - // required to be resolved prior to upgrading to 0.1+ per CHANGELOG.md. 
- update_add_htlc_opt: None, - }; - }, - } - }, - InboundHTLCResolution::Pending { update_add_htlc } => { - log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash); - pending_update_adds.push(update_add_htlc.clone()); - htlc.state = InboundHTLCState::Committed { - update_add_htlc_opt: Some(update_add_htlc), - }; - }, - } + log_trace!( + logger, + " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", + &htlc.payment_hash + ); + pending_update_adds.push(update_add_htlc.clone()); + htlc.state = InboundHTLCState::Committed { + update_add_htlc: InboundUpdateAdd::WithOnion { update_add_htlc }, + }; } } } @@ -8980,7 +9009,6 @@ where false, true, false, - to_forward_infos, revoked_htlcs, finalized_claimed_htlcs, logger, @@ -9007,9 +9035,11 @@ where release_state_str ); if self.context.channel_state.can_generate_new_commitment() { - log_debug!(logger, "Responding with a commitment update with {} HTLCs failed for channel {}", - update_fail_htlcs.len() + update_fail_malformed_htlcs.len(), - &self.context.channel_id); + log_debug!( + logger, + "Responding with a commitment update for channel {}", + &self.context.channel_id + ); } else { let reason = if self.context.channel_state.is_local_stfu_sent() { "exits quiescence" @@ -9025,7 +9055,6 @@ where false, true, false, - to_forward_infos, revoked_htlcs, finalized_claimed_htlcs, logger, @@ -9039,7 +9068,6 @@ where false, false, false, - to_forward_infos, revoked_htlcs, finalized_claimed_htlcs, logger, @@ -9344,7 +9372,6 @@ where /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress fn monitor_updating_paused( &mut self, resend_raa: bool, resend_commitment: bool, resend_channel_ready: bool, - pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, pending_finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, logger: &L, ) { @@ -9353,7 +9380,6 @@ where self.context.monitor_pending_revoke_and_ack |= resend_raa; self.context.monitor_pending_commitment_signed |= resend_commitment; self.context.monitor_pending_channel_ready |= resend_channel_ready; - self.context.monitor_pending_forwards.extend(pending_forwards); self.context.monitor_pending_failures.extend(pending_fails); self.context.monitor_pending_finalized_fulfills.extend(pending_finalized_claimed_htlcs); self.context.channel_state.set_monitor_update_in_progress(); @@ -9440,23 +9466,29 @@ where let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger); - let mut accepted_htlcs = Vec::new(); - mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards); let mut failed_htlcs = Vec::new(); mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures); let mut finalized_claimed_htlcs = Vec::new(); mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills); let mut pending_update_adds = Vec::new(); mem::swap(&mut pending_update_adds, &mut self.context.monitor_pending_update_adds); + let committed_outbound_htlc_sources = self.context.pending_outbound_htlcs.iter().filter_map(|htlc| { + if let &OutboundHTLCState::LocalAnnounced(_) = &htlc.state { + if let HTLCSource::PreviousHopData(prev_hop_data) = &htlc.source { + return Some((prev_hop_data.clone(), htlc.amount_msat)) + } + } + None + }).collect(); if self.context.channel_state.is_peer_disconnected() { self.context.monitor_pending_revoke_and_ack = false; 
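// Illustrative, standalone sketch (not part of this patch) of the shape of the
// `committed_outbound_htlc_sources` collection above: locally-announced outbound
// HTLCs whose source is a previous hop are gathered as (previous-hop, amount) pairs
// so the inbound edge can later prune its persisted onions. All types here are
// simplified stand-ins, not rust-lightning APIs.

#[derive(Clone, Debug, PartialEq)]
struct PrevHop { channel_id: [u8; 32], htlc_id: u64 }

#[derive(Clone, Debug)]
enum Source { PreviousHop(PrevHop), OutboundRoute }

struct OutboundHtlc { amount_msat: u64, locally_announced: bool, source: Source }

fn committed_outbound_sources(htlcs: &[OutboundHtlc]) -> Vec<(PrevHop, u64)> {
    htlcs
        .iter()
        .filter(|h| h.locally_announced)
        .filter_map(|h| match &h.source {
            // Only forwarded HTLCs have a previous hop to report back to.
            Source::PreviousHop(prev) => Some((prev.clone(), h.amount_msat)),
            Source::OutboundRoute => None,
        })
        .collect()
}

fn main() {
    let htlcs = vec![
        OutboundHtlc { amount_msat: 1_000, locally_announced: true, source: Source::PreviousHop(PrevHop { channel_id: [1; 32], htlc_id: 0 }) },
        OutboundHtlc { amount_msat: 2_000, locally_announced: false, source: Source::PreviousHop(PrevHop { channel_id: [1; 32], htlc_id: 1 }) },
        OutboundHtlc { amount_msat: 3_000, locally_announced: true, source: Source::OutboundRoute },
    ];
    assert_eq!(
        committed_outbound_sources(&htlcs),
        vec![(PrevHop { channel_id: [1; 32], htlc_id: 0 }, 1_000)]
    );
}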
self.context.monitor_pending_commitment_signed = false; return MonitorRestoreUpdates { raa: None, commitment_update: None, commitment_order: RAACommitmentOrder::RevokeAndACKFirst, - accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, pending_update_adds, - funding_broadcastable, channel_ready, announcement_sigs, tx_signatures: None, - channel_ready_order, + failed_htlcs, finalized_claimed_htlcs, pending_update_adds, funding_broadcastable, + channel_ready, announcement_sigs, tx_signatures: None, channel_ready_order, + committed_outbound_htlc_sources }; } @@ -9485,9 +9517,9 @@ where if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" }, match commitment_order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"}); MonitorRestoreUpdates { - raa, commitment_update, commitment_order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, + raa, commitment_update, commitment_order, failed_htlcs, finalized_claimed_htlcs, pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs, tx_signatures, - channel_ready_order, + channel_ready_order, committed_outbound_htlc_sources } } @@ -10561,15 +10593,7 @@ where }], channel_id: Some(self.context.channel_id()), }; - self.monitor_updating_paused( - false, - false, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), logger); self.push_ret_blockable_mon_update(monitor_update) } else { None @@ -11310,15 +11334,7 @@ where }], channel_id: Some(self.context.channel_id()), }; - self.monitor_updating_paused( - false, - false, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), logger); let monitor_update = self.push_ret_blockable_mon_update(monitor_update); let announcement_sigs = @@ -12763,10 +12779,10 @@ where // is acceptable. 
for htlc in self.context.pending_inbound_htlcs.iter_mut() { let new_state = - if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = + if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref update_add) = &htlc.state { - Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone())) + Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add.clone())) } else { None }; @@ -12987,15 +13003,7 @@ where let can_add_htlc = send_res.map_err(|(_, msg)| ChannelError::Ignore(msg))?; if can_add_htlc { let monitor_update = self.build_commitment_no_status_check(logger); - self.monitor_updating_paused( - false, - true, - false, - Vec::new(), - Vec::new(), - Vec::new(), - logger, - ); + self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), logger); Ok(self.push_ret_blockable_mon_update(monitor_update)) } else { Ok(None) @@ -13115,15 +13123,7 @@ where }], channel_id: Some(self.context.channel_id()), }; - self.monitor_updating_paused( - false, - false, - false, - Vec::new(), - Vec::new(), - Vec::new(), - &&logger, - ); + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), &&logger); self.push_ret_blockable_mon_update(monitor_update) } else { None @@ -13732,7 +13732,6 @@ impl OutboundV1Channel { need_channel_ready, Vec::new(), Vec::new(), - Vec::new(), logger, ); Ok((channel, channel_monitor)) @@ -14031,7 +14030,6 @@ impl InboundV1Channel { need_channel_ready, Vec::new(), Vec::new(), - Vec::new(), logger, ); @@ -14476,6 +14474,41 @@ impl Readable for AnnouncementSigsState { } } +/// Represents the resolution status of an inbound HTLC. Previously we had multiple variants here, +/// now we only use it for backwards compatibility when (de)serializing [`InboundHTLCState`]s. +enum WriteableLegacyHTLCResolution<'a> { + Pending { update_add_htlc: &'a msgs::UpdateAddHTLC }, +} + +// We can't use `impl_writeable_tlv_based_enum` due to the lifetime. +impl<'a> Writeable for WriteableLegacyHTLCResolution<'a> { + fn write(&self, writer: &mut W) -> Result<(), io::Error> { + match self { + Self::Pending { update_add_htlc } => { + 2u8.write(writer)?; + crate::_encode_varint_length_prefixed_tlv!(writer, { + (0, update_add_htlc, required) + }); + }, + } + + Ok(()) + } +} + +/// Represents the resolution status of an inbound HTLC. Previously we had multiple variants here, +/// now we only use it for backwards compatibility when (de)serializing [`InboundHTLCState`]s. +enum ReadableLegacyHTLCResolution { + Pending { update_add_htlc: msgs::UpdateAddHTLC }, +} + +impl_writeable_tlv_based_enum!(ReadableLegacyHTLCResolution, + // 0 used to be used for the ::Resolved variant in 0.2 and below. 
+ (2, Pending) => { + (0, update_add_htlc, required), + }, +); + impl Writeable for FundedChannel { fn write(&self, writer: &mut W) -> Result<(), io::Error> { // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been @@ -14548,7 +14581,7 @@ impl Writeable for FundedChannel { } } let mut removed_htlc_attribution_data: Vec<&Option> = Vec::new(); - let mut inbound_committed_update_adds: Vec<&Option> = Vec::new(); + let mut inbound_committed_update_adds: Vec<&InboundUpdateAdd> = Vec::new(); (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?; for htlc in self.context.pending_inbound_htlcs.iter() { if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state { @@ -14560,17 +14593,17 @@ impl Writeable for FundedChannel { htlc.payment_hash.write(writer)?; match &htlc.state { &InboundHTLCState::RemoteAnnounced(_) => unreachable!(), - &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution) => { + &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref update_add_htlc) => { 1u8.write(writer)?; - htlc_resolution.write(writer)?; + WriteableLegacyHTLCResolution::Pending { update_add_htlc }.write(writer)?; }, - &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => { + &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref update_add_htlc) => { 2u8.write(writer)?; - htlc_resolution.write(writer)?; + WriteableLegacyHTLCResolution::Pending { update_add_htlc }.write(writer)?; }, - &InboundHTLCState::Committed { ref update_add_htlc_opt } => { + &InboundHTLCState::Committed { ref update_add_htlc } => { 3u8.write(writer)?; - inbound_committed_update_adds.push(update_add_htlc_opt); + inbound_committed_update_adds.push(update_add_htlc); }, &InboundHTLCState::LocalRemoved(ref removal_reason) => { 4u8.write(writer)?; @@ -14742,11 +14775,8 @@ impl Writeable for FundedChannel { self.context.monitor_pending_revoke_and_ack.write(writer)?; self.context.monitor_pending_commitment_signed.write(writer)?; - (self.context.monitor_pending_forwards.len() as u64).write(writer)?; - for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() { - pending_forward.write(writer)?; - htlc_id.write(writer)?; - } + // Previously used for monitor_pending_forwards prior to LDK 0.5. 
+ 0u64.write(writer)?; (self.context.monitor_pending_failures.len() as u64).write(writer)?; for &(ref htlc_source, ref payment_hash, ref fail_reason) in @@ -14965,13 +14995,30 @@ impl Writeable for FundedChannel { } } -impl<'a, 'b, 'c, ES: EntropySource, SP: SignerProvider> - ReadableArgs<(&'a ES, &'b SP, &'c ChannelTypeFeatures)> for FundedChannel +fn dummy_prev_hop_data() -> HTLCPreviousHopData { + let txid_hash = Sha256d::from_bytes_ref(&[0; 32]); + HTLCPreviousHopData { + prev_outbound_scid_alias: 0, + user_channel_id: None, + htlc_id: 0, + incoming_packet_shared_secret: [0; 32], + phantom_shared_secret: None, + trampoline_shared_secret: None, + blinded_failure: None, + channel_id: ChannelId([0; 32]), + outpoint: OutPoint { txid: Txid::from_raw_hash(*txid_hash), index: 0 }, + counterparty_node_id: None, + cltv_expiry: None, + } +} + +impl<'a, 'b, 'c, 'd, ES: EntropySource, SP: SignerProvider, L: Logger> + ReadableArgs<(&'a ES, &'b SP, &'c L, &'d ChannelTypeFeatures)> for FundedChannel { fn read( - reader: &mut R, args: (&'a ES, &'b SP, &'c ChannelTypeFeatures), + reader: &mut R, args: (&'a ES, &'b SP, &'c L, &'d ChannelTypeFeatures), ) -> Result { - let (entropy_source, signer_provider, our_supported_features) = args; + let (entropy_source, signer_provider, logger, our_supported_features) = args; let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); if ver <= 2 { return Err(DecodeError::UnknownVersion); @@ -15006,8 +15053,9 @@ impl<'a, 'b, 'c, ES: EntropySource, SP: SignerProvider> let counterparty_next_commitment_transaction_number = Readable::read(reader)?; let value_to_self_msat = Readable::read(reader)?; - let pending_inbound_htlc_count: u64 = Readable::read(reader)?; + let logger = WithContext::from(logger, None, Some(channel_id), None); + let pending_inbound_htlc_count: u64 = Readable::read(reader)?; let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min( pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize, @@ -15020,26 +15068,25 @@ impl<'a, 'b, 'c, ES: EntropySource, SP: SignerProvider> payment_hash: Readable::read(reader)?, state: match ::read(reader)? { 1 => { - let resolution = if ver <= 3 { - InboundHTLCResolution::Resolved { - pending_htlc_status: Readable::read(reader)?, - } - } else { - Readable::read(reader)? - }; - InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) + let ReadableLegacyHTLCResolution::Pending { update_add_htlc } = + Readable::read(reader).map_err(|e| { + log_error!(logger, "Found deprecated HTLC received on LDK 0.1 or earlier. HTLC must be resolved before upgrading to LDK 0.5+, see CHANGELOG.md"); + e + })?; + InboundHTLCState::AwaitingRemoteRevokeToAnnounce(update_add_htlc) }, 2 => { - let resolution = if ver <= 3 { - InboundHTLCResolution::Resolved { - pending_htlc_status: Readable::read(reader)?, - } - } else { - Readable::read(reader)? - }; - InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) + let ReadableLegacyHTLCResolution::Pending { update_add_htlc } = + Readable::read(reader).map_err(|e| { + log_error!(logger, "Found deprecated HTLC received on LDK 0.1 or earlier. HTLC must be resolved before upgrading to LDK 0.5+, see CHANGELOG.md"); + e + })?; + InboundHTLCState::AwaitingAnnouncedRemoteRevoke(update_add_htlc) }, - 3 => InboundHTLCState::Committed { update_add_htlc_opt: None }, + 3 => InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Forwarded { + hop_data: dummy_prev_hop_data(), + outbound_amt_msat: 0, + }}, 4 => { let reason = match ::read(reader)? 
{ 0 => InboundHTLCRemovalReason::FailRelay(msgs::OnionErrorPacket { @@ -15171,13 +15218,10 @@ impl<'a, 'b, 'c, ES: EntropySource, SP: SignerProvider> let monitor_pending_revoke_and_ack = Readable::read(reader)?; let monitor_pending_commitment_signed = Readable::read(reader)?; - let monitor_pending_forwards_count: u64 = Readable::read(reader)?; - let mut monitor_pending_forwards = Vec::with_capacity(cmp::min( - monitor_pending_forwards_count as usize, - DEFAULT_MAX_HTLCS as usize, - )); - for _ in 0..monitor_pending_forwards_count { - monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?)); + let monitor_pending_forwards_count_legacy: u64 = Readable::read(reader)?; + if monitor_pending_forwards_count_legacy != 0 { + log_error!(logger, "Found deprecated HTLC received on LDK 0.1 or earlier. HTLC must be resolved before upgrading to LDK 0.5+, see CHANGELOG.md"); + return Err(DecodeError::InvalidValue); } let monitor_pending_failures_count: u64 = Readable::read(reader)?; @@ -15345,7 +15389,7 @@ impl<'a, 'b, 'c, ES: EntropySource, SP: SignerProvider> let mut pending_outbound_held_htlc_flags_opt: Option>> = None; let mut holding_cell_held_htlc_flags_opt: Option>> = None; - let mut inbound_committed_update_adds_opt: Option>> = None; + let mut inbound_committed_update_adds_opt: Option> = None; let mut holding_cell_accountable: Option> = None; let mut pending_outbound_accountable: Option> = None; @@ -15529,8 +15573,8 @@ impl<'a, 'b, 'c, ES: EntropySource, SP: SignerProvider> if let Some(update_adds) = inbound_committed_update_adds_opt { let mut iter = update_adds.into_iter(); for htlc in pending_inbound_htlcs.iter_mut() { - if let InboundHTLCState::Committed { ref mut update_add_htlc_opt } = htlc.state { - *update_add_htlc_opt = iter.next().ok_or(DecodeError::InvalidValue)?; + if let InboundHTLCState::Committed { ref mut update_add_htlc } = htlc.state { + *update_add_htlc = iter.next().ok_or(DecodeError::InvalidValue)?; } } if iter.next().is_some() { @@ -15781,7 +15825,6 @@ impl<'a, 'b, 'c, ES: EntropySource, SP: SignerProvider> monitor_pending_channel_ready, monitor_pending_revoke_and_ack, monitor_pending_commitment_signed, - monitor_pending_forwards, monitor_pending_failures, monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(), monitor_pending_update_adds: monitor_pending_update_adds.unwrap_or_default(), @@ -15897,9 +15940,10 @@ mod tests { use crate::chain::BestBlock; use crate::ln::chan_utils::{self, commit_tx_fee_sat, ChannelTransactionParameters}; use crate::ln::channel::{ - AwaitingChannelReadyFlags, ChannelState, FundedChannel, HTLCCandidate, HTLCInitiator, - HTLCUpdateAwaitingACK, InboundHTLCOutput, InboundHTLCState, InboundV1Channel, - OutboundHTLCOutput, OutboundHTLCState, OutboundV1Channel, + dummy_prev_hop_data, AwaitingChannelReadyFlags, ChannelState, FundedChannel, HTLCCandidate, + HTLCInitiator, HTLCUpdateAwaitingACK, InboundHTLCOutput, InboundHTLCState, + InboundUpdateAdd, InboundV1Channel, OutboundHTLCOutput, OutboundHTLCState, + OutboundV1Channel, }; use crate::ln::channel::{ MAX_FUNDING_SATOSHIS_NO_WUMBO, MIN_THEIR_CHAN_RESERVE_SATOSHIS, @@ -15942,6 +15986,10 @@ mod tests { use bitcoin::{ScriptBuf, WPubkeyHash, WitnessProgram, WitnessVersion}; use std::cmp; + fn dummy_inbound_update_add() -> InboundUpdateAdd { + InboundUpdateAdd::Forwarded { hop_data: dummy_prev_hop_data(), outbound_amt_msat: 0 } + } + #[test] #[rustfmt::skip] fn test_channel_state_order() { @@ -16144,7 +16192,7 @@ mod tests { amount_msat: htlc_amount_msat, 
payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()), cltv_expiry: 300000000, - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput { @@ -16715,9 +16763,11 @@ mod tests { let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64); let features = channelmanager::provided_channel_type_features(&config); - let decoded_chan = - FundedChannel::read(&mut reader, (&&keys_provider, &&keys_provider, &features)) - .unwrap(); + let decoded_chan = FundedChannel::read( + &mut reader, + (&&keys_provider, &&keys_provider, &&logger, &features), + ) + .unwrap(); assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs); assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates); } @@ -16993,7 +17043,7 @@ mod tests { amount_msat: 1000000, cltv_expiry: 500, payment_hash: PaymentHash::from(payment_preimage_0), - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); let payment_preimage_1 = @@ -17003,7 +17053,7 @@ mod tests { amount_msat: 2000000, cltv_expiry: 501, payment_hash: PaymentHash::from(payment_preimage_1), - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); let payment_preimage_2 = @@ -17045,7 +17095,7 @@ mod tests { amount_msat: 4000000, cltv_expiry: 504, payment_hash: PaymentHash::from(payment_preimage_4), - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); // commitment tx with all five HTLCs untrimmed (minimum feerate) @@ -17434,7 +17484,7 @@ mod tests { amount_msat: 2000000, cltv_expiry: 501, payment_hash: PaymentHash::from(payment_preimage_1), - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); chan.context.pending_outbound_htlcs.clear(); @@ -17687,7 +17737,7 @@ mod tests { amount_msat: 5000000, cltv_expiry: 920150, payment_hash: PaymentHash::from(htlc_in_preimage), - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, })); chan.context.pending_outbound_htlcs.extend( @@ -17751,7 +17801,7 @@ mod tests { amount_msat, cltv_expiry: 920150, payment_hash: PaymentHash::from(htlc_in_preimage), - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }, )); @@ -17818,7 +17868,7 @@ mod tests { amount_msat: 100000, cltv_expiry: 920125, payment_hash: htlc_0_in_hash, - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); let htlc_1_in_preimage = @@ -17836,7 +17886,7 @@ mod tests { amount_msat: 49900000, cltv_expiry: 920125, payment_hash: htlc_1_in_hash, - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); chan.context.pending_outbound_htlcs.extend( @@ -17889,7 +17939,7 @@ mod tests { amount_msat: 30000, payment_hash, cltv_expiry: 
920125, - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }, )); @@ -17931,7 +17981,7 @@ mod tests { amount_msat: 29525, payment_hash, cltv_expiry: 920125, - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }, )); @@ -17969,7 +18019,7 @@ mod tests { amount_msat: 29525, payment_hash, cltv_expiry: 920125, - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }, )); @@ -18007,7 +18057,7 @@ mod tests { amount_msat: 29753, payment_hash, cltv_expiry: 920125, - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }, )); @@ -18060,7 +18110,7 @@ mod tests { amount_msat, cltv_expiry, payment_hash, - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }), ); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 13197ea44ec..e253a860e44 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -59,9 +59,9 @@ use crate::ln::chan_utils::selected_commitment_sat_per_1000_weight; use crate::ln::channel::QuiescentAction; use crate::ln::channel::{ self, hold_time_since, Channel, ChannelError, ChannelUpdateStatus, DisconnectResult, - FundedChannel, FundingTxSigned, InboundV1Channel, OutboundV1Channel, PendingV2Channel, - ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, UpdateFulfillCommitFetch, - WithChannelContext, + FundedChannel, FundingTxSigned, InboundUpdateAdd, InboundV1Channel, OutboundV1Channel, + PendingV2Channel, ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, + UpdateFulfillCommitFetch, WithChannelContext, }; use crate::ln::channel_state::ChannelDetails; use crate::ln::funding::SpliceContribution; @@ -188,23 +188,6 @@ use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use core::time::Duration; use core::{cmp, mem}; -// We hold various information about HTLC relay in the HTLC objects in Channel itself: -// -// Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should -// forward the HTLC with information it will give back to us when it does so, or if it should Fail -// the HTLC with the relevant message for the Channel to handle giving to the remote peer. -// -// Once said HTLC is committed in the Channel, if the PendingHTLCStatus indicated Forward, the -// Channel will return the PendingHTLCInfo back to us, and we will create an HTLCForwardInfo -// with it to track where it came from (in case of onwards-forward error), waiting a random delay -// before we forward it. -// -// We will then use HTLCForwardInfo's PendingHTLCInfo to construct an outbound HTLC, with a -// relevant HTLCSource::PreviousHopData filled in to indicate where it came from (which we can use -// to either fail-backwards or fulfill the HTLC backwards along the relevant path). -// Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is -// our payment, which we can use to decode errors or inform the user that the payment was sent. - /// Information about where a received HTLC('s onion) has indicated the HTLC should go. 
#[derive(Clone)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug #[cfg_attr(test, derive(Debug, PartialEq))] @@ -437,14 +420,6 @@ pub(super) enum HTLCFailureMsg { Malformed(msgs::UpdateFailMalformedHTLC), } -/// Stores whether we can't forward an HTLC or relevant forwarding info -#[cfg_attr(test, derive(Debug))] -#[derive(Clone)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug -pub(super) enum PendingHTLCStatus { - Forward(PendingHTLCInfo), - Fail(HTLCFailureMsg), -} - #[cfg_attr(test, derive(Clone, Debug, PartialEq))] pub(super) struct PendingAddHTLCInfo { pub(super) forward_info: PendingHTLCInfo, @@ -1404,10 +1379,10 @@ enum PostMonitorUpdateChanResume { counterparty_node_id: PublicKey, unbroadcasted_batch_funding_txid: Option, update_actions: Vec, - htlc_forwards: Option, decode_update_add_htlcs: Option<(u64, Vec)>, finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, + committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, }, } @@ -9582,10 +9557,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ &self, channel_id: ChannelId, counterparty_node_id: PublicKey, unbroadcasted_batch_funding_txid: Option, update_actions: Vec, - htlc_forwards: Option, decode_update_add_htlcs: Option<(u64, Vec)>, finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, + committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, ) { // If the channel belongs to a batch funding transaction, the progress of the batch // should be updated as we have received funding_signed and persisted the monitor. @@ -9642,9 +9617,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ self.handle_monitor_update_completion_actions(update_actions); - if let Some(forwards) = htlc_forwards { - self.forward_htlcs(&mut [forwards][..]); - } if let Some(decode) = decode_update_add_htlcs { self.push_decode_update_add_htlcs(decode); } @@ -9656,6 +9628,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }; self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver, None); } + self.prune_persisted_inbound_htlc_onions(committed_outbound_htlc_sources); } fn handle_monitor_update_completion_actions< @@ -10099,13 +10072,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ None }; - let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption( + let decode_update_add_htlcs = self.handle_channel_resumption( pending_msg_events, chan, updates.raa, updates.commitment_update, updates.commitment_order, - updates.accepted_htlcs, updates.pending_update_adds, updates.funding_broadcastable, updates.channel_ready, @@ -10126,14 +10098,75 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ counterparty_node_id, unbroadcasted_batch_funding_txid, update_actions, - htlc_forwards, decode_update_add_htlcs, finalized_claimed_htlcs: updates.finalized_claimed_htlcs, failed_htlcs: updates.failed_htlcs, + committed_outbound_htlc_sources: updates.committed_outbound_htlc_sources, + } + } + } + + /// We store inbound committed HTLCs' onions in `Channel`s for use in reconstructing the pending + /// HTLC set on `ChannelManager` read. If an HTLC has been irrevocably forwarded to the outbound + /// edge, we no longer need to persist the inbound edge's onion and can prune it here. 
+ fn prune_persisted_inbound_htlc_onions( + &self, committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, + ) { + let per_peer_state = self.per_peer_state.read().unwrap(); + for (source, outbound_amt_msat) in committed_outbound_htlc_sources { + let counterparty_node_id = match source.counterparty_node_id.as_ref() { + Some(id) => id, + None => continue, + }; + let mut peer_state = + match per_peer_state.get(counterparty_node_id).map(|state| state.lock().unwrap()) { + Some(peer_state) => peer_state, + None => continue, + }; + + if let Some(chan) = + peer_state.channel_by_id.get_mut(&source.channel_id).and_then(|c| c.as_funded_mut()) + { + chan.prune_inbound_htlc_onion(source.htlc_id, source, outbound_amt_msat); } } } + #[cfg(test)] + pub(crate) fn test_holding_cell_outbound_htlc_forwards_count( + &self, cp_id: PublicKey, chan_id: ChannelId, + ) -> usize { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state = per_peer_state.get(&cp_id).map(|state| state.lock().unwrap()).unwrap(); + let chan = peer_state.channel_by_id.get(&chan_id).and_then(|c| c.as_funded()).unwrap(); + chan.test_holding_cell_outbound_htlc_forwards_count() + } + + #[cfg(test)] + /// Useful to check that we prune inbound HTLC onions once they are irrevocably forwarded to the + /// outbound edge, see [`Self::prune_persisted_inbound_htlc_onions`]. + pub(crate) fn test_get_inbound_committed_htlcs_with_onion( + &self, cp_id: PublicKey, chan_id: ChannelId, + ) -> usize { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state = per_peer_state.get(&cp_id).map(|state| state.lock().unwrap()).unwrap(); + let chan = peer_state.channel_by_id.get(&chan_id).and_then(|c| c.as_funded()).unwrap(); + chan.inbound_committed_unresolved_htlcs() + .iter() + .filter(|(_, htlc)| matches!(htlc, InboundUpdateAdd::WithOnion { .. })) + .count() + } + + #[cfg(test)] + /// Useful for testing crash scenarios where the holding cell of a channel is not persisted. + pub(crate) fn test_clear_channel_holding_cell(&self, cp_id: PublicKey, chan_id: ChannelId) { + let per_peer_state = self.per_peer_state.read().unwrap(); + let mut peer_state = per_peer_state.get(&cp_id).map(|state| state.lock().unwrap()).unwrap(); + let chan = + peer_state.channel_by_id.get_mut(&chan_id).and_then(|c| c.as_funded_mut()).unwrap(); + chan.test_clear_holding_cell(); + } + /// Completes channel resumption after locks have been released. /// /// Processes the [`PostMonitorUpdateChanResume`] returned by @@ -10155,20 +10188,20 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ counterparty_node_id, unbroadcasted_batch_funding_txid, update_actions, - htlc_forwards, decode_update_add_htlcs, finalized_claimed_htlcs, failed_htlcs, + committed_outbound_htlc_sources, } => { self.post_monitor_update_unlock( channel_id, counterparty_node_id, unbroadcasted_batch_funding_txid, update_actions, - htlc_forwards, decode_update_add_htlcs, finalized_claimed_htlcs, failed_htlcs, + committed_outbound_htlc_sources, ); }, } @@ -10180,17 +10213,16 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ fn handle_channel_resumption(&self, pending_msg_events: &mut Vec, channel: &mut FundedChannel, raa: Option, commitment_update: Option, commitment_order: RAACommitmentOrder, - pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_update_adds: Vec, - funding_broadcastable: Option, + pending_update_adds: Vec, funding_broadcastable: Option, channel_ready: Option, announcement_sigs: Option, tx_signatures: Option, tx_abort: Option, channel_ready_order: ChannelReadyOrder, - ) -> (Option<(u64, PublicKey, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec)>) { + ) -> Option<(u64, Vec)> { let logger = WithChannelContext::from(&self.logger, &channel.context, None); - log_trace!(logger, "Handling channel resumption with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort", + log_trace!(logger, "Handling channel resumption with {} RAA, {} commitment update, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort", if raa.is_some() { "an" } else { "no" }, if commitment_update.is_some() { "a" } else { "no" }, - pending_forwards.len(), pending_update_adds.len(), + pending_update_adds.len(), if funding_broadcastable.is_some() { "" } else { "not " }, if channel_ready.is_some() { "sending" } else { "without" }, if announcement_sigs.is_some() { "sending" } else { "without" }, @@ -10201,14 +10233,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let counterparty_node_id = channel.context.get_counterparty_node_id(); let outbound_scid_alias = channel.context.outbound_scid_alias(); - let mut htlc_forwards = None; - if !pending_forwards.is_empty() { - htlc_forwards = Some(( - outbound_scid_alias, channel.context.get_counterparty_node_id(), - channel.funding.get_funding_txo().unwrap(), channel.context.channel_id(), - channel.context.get_user_id(), pending_forwards - )); - } let mut decode_update_add_htlcs = None; if !pending_update_adds.is_empty() { decode_update_add_htlcs = Some((outbound_scid_alias, pending_update_adds)); @@ -10295,7 +10319,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }, None => { debug_assert!(false, "Channel resumed without a funding txo, this should never happen!"); - return (htlc_forwards, decode_update_add_htlcs); + return decode_update_add_htlcs; } }; } else { @@ -10313,7 +10337,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ emit_initial_channel_ready_event!(pending_events, channel); } - (htlc_forwards, decode_update_add_htlcs) + decode_update_add_htlcs } #[rustfmt::skip] @@ -12418,12 +12442,11 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } } let need_lnd_workaround = chan.context.workaround_lnd_bug_4006.take(); - let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption( + let decode_update_add_htlcs = self.handle_channel_resumption( &mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.commitment_order, - Vec::new(), Vec::new(), None, responses.channel_ready, responses.announcement_sigs, + Vec::new(), None, responses.channel_ready, responses.announcement_sigs, responses.tx_signatures, responses.tx_abort, responses.channel_ready_order, ); - debug_assert!(htlc_forwards.is_none()); debug_assert!(decode_update_add_htlcs.is_none()); if let Some(upd) = channel_update { peer_state.pending_msg_events.push(upd); @@ -16561,9 +16584,21 @@ pub fn provided_init_features(config: &UserConfig) -> InitFeatures { features } -const SERIALIZATION_VERSION: u8 = 1; +// Bumped to 5 in 0.5 because we stop reading/writing legacy ChannelManager pending HTLC maps and +// instead reconstruct them from `Channel{Monitor}` data as part of removing the requirement to +// regularly persist the `ChannelManager`. +const SERIALIZATION_VERSION: u8 = 5; const MIN_SERIALIZATION_VERSION: u8 = 1; +// LDK 0.5+ reconstructs the set of pending HTLCs from `Channel{Monitor}` data that started +// being written in 0.3, ignoring legacy `ChannelManager` HTLC maps on read and not writing them. +// LDK 0.5+ automatically fails to read if the pending HTLC set cannot be reconstructed, i.e. +// if we were last written with pending HTLCs on 0.2- or if the new 0.3+ fields are missing. +// +// If 0.3 or 0.4 reads this manager version, it knows that the legacy maps were not written and +// acts accordingly. +const RECONSTRUCT_HTLCS_FROM_CHANS_VERSION: u8 = 5; + impl_writeable_tlv_based!(PhantomRouteHints, { (2, channels, required_vec), (4, phantom_scid, required), @@ -16699,11 +16734,6 @@ impl Readable for HTLCFailureMsg { } } -impl_writeable_tlv_based_enum_legacy!(PendingHTLCStatus, ; - (0, Forward), - (1, Fail), -); - impl_writeable_tlv_based_enum!(BlindedFailure, (0, FromIntroductionNode) => {}, (2, FromBlindedNode) => {}, @@ -17030,24 +17060,6 @@ impl< } } - { - let forward_htlcs = self.forward_htlcs.lock().unwrap(); - (forward_htlcs.len() as u64).write(writer)?; - for (short_channel_id, pending_forwards) in forward_htlcs.iter() { - short_channel_id.write(writer)?; - (pending_forwards.len() as u64).write(writer)?; - for forward in pending_forwards { - forward.write(writer)?; - } - } - } - - let mut decode_update_add_htlcs_opt = None; - let decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap(); - if !decode_update_add_htlcs.is_empty() { - decode_update_add_htlcs_opt = Some(decode_update_add_htlcs); - } - let claimable_payments = self.claimable_payments.lock().unwrap(); let pending_outbound_payments = self.pending_outbound_payments.pending_outbound_payments.lock().unwrap(); @@ -17094,8 +17106,6 @@ impl< } } - let our_pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); - // Since some FundingNegotiation variants are not persisted, any splice in such state must // be failed upon reload. 
However, as the necessary information for the SpliceFailed event // is not persisted, the event itself needs to be persisted even though it hasn't been @@ -17191,11 +17201,6 @@ impl< } } - let mut pending_intercepted_htlcs = None; - if our_pending_intercepts.len() != 0 { - pending_intercepted_htlcs = Some(our_pending_intercepts); - } - let mut pending_claiming_payments = Some(&claimable_payments.pending_claiming_payments); if pending_claiming_payments.as_ref().unwrap().is_empty() { // LDK versions prior to 0.0.113 do not know how to read the pending claimed payments @@ -17218,7 +17223,7 @@ impl< write_tlv_fields!(writer, { (1, pending_outbound_payments_no_retry, required), - (2, pending_intercepted_htlcs, option), + // 2 was previously used for the pending_intercepted_htlcs map. (3, pending_outbound_payments, required), (4, pending_claiming_payments, option), (5, self.our_network_pubkey, required), @@ -17229,7 +17234,7 @@ impl< (10, legacy_in_flight_monitor_updates, option), (11, self.probing_cookie_secret, required), (13, htlc_onion_fields, optional_vec), - (14, decode_update_add_htlcs_opt, option), + // 14 was previously used for the decode_update_add_htlcs map. (15, self.inbound_payment_id_secret, required), (17, in_flight_monitor_updates, option), (19, peer_storage_dir, optional_vec), @@ -17309,12 +17314,8 @@ pub(super) struct ChannelManagerData { in_flight_monitor_updates: HashMap<(PublicKey, ChannelId), Vec>, peer_storage_dir: Vec<(PublicKey, Vec)>, async_receive_offer_cache: AsyncReceiveOfferCache, - // Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of - // regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from - // `Channel{Monitor}` data. - forward_htlcs_legacy: HashMap>, - pending_intercepted_htlcs_legacy: HashMap, - decode_update_add_htlcs_legacy: HashMap>, + // The `ChannelManager` version that was written. + version: u8, } /// Arguments for deserializing [`ChannelManagerData`]. @@ -17338,7 +17339,7 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> fn read( reader: &mut R, args: ChannelManagerDataReadArgs<'a, ES, NS, SP, L>, ) -> Result { - let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); + let version = read_ver_prefix!(reader, SERIALIZATION_VERSION); let chain_hash: ChainHash = Readable::read(reader)?; let best_block_height: u32 = Readable::read(reader)?; @@ -17354,26 +17355,30 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> ( args.entropy_source, args.signer_provider, + args.logger, &provided_channel_type_features(&args.config), ), )?; channels.push(channel); } - let forward_htlcs_count: u64 = Readable::read(reader)?; - let mut forward_htlcs_legacy: HashMap> = - hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); - for _ in 0..forward_htlcs_count { - let short_channel_id = Readable::read(reader)?; - let pending_forwards_count: u64 = Readable::read(reader)?; - let mut pending_forwards = Vec::with_capacity(cmp::min( - pending_forwards_count as usize, - MAX_ALLOC_SIZE / mem::size_of::(), - )); - for _ in 0..pending_forwards_count { - pending_forwards.push(Readable::read(reader)?); + if version < RECONSTRUCT_HTLCS_FROM_CHANS_VERSION { + let forward_htlcs_count: u64 = Readable::read(reader)?; + // This map is written if version = 1 (LDK versions 0.4-) only. 
+ let mut _forward_htlcs_legacy: HashMap> = + hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); + for _ in 0..forward_htlcs_count { + let short_channel_id = Readable::read(reader)?; + let pending_forwards_count: u64 = Readable::read(reader)?; + let mut pending_forwards = Vec::with_capacity(cmp::min( + pending_forwards_count as usize, + MAX_ALLOC_SIZE / mem::size_of::(), + )); + for _ in 0..pending_forwards_count { + pending_forwards.push(Readable::read(reader)?); + } + _forward_htlcs_legacy.insert(short_channel_id, pending_forwards); } - forward_htlcs_legacy.insert(short_channel_id, pending_forwards); } let claimable_htlcs_count: u64 = Readable::read(reader)?; @@ -17460,9 +17465,13 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> }; } - let mut pending_intercepted_htlcs_legacy: Option> = - None; - let mut decode_update_add_htlcs_legacy: Option>> = + // Read for compatibility with 0.4- but no longer used in 0.5+, instead these maps are + // reconstructed during runtime from decode_update_add_htlcs, via the next call to + // process_pending_htlc_forwards. + let mut _pending_intercepted_htlcs_legacy: Option< + HashMap, + > = None; + let mut _decode_update_add_htlcs_legacy: Option>> = None; // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients. let mut pending_outbound_payments_no_retry: Option>> = @@ -17490,7 +17499,7 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> let mut async_receive_offer_cache: AsyncReceiveOfferCache = AsyncReceiveOfferCache::new(); read_tlv_fields!(reader, { (1, pending_outbound_payments_no_retry, option), - (2, pending_intercepted_htlcs_legacy, option), + (2, _pending_intercepted_htlcs_legacy, (legacy, HashMap, |_| None)), (3, pending_outbound_payments, option), (4, pending_claiming_payments, option), (5, received_network_pubkey, option), @@ -17501,7 +17510,7 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> (10, legacy_in_flight_monitor_updates, option), (11, probing_cookie_secret, option), (13, claimable_htlc_onion_fields, optional_vec), - (14, decode_update_add_htlcs_legacy, option), + (14, _decode_update_add_htlcs_legacy, (legacy, HashMap>, |_| None)), (15, inbound_payment_id_secret, option), (17, in_flight_monitor_updates, option), (19, peer_storage_dir, optional_vec), @@ -17634,13 +17643,10 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> best_block_height, best_block_hash, channels, - forward_htlcs_legacy, claimable_payments, peer_init_features, pending_events_read, highest_seen_timestamp, - pending_intercepted_htlcs_legacy: pending_intercepted_htlcs_legacy - .unwrap_or_else(new_hash_map), pending_outbound_payments, pending_claiming_payments: pending_claiming_payments.unwrap_or_else(new_hash_map), received_network_pubkey, @@ -17648,12 +17654,11 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> .unwrap_or_else(Vec::new), fake_scid_rand_bytes, probing_cookie_secret, - decode_update_add_htlcs_legacy: decode_update_add_htlcs_legacy - .unwrap_or_else(new_hash_map), inbound_payment_id_secret, in_flight_monitor_updates: in_flight_monitor_updates.unwrap_or_default(), peer_storage_dir: peer_storage_dir.unwrap_or_default(), async_receive_offer_cache, + version, }) } } @@ -17928,23 +17933,21 @@ impl< best_block_height, best_block_hash, channels, - mut forward_htlcs_legacy, claimable_payments, peer_init_features, mut pending_events_read, highest_seen_timestamp, - mut 
pending_intercepted_htlcs_legacy, pending_outbound_payments, pending_claiming_payments, received_network_pubkey, monitor_update_blocked_actions_per_peer, mut fake_scid_rand_bytes, mut probing_cookie_secret, - mut decode_update_add_htlcs_legacy, mut inbound_payment_id_secret, mut in_flight_monitor_updates, peer_storage_dir, async_receive_offer_cache, + version: _version, } = data; let empty_peer_state = || PeerState { @@ -18228,7 +18231,7 @@ impl< } // Post-deserialization processing - let mut decode_update_add_htlcs = new_hash_map(); + let mut decode_update_add_htlcs: HashMap> = new_hash_map(); if fake_scid_rand_bytes.is_none() { fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes()); } @@ -18490,41 +18493,36 @@ impl< pending_background_events.push(new_event); } - // In LDK 0.2 and below, the `ChannelManager` would track all payments and HTLCs internally and - // persist that state, relying on it being up-to-date on restart. Newer versions are moving - // towards reducing this reliance on regular persistence of the `ChannelManager`, and instead - // reconstruct HTLC/payment state based on `Channel{Monitor}` data if - // `reconstruct_manager_from_monitors` is set below. Currently it is only set in tests, randomly - // to ensure the legacy codepaths also have test coverage. - #[cfg(not(test))] - let reconstruct_manager_from_monitors = false; - #[cfg(test)] - let reconstruct_manager_from_monitors = { - use core::hash::{BuildHasher, Hasher}; - - match std::env::var("LDK_TEST_REBUILD_MGR_FROM_MONITORS") { - Ok(val) => match val.as_str() { - "1" => true, - "0" => false, - _ => panic!("LDK_TEST_REBUILD_MGR_FROM_MONITORS must be 0 or 1, got: {}", val), - }, - Err(_) => { - let rand_val = - std::collections::hash_map::RandomState::new().build_hasher().finish(); - if rand_val % 2 == 0 { - true - } else { - false - } - }, - } - }; - // If there's any preimages for forwarded HTLCs hanging around in ChannelMonitors we // should ensure we try them again on the inbound edge. We put them here and do so after we // have a fully-constructed `ChannelManager` at the end. let mut pending_claims_to_replay = Vec::new(); + // If we find an inbound HTLC that claims to already be forwarded to the outbound edge, we + // store an identifier for it here and verify that it is either (a) present in the outbound + // edge or (b) removed from the outbound edge via claim. If it's in neither of these states, we + // infer that it was removed from the outbound edge via fail, and fail it backwards to ensure + // that it is handled. 
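// Illustrative, standalone sketch (not part of this patch) of the pruning pattern
// described in the comment above: entries in an `already_forwarded_htlcs`-style map
// are removed as the matching HTLC is found on the outbound edge (or in a claim),
// and whatever survives the scan must be failed backwards. Key and value types are
// simplified stand-ins, not rust-lightning APIs.

use std::collections::HashMap;

type ChannelId = [u8; 32];
type PaymentHash = [u8; 32];

fn prune_forwarded_htlc(
    already_forwarded: &mut HashMap<(ChannelId, PaymentHash), Vec<(u64 /* htlc_id */, u64 /* amt_msat */)>>,
    channel_id: ChannelId, payment_hash: PaymentHash, htlc_id: u64,
) {
    if let Some(entry) = already_forwarded.get_mut(&(channel_id, payment_hash)) {
        // Drop the record for this specific HTLC id; it was accounted for downstream.
        entry.retain(|(id, _)| *id != htlc_id);
        if entry.is_empty() {
            already_forwarded.remove(&(channel_id, payment_hash));
        }
    }
}

fn main() {
    let mut map = HashMap::new();
    map.insert(([0; 32], [1; 32]), vec![(5, 1_000), (6, 2_000)]);
    // The HTLC with id 5 was found on the outbound edge, so it is pruned.
    prune_forwarded_htlc(&mut map, [0; 32], [1; 32], 5);
    assert_eq!(map[&([0; 32], [1; 32])], vec![(6, 2_000)]);
    // Once id 6 is also accounted for, the whole entry disappears.
    prune_forwarded_htlc(&mut map, [0; 32], [1; 32], 6);
    assert!(map.is_empty());
}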
+ let mut already_forwarded_htlcs: HashMap< + (ChannelId, PaymentHash), + Vec<(HTLCPreviousHopData, u64)>, + > = new_hash_map(); + let prune_forwarded_htlc = |already_forwarded_htlcs: &mut HashMap< + (ChannelId, PaymentHash), + Vec<(HTLCPreviousHopData, u64)>, + >, + prev_hop: &HTLCPreviousHopData, + payment_hash: &PaymentHash| { + if let hash_map::Entry::Occupied(mut entry) = + already_forwarded_htlcs.entry((prev_hop.channel_id, *payment_hash)) + { + entry.get_mut().retain(|(htlc, _)| prev_hop.htlc_id != htlc.htlc_id); + if entry.get().is_empty() { + entry.remove(); + } + } + }; + { // If we're tracking pending payments, ensure we haven't lost any by looking at the // ChannelMonitor data for any channels for which we do not have authorative state @@ -18544,19 +18542,28 @@ impl< let mut peer_state_lock = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lock; is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); - if reconstruct_manager_from_monitors { - if let Some(chan) = peer_state.channel_by_id.get(channel_id) { - if let Some(funded_chan) = chan.as_funded() { - let inbound_committed_update_adds = - funded_chan.get_inbound_committed_update_adds(); - if !inbound_committed_update_adds.is_empty() { - // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized - // `Channel`, as part of removing the requirement to regularly persist the - // `ChannelManager`. - decode_update_add_htlcs.insert( - funded_chan.context.outbound_scid_alias(), - inbound_committed_update_adds, - ); + if let Some(chan) = peer_state.channel_by_id.get(channel_id) { + if let Some(funded_chan) = chan.as_funded() { + let scid_alias = funded_chan.context.outbound_scid_alias(); + let inbound_committed_update_adds = + funded_chan.inbound_committed_unresolved_htlcs(); + for (payment_hash, htlc) in inbound_committed_update_adds { + match htlc { + InboundUpdateAdd::WithOnion { update_add_htlc } => { + // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized + // `Channel` as part of removing the requirement to regularly persist the + // `ChannelManager`. 
+ decode_update_add_htlcs + .entry(scid_alias) + .or_insert_with(Vec::new) + .push(update_add_htlc); + }, + InboundUpdateAdd::Forwarded { hop_data, outbound_amt_msat } => { + already_forwarded_htlcs + .entry((hop_data.channel_id, payment_hash)) + .or_insert_with(Vec::new) + .push((hop_data, outbound_amt_msat)); + }, } } } @@ -18600,178 +18607,159 @@ impl< let mut peer_state_lock = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lock; is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); + if !is_channel_closed { + if let Some(chan) = peer_state.channel_by_id.get(channel_id) { + if let Some(funded_chan) = chan.as_funded() { + for (payment_hash, prev_hop) in funded_chan.outbound_htlc_forwards() + { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + &prev_hop, + "HTLC already forwarded to the outbound edge", + &args.logger, + ); + prune_forwarded_htlc( + &mut already_forwarded_htlcs, + &prev_hop, + &payment_hash, + ); + } + } + } + } } - for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() - { - let logger = - WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash)); - let htlc_id = SentHTLCId::from_source(&htlc_source); - match htlc_source { - HTLCSource::PreviousHopData(prev_hop_data) => { - let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { - info.prev_funding_outpoint == prev_hop_data.outpoint - && info.prev_htlc_id == prev_hop_data.htlc_id - }; - // If `reconstruct_manager_from_monitors` is set, we always add all inbound committed - // HTLCs to `decode_update_add_htlcs` in the above loop, but we need to prune from - // those added HTLCs if they were already forwarded to the outbound edge. Otherwise, - // we'll double-forward. - if reconstruct_manager_from_monitors { + if is_channel_closed { + for (htlc_source, (htlc, preimage_opt)) in + monitor.get_all_current_outbound_htlcs() + { + let logger = WithChannelMonitor::from( + &args.logger, + monitor, + Some(htlc.payment_hash), + ); + let htlc_id = SentHTLCId::from_source(&htlc_source); + match htlc_source { + HTLCSource::PreviousHopData(prev_hop_data) => { + // We always add all inbound committed HTLCs to `decode_update_add_htlcs` in the + // above loop, but we need to prune from those added HTLCs if they were already + // forwarded to the outbound edge. Otherwise, we'll double-forward. dedup_decode_update_add_htlcs( &mut decode_update_add_htlcs, &prev_hop_data, "HTLC already forwarded to the outbound edge", &&logger, ); - } - - if !is_channel_closed || reconstruct_manager_from_monitors { - continue; - } - // The ChannelMonitor is now responsible for this HTLC's - // failure/success and will let us know what its outcome is. If we - // still have an entry for this HTLC in `forward_htlcs_legacy`, - // `pending_intercepted_htlcs_legacy`, or - // `decode_update_add_htlcs_legacy`, we were apparently not persisted - // after the monitor was when forwarding the payment. 
- dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs_legacy, - &prev_hop_data, - "HTLC was forwarded to the closed channel", - &&logger, - ); - forward_htlcs_legacy.retain(|_, forwards| { - forwards.retain(|forward| { - if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { - if pending_forward_matches_htlc(&htlc_info) { - log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.channel_id()); - false - } else { true } - } else { true } - }); - !forwards.is_empty() - }); - pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| { - if pending_forward_matches_htlc(&htlc_info) { - log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.channel_id()); - pending_events_read.retain(|(event, _)| { - if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event { - intercepted_id != ev_id - } else { true } - }); - false - } else { true } - }); - }, - HTLCSource::OutboundRoute { - payment_id, - session_priv, - path, - bolt12_invoice, - .. - } => { - if !is_channel_closed { - continue; - } - if let Some(preimage) = preimage_opt { - let pending_events = Mutex::new(pending_events_read); - let update = PaymentCompleteUpdate { - counterparty_node_id: monitor.get_counterparty_node_id(), - channel_funding_outpoint: monitor.get_funding_txo(), - channel_id: monitor.channel_id(), - htlc_id, - }; - let mut compl_action = Some( - EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(update) + prune_forwarded_htlc( + &mut already_forwarded_htlcs, + &prev_hop_data, + &htlc.payment_hash, ); - pending_outbounds.claim_htlc( - payment_id, - preimage, - bolt12_invoice, - session_priv, - path, - true, - &mut compl_action, - &pending_events, - &logger, + }, + HTLCSource::OutboundRoute { + payment_id, + session_priv, + path, + bolt12_invoice, + .. + } => { + if let Some(preimage) = preimage_opt { + let pending_events = Mutex::new(pending_events_read); + let update = PaymentCompleteUpdate { + counterparty_node_id: monitor.get_counterparty_node_id(), + channel_funding_outpoint: monitor.get_funding_txo(), + channel_id: monitor.channel_id(), + htlc_id, + }; + let mut compl_action = Some( + EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(update) ); - // If the completion action was not consumed, then there was no - // payment to claim, and we need to tell the `ChannelMonitor` - // we don't need to hear about the HTLC again, at least as long - // as the PaymentSent event isn't still sitting around in our - // event queue. - let have_action = if compl_action.is_some() { - let pending_events = pending_events.lock().unwrap(); - pending_events.iter().any(|(_, act)| *act == compl_action) - } else { - false - }; - if !have_action && compl_action.is_some() { - let mut peer_state = per_peer_state + pending_outbounds.claim_htlc( + payment_id, + preimage, + bolt12_invoice, + session_priv, + path, + true, + &mut compl_action, + &pending_events, + &logger, + ); + // If the completion action was not consumed, then there was no + // payment to claim, and we need to tell the `ChannelMonitor` + // we don't need to hear about the HTLC again, at least as long + // as the PaymentSent event isn't still sitting around in our + // event queue. 
+ let have_action = if compl_action.is_some() { + let pending_events = pending_events.lock().unwrap(); + pending_events.iter().any(|(_, act)| *act == compl_action) + } else { + false + }; + if !have_action && compl_action.is_some() { + let mut peer_state = per_peer_state .get(&counterparty_node_id) .map(|state| state.lock().unwrap()) .expect( "Channels originating a preimage must have peer state", ); - let update_id = peer_state + let update_id = peer_state .closed_channel_monitor_update_ids .get_mut(channel_id) .expect( "Channels originating a preimage must have a monitor", ); - // Note that for channels closed pre-0.1, the latest - // update_id is `u64::MAX`. - *update_id = update_id.saturating_add(1); - - pending_background_events.push( - BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id: monitor - .get_counterparty_node_id(), - funding_txo: monitor.get_funding_txo(), - channel_id: monitor.channel_id(), - update: ChannelMonitorUpdate { - update_id: *update_id, - channel_id: Some(monitor.channel_id()), - updates: vec![ + // Note that for channels closed pre-0.1, the latest + // update_id is `u64::MAX`. + *update_id = update_id.saturating_add(1); + + pending_background_events.push( + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id: monitor + .get_counterparty_node_id(), + funding_txo: monitor.get_funding_txo(), + channel_id: monitor.channel_id(), + update: ChannelMonitorUpdate { + update_id: *update_id, + channel_id: Some(monitor.channel_id()), + updates: vec![ ChannelMonitorUpdateStep::ReleasePaymentComplete { htlc: htlc_id, }, ], + }, }, - }, - ); + ); + } + pending_events_read = pending_events.into_inner().unwrap(); } - pending_events_read = pending_events.into_inner().unwrap(); - } - }, + }, + } } - } - for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() { - let logger = - WithChannelMonitor::from(&args.logger, monitor, Some(payment_hash)); - log_info!( - logger, - "Failing HTLC with payment hash {} as it was resolved on-chain.", - payment_hash - ); - let completion_action = Some(PaymentCompleteUpdate { - counterparty_node_id: monitor.get_counterparty_node_id(), - channel_funding_outpoint: monitor.get_funding_txo(), - channel_id: monitor.channel_id(), - htlc_id: SentHTLCId::from_source(&htlc_source), - }); + for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() { + let logger = + WithChannelMonitor::from(&args.logger, monitor, Some(payment_hash)); + log_info!( + logger, + "Failing HTLC with payment hash {} as it was resolved on-chain.", + payment_hash + ); + let completion_action = Some(PaymentCompleteUpdate { + counterparty_node_id: monitor.get_counterparty_node_id(), + channel_funding_outpoint: monitor.get_funding_txo(), + channel_id: monitor.channel_id(), + htlc_id: SentHTLCId::from_source(&htlc_source), + }); - failed_htlcs.push(( - htlc_source, - payment_hash, - monitor.get_counterparty_node_id(), - monitor.channel_id(), - LocalHTLCFailureReason::OnChainTimeout, - completion_action, - )); + failed_htlcs.push(( + htlc_source, + payment_hash, + monitor.get_counterparty_node_id(), + monitor.channel_id(), + LocalHTLCFailureReason::OnChainTimeout, + completion_action, + )); + } } // Whether the downstream channel was closed or not, try to re-apply any payment @@ -19073,54 +19061,41 @@ impl< } } - if reconstruct_manager_from_monitors { - // De-duplicate HTLCs that are present in both `failed_htlcs` and `decode_update_add_htlcs`. 
- // Omitting this de-duplication could lead to redundant HTLC processing and/or bugs. - for (src, _, _, _, _, _) in failed_htlcs.iter() { - if let HTLCSource::PreviousHopData(prev_hop_data) = src { - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - prev_hop_data, - "HTLC was failed backwards during manager read", - &args.logger, - ); - } + // De-duplicate HTLCs that are present in both `failed_htlcs` and `decode_update_add_htlcs`. + // Omitting this de-duplication could lead to redundant HTLC processing and/or bugs. + for (src, payment_hash, _, _, _, _) in failed_htlcs.iter() { + if let HTLCSource::PreviousHopData(prev_hop_data) = src { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was failed backwards during manager read", + &args.logger, + ); + prune_forwarded_htlc(&mut already_forwarded_htlcs, prev_hop_data, payment_hash); } + } - // See above comment on `failed_htlcs`. - for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { - for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - prev_hop_data, - "HTLC was already decoded and marked as a claimable payment", - &args.logger, - ); - } + // See above comment on `failed_htlcs`. + for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { + for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was already decoded and marked as a claimable payment", + &args.logger, + ); } } - let (decode_update_add_htlcs, forward_htlcs, pending_intercepted_htlcs) = - if reconstruct_manager_from_monitors { - (decode_update_add_htlcs, new_hash_map(), new_hash_map()) - } else { - ( - decode_update_add_htlcs_legacy, - forward_htlcs_legacy, - pending_intercepted_htlcs_legacy, - ) - }; - - // If we have a pending intercept HTLC present but no corresponding event, add that now rather - // than relying on the user having persisted the event prior to shutdown. - for (id, fwd) in pending_intercepted_htlcs.iter() { - if !pending_events_read.iter().any( - |(ev, _)| matches!(ev, Event::HTLCIntercepted { intercept_id, .. } if intercept_id == id), - ) { - match create_htlc_intercepted_event(*id, fwd) { - Ok(ev) => pending_events_read.push_back((ev, None)), - Err(()) => debug_assert!(false), - } + // See above comment on `failed_htlcs`. 
+ for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { + for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was already decoded and marked as a claimable payment", + &args.logger, + ); } } @@ -19176,9 +19151,9 @@ impl< inbound_payment_key: expanded_inbound_key, pending_outbound_payments: pending_outbounds, - pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs), + pending_intercepted_htlcs: Mutex::new(new_hash_map()), - forward_htlcs: Mutex::new(forward_htlcs), + forward_htlcs: Mutex::new(new_hash_map()), decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs), claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, @@ -19229,9 +19204,46 @@ impl< }; let mut processed_claims: HashSet> = new_hash_set(); - for (_, monitor) in args.channel_monitors.iter() { + for (channel_id, monitor) in args.channel_monitors.iter() { for (payment_hash, (payment_preimage, payment_claims)) in monitor.get_stored_preimages() { + // If we have unresolved inbound committed HTLCs that were already forwarded to the + // outbound edge and removed via claim, we need to make sure to claim them backwards via + // adding them to `pending_claims_to_replay`. + if let Some(forwarded_htlcs) = + already_forwarded_htlcs.remove(&(*channel_id, payment_hash)) + { + for (hop_data, outbound_amt_msat) in forwarded_htlcs { + let new_pending_claim = + !pending_claims_to_replay.iter().any(|(src, _, _, _, _, _, _)| { + matches!(src, HTLCSource::PreviousHopData(hop) if hop.htlc_id == hop_data.htlc_id && hop.channel_id == hop_data.channel_id) + }); + if new_pending_claim { + let counterparty_node_id = monitor.get_counterparty_node_id(); + let is_channel_closed = channel_manager + .per_peer_state + .read() + .unwrap() + .get(&counterparty_node_id) + .map_or(true, |peer_state_mtx| { + !peer_state_mtx + .lock() + .unwrap() + .channel_by_id + .contains_key(channel_id) + }); + pending_claims_to_replay.push(( + HTLCSource::PreviousHopData(hop_data), + payment_preimage, + outbound_amt_msat, + is_channel_closed, + counterparty_node_id, + monitor.get_funding_txo(), + *channel_id, + )); + } + } + } if !payment_claims.is_empty() { for payment_claim in payment_claims { if processed_claims.contains(&payment_claim.mpp_parts) { @@ -19473,6 +19485,21 @@ impl< channel_manager .fail_htlc_backwards_internal(&source, &hash, &reason, receiver, ev_action); } + for ((_, hash), htlcs) in already_forwarded_htlcs.into_iter() { + for (htlc, _) in htlcs { + let channel_id = htlc.channel_id; + let node_id = htlc.counterparty_node_id; + let source = HTLCSource::PreviousHopData(htlc); + let failure_reason = LocalHTLCFailureReason::TemporaryChannelFailure; + let failure_data = channel_manager.get_htlc_inbound_temp_fail_data(failure_reason); + let reason = HTLCFailReason::reason(failure_reason, failure_data); + let receiver = HTLCHandlingFailureType::Forward { node_id, channel_id }; + // The event completion action is only relevant for HTLCs that originate from our node, not + // forwarded HTLCs. 
+ channel_manager + .fail_htlc_backwards_internal(&source, &hash, &reason, receiver, None); + } + } for ( source, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 218779123f6..8508322b63a 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1270,6 +1270,13 @@ pub fn check_added_monitors>(node: & } } +pub fn get_latest_mon_update_id<'a, 'b, 'c>( + node: &Node<'a, 'b, 'c>, channel_id: ChannelId, +) -> (u64, u64) { + let monitor_id_state = node.chain_monitor.latest_monitor_update_id.lock().unwrap(); + monitor_id_state.get(&channel_id).unwrap().clone() +} + fn claimed_htlc_matches_path<'a, 'b, 'c>( origin_node: &Node<'a, 'b, 'c>, path: &[&Node<'a, 'b, 'c>], htlc: &ClaimedHTLC, ) -> bool { @@ -1355,8 +1362,10 @@ pub fn _reload_node<'a, 'b, 'c>( } #[macro_export] -macro_rules! reload_node { - ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { +macro_rules! _reload_node_inner { + ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: + ident, $new_chain_monitor: ident, $new_channelmanager: ident + ) => { let chanman_encoded = $chanman_encoded; $persister = $crate::util::test_utils::TestPersister::new(); @@ -1370,15 +1379,24 @@ macro_rules! reload_node { ); $node.chain_monitor = &$new_chain_monitor; - $new_channelmanager = - _reload_node(&$node, $new_config, &chanman_encoded, $monitors_encoded); + $new_channelmanager = $crate::ln::functional_test_utils::_reload_node( + &$node, + $new_config, + &chanman_encoded, + $monitors_encoded, + ); $node.node = &$new_channelmanager; $node.onion_messenger.set_offers_handler(&$new_channelmanager); $node.onion_messenger.set_async_payments_handler(&$new_channelmanager); }; +} + +#[macro_export] +macro_rules! reload_node { + // Reload the node using the node's current config ($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { let config = $node.node.get_current_config(); - reload_node!( + $crate::_reload_node_inner!( $node, config, $chanman_encoded, @@ -1388,6 +1406,18 @@ macro_rules! reload_node { $new_channelmanager ); }; + // Reload the node with the new provided config + ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { + $crate::_reload_node_inner!( + $node, + $new_config, + $chanman_encoded, + $monitors_encoded, + $persister, + $new_chain_monitor, + $new_channelmanager + ); + }; } pub fn create_funding_transaction<'a, 'b, 'c>( @@ -2914,7 +2944,7 @@ pub fn check_payment_claimable( #[cfg(any(test, ldk_bench, feature = "_test_utils"))] macro_rules! expect_payment_claimable { ($node: expr, $expected_payment_hash: expr, $expected_payment_secret: expr, $expected_recv_value: expr) => { - expect_payment_claimable!( + $crate::expect_payment_claimable!( $node, $expected_payment_hash, $expected_payment_secret, @@ -5172,6 +5202,9 @@ pub struct ReconnectArgs<'a, 'b, 'c, 'd> { pub pending_cell_htlc_claims: (usize, usize), pub pending_cell_htlc_fails: (usize, usize), pub pending_raa: (bool, bool), + /// If true, don't assert that pending messages are empty after the commitment dance completes. + /// Useful when holding cell HTLCs will be released and need to be handled by the caller. 
+ pub allow_post_commitment_dance_msgs: (bool, bool), } impl<'a, 'b, 'c, 'd> ReconnectArgs<'a, 'b, 'c, 'd> { @@ -5194,6 +5227,7 @@ impl<'a, 'b, 'c, 'd> ReconnectArgs<'a, 'b, 'c, 'd> { pending_cell_htlc_claims: (0, 0), pending_cell_htlc_fails: (0, 0), pending_raa: (false, false), + allow_post_commitment_dance_msgs: (false, false), } } } @@ -5219,6 +5253,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { pending_raa, pending_responding_commitment_signed, pending_responding_commitment_signed_dup_monitor, + allow_post_commitment_dance_msgs, } = args; connect_nodes(node_a, node_b); let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b); @@ -5402,11 +5437,13 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { get_event_msg!(node_a, MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes node_b.node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); - assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors( &node_b, if pending_responding_commitment_signed_dup_monitor.0 { 0 } else { 1 }, ); + if !allow_post_commitment_dance_msgs.0 { + assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); + } } } else { assert!(chan_msgs.2.is_none()); @@ -5516,11 +5553,13 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { get_event_msg!(node_b, MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes node_a.node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); - assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors( &node_a, if pending_responding_commitment_signed_dup_monitor.1 { 0 } else { 1 }, ); + if !allow_post_commitment_dance_msgs.1 { + assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); + } } } else { assert!(chan_msgs.2.is_none()); diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index c0432051a62..1ee63fd1745 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -1211,6 +1211,13 @@ fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) { let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false); + // While an inbound HTLC is committed in a channel but not yet forwarded, we store its onion in + // the `Channel` in case we need to remember it on restart. Once it's irrevocably forwarded to the + // outbound edge, we can prune it on the inbound edge. + assert_eq!( + nodes[1].node.test_get_inbound_committed_htlcs_with_onion(nodes[0].node.get_our_node_id(), chan_id_1), + 1 + ); // Decode the HTLC onion but don't forward it to the next hop, such that the HTLC ends up in // `ChannelManager::forward_htlcs` or `ChannelManager::pending_intercepted_htlcs`. @@ -1232,6 +1239,13 @@ fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) { args_b_c.send_announcement_sigs = (true, true); reconnect_nodes(args_b_c); + // Before an inbound HTLC is irrevocably forwarded, its onion should still be persisted within the + // inbound edge channel. 
+ assert_eq!( + nodes[1].node.test_get_inbound_committed_htlcs_with_onion(nodes[0].node.get_our_node_id(), chan_id_1), + 1 + ); + // Forward the HTLC and ensure we can claim it post-reload. nodes[1].node.process_pending_htlc_forwards(); @@ -1254,6 +1268,12 @@ fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[2], &nodes[1], &updates.commitment_signed, false, false); expect_and_process_pending_htlcs(&nodes[2], false); + // After an inbound HTLC is irrevocably forwarded, its onion should be pruned within the inbound + // edge channel. + assert_eq!( + nodes[1].node.test_get_inbound_committed_htlcs_with_onion(nodes[0].node.get_our_node_id(), chan_id_1), + 0 + ); expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id()); let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]]; @@ -1319,6 +1339,117 @@ fn test_manager_persisted_post_outbound_edge_forward() { expect_payment_sent(&nodes[0], payment_preimage, None, true, true); } +#[test] +fn test_manager_persisted_post_outbound_edge_holding_cell() { + // Test that we will not double-forward an HTLC after restart if it is already in the outbound + // edge's holding cell, which was previously broken. + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2; + let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2; + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000); + + // Lock in the HTLC from node_a <> node_b. + let amt_msat = 1000; + let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); + nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + check_added_monitors(&nodes[0], 1); + let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); + nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false); + + // Send a 2nd HTLC node_c -> node_b, to force the first HTLC into the holding cell. 
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + let (route_2, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[1], amt_msat); + nodes[2].node.send_payment_with_route(route_2, payment_hash_2, RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); + let send_event = + SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0)); + nodes[1].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &send_event.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &send_event.commitment_msg); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors(&nodes[1], 1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + // Add the HTLC to the outbound edge, node_b <> node_c. Force the outbound HTLC into the b<>c + // holding cell. + nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors(&nodes[1], 0); + assert_eq!( + nodes[1].node.test_holding_cell_outbound_htlc_forwards_count(nodes[2].node.get_our_node_id(), chan_id_2), + 1 + ); + + // Disconnect peers and reload the forwarding node_b. + nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); + + let node_b_encoded = nodes[1].node.encode(); + let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode(); + let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode(); + reload_node!(nodes[1], node_b_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized); + + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_id_2); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id_2, latest_update); + + reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[0])); + + // Reconnect b<>c. Node_b has pending RAA + commitment_signed from the incomplete c->b + // commitment dance, plus an HTLC in the holding cell that will be released after the dance. + let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]); + reconnect_args.pending_raa = (false, true); + reconnect_args.pending_responding_commitment_signed = (false, true); + // Node_c needs a monitor update to catch up after processing node_b's reestablish. + reconnect_args.expect_renegotiated_funding_locked_monitor_update = (false, true); + // The holding cell HTLC will be released after the commitment dance - handle it below. + reconnect_args.allow_post_commitment_dance_msgs = (false, true); + reconnect_nodes(reconnect_args); + + // The holding cell HTLC was released during the reconnect. Complete its commitment dance. + let holding_cell_htlc_msgs = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(holding_cell_htlc_msgs.len(), 1); + match &holding_cell_htlc_msgs[0] { + MessageSendEvent::UpdateHTLCs { node_id, updates, .. 
} => { + assert_eq!(*node_id, nodes[2].node.get_our_node_id()); + assert_eq!(updates.update_add_htlcs.len(), 1); + nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); + do_commitment_signed_dance(&nodes[2], &nodes[1], &updates.commitment_signed, false, false); + } + _ => panic!("Unexpected message: {:?}", holding_cell_htlc_msgs[0]), + } + + // Ensure node_b won't double-forward the outbound HTLC (this was previously broken). + nodes[1].node.process_pending_htlc_forwards(); + let msgs = nodes[1].node.get_and_clear_pending_msg_events(); + assert!(msgs.is_empty(), "Expected 0 messages, got {:?}", msgs); + + // The a->b->c HTLC is now committed on node_c. The c->b HTLC is committed on node_b. + // Both payments should now be claimable. + expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id()); + expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, amt_msat, None, nodes[1].node.get_our_node_id()); + + // Claim the a->b->c payment on node_c. + let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]]; + do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], path, payment_preimage)); + expect_payment_sent(&nodes[0], payment_preimage, None, true, true); + + // Claim the c->b payment on node_b. + nodes[1].node.claim_funds(payment_preimage_2); + expect_payment_claimed!(nodes[1], payment_hash_2, amt_msat); + check_added_monitors(&nodes[1], 1); + let mut update = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); + nodes[2].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), update.update_fulfill_htlcs.remove(0)); + do_commitment_signed_dance(&nodes[2], &nodes[1], &update.commitment_signed, false, false); + expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true); +} + #[test] fn test_reload_partial_funding_batch() { let chanmon_cfgs = create_chanmon_cfgs(3); @@ -1647,3 +1778,304 @@ fn test_hold_completed_inflight_monitor_updates_upon_manager_reload() { reconnect_nodes(reconnect_args); } +#[test] +fn outbound_removed_holding_cell_resolved_no_double_forward() { + // Test that if a forwarding node has an HTLC that is fully removed on the outbound edge + // but where the inbound edge resolution is in the holding cell, and we reload the node in this + // state, that node will not double-forward the HTLC. + + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_0_id = nodes[0].node.get_our_node_id(); + let node_1_id = nodes[1].node.get_our_node_id(); + let node_2_id = nodes[2].node.get_our_node_id(); + + let chan_0_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + let chan_id_0_1 = chan_0_1.2; + let chan_id_1_2 = chan_1_2.2; + + // Send a payment from nodes[0] to nodes[2] via nodes[1]. + let (route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000); + send_along_route_with_secret( + &nodes[0], route, &[&[&nodes[1], &nodes[2]]], 1_000_000, payment_hash, payment_secret, + ); + + // Claim the payment on nodes[2]. 
+ nodes[2].node.claim_funds(payment_preimage); + check_added_monitors(&nodes[2], 1); + expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); + + // Disconnect nodes[0] from nodes[1] BEFORE processing the fulfill. + // This forces the inbound fulfill resolution to go into nodes[1]'s holding cell for the inbound + // channel. + nodes[0].node.peer_disconnected(node_1_id); + nodes[1].node.peer_disconnected(node_0_id); + + // Process the fulfill from nodes[2] to nodes[1]. + let updates_2_1 = get_htlc_update_msgs(&nodes[2], &node_1_id); + nodes[1].node.handle_update_fulfill_htlc(node_2_id, updates_2_1.update_fulfill_htlcs[0].clone()); + check_added_monitors(&nodes[1], 1); + do_commitment_signed_dance(&nodes[1], &nodes[2], &updates_2_1.commitment_signed, false, false); + expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); + + // At this point: + // - The outbound HTLC nodes[1]->nodes[2] is resolved and removed + // - The inbound HTLC nodes[0]->nodes[1] is still in a Committed state, with the fulfill + // resolution in nodes[1]'s chan_0_1 holding cell + let node_1_serialized = nodes[1].node.encode(); + let mon_0_1_serialized = get_monitor!(nodes[1], chan_id_0_1).encode(); + let mon_1_2_serialized = get_monitor!(nodes[1], chan_id_1_2).encode(); + + // Reload nodes[1]. + // During deserialization, we previously would not have noticed that the nodes[0]<>nodes[1] HTLC + // had a resolution pending in the holding cell, and reconstructed the ChannelManager's pending + // HTLC state indicating that the HTLC still needed to be forwarded to the outbound edge. + reload_node!( + nodes[1], + node_1_serialized, + &[&mon_0_1_serialized, &mon_1_2_serialized], + persister, + new_chain_monitor, + nodes_1_deserialized + ); + + // Check that nodes[1] doesn't double-forward the HTLC. + nodes[1].node.process_pending_htlc_forwards(); + + // Reconnect nodes[1] to nodes[0]. The claim should be in nodes[1]'s holding cell. + let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[0]); + reconnect_args.pending_cell_htlc_claims = (0, 1); + reconnect_nodes(reconnect_args); + + // nodes[0] should now have received the fulfill and generate PaymentSent. + expect_payment_sent(&nodes[0], payment_preimage, None, true, true); +} + +#[test] +fn test_reload_node_with_preimage_in_monitor_claims_htlc() { + // Test that if a forwarding node has an HTLC that was irrevocably removed on the outbound edge + // via claim but is still forwarded-and-unresolved in the inbound edge, that HTLC will not be + // failed back on the inbound edge on reload. + // + // For context, the ChannelManager is moving towards reconstructing the pending inbound HTLC set + // from Channel data on startup. If we find an inbound HTLC that is flagged as already-forwarded, + // we then check that the HTLC is either (a) still present in the outbound edge or (b) removed + // from the outbound edge but with a preimage present in the corresponding ChannelMonitor, + // indicating that it was removed from the outbound edge via claim. If neither of those is the + // case, we infer that the HTLC was removed from the outbound edge via failure and fail the HTLC + // backwards. + // + // Here we ensure that inbound HTLCs in case (b) above will not be failed backwards on manager + // reload.
+ + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_0_id = nodes[0].node.get_our_node_id(); + let node_1_id = nodes[1].node.get_our_node_id(); + let node_2_id = nodes[2].node.get_our_node_id(); + + let chan_0_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + let chan_id_0_1 = chan_0_1.2; + let chan_id_1_2 = chan_1_2.2; + + // Send a payment from nodes[0] to nodes[2] via nodes[1]. + let (route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000); + send_along_route_with_secret( + &nodes[0], route, &[&[&nodes[1], &nodes[2]]], 1_000_000, payment_hash, payment_secret, + ); + + // Claim the payment on nodes[2]. + nodes[2].node.claim_funds(payment_preimage); + check_added_monitors(&nodes[2], 1); + expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); + + // Disconnect nodes[0] from nodes[1] BEFORE processing the fulfill. + // This prevents the claim from propagating back, leaving the inbound HTLC in ::Forwarded state. + nodes[0].node.peer_disconnected(node_1_id); + nodes[1].node.peer_disconnected(node_0_id); + + // Process the fulfill from nodes[2] to nodes[1]. + // This stores the preimage in nodes[1]'s monitor for chan_1_2. + let updates_2_1 = get_htlc_update_msgs(&nodes[2], &node_1_id); + nodes[1].node.handle_update_fulfill_htlc(node_2_id, updates_2_1.update_fulfill_htlcs[0].clone()); + check_added_monitors(&nodes[1], 1); + do_commitment_signed_dance(&nodes[1], &nodes[2], &updates_2_1.commitment_signed, false, false); + expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); + + // Clear the holding cell's claim entry on chan_0_1 before serialization. + // This simulates a crash where the HTLC was fully removed from the outbound edge but is still + // present on the inbound edge without a resolution. + nodes[1].node.test_clear_channel_holding_cell(node_0_id, chan_id_0_1); + + // At this point: + // - The inbound HTLC on nodes[1] (from nodes[0]) is in ::Forwarded state + // - The preimage IS in nodes[1]'s monitor for chan_1_2 + // - The outbound HTLC to nodes[2] is resolved + // + // Serialize nodes[1] state and monitors before reloading. + let node_1_serialized = nodes[1].node.encode(); + let mon_0_1_serialized = get_monitor!(nodes[1], chan_id_0_1).encode(); + let mon_1_2_serialized = get_monitor!(nodes[1], chan_id_1_2).encode(); + + // Reload nodes[1]. + // During deserialization, we track inbound HTLCs that purport to already be forwarded on the + // outbound edge. If any are entirely missing from the outbound edge with no preimage available, + // they will be failed backwards. Otherwise, as in this case where a preimage is available, the + // payment should be claimed backwards. + reload_node!( + nodes[1], + node_1_serialized, + &[&mon_0_1_serialized, &mon_1_2_serialized], + persister, + new_chain_monitor, + nodes_1_deserialized + ); + + // When the claim is reconstructed during reload, a PaymentForwarded event is generated. + // This event has next_user_channel_id as None since the outbound HTLC was already removed. + // Fetching events triggers the pending monitor update (adding preimage) to be applied. 
+ let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match &events[0] { + Event::PaymentForwarded { total_fee_earned_msat: Some(1000), .. } => {}, + _ => panic!("Expected PaymentForwarded event"), + } + check_added_monitors(&nodes[1], 1); + + // Reconnect nodes[1] to nodes[0]. The claim should be in nodes[1]'s holding cell. + let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[0]); + reconnect_args.pending_cell_htlc_claims = (0, 1); + reconnect_nodes(reconnect_args); + + // nodes[0] should now have received the fulfill and generate PaymentSent. + expect_payment_sent(&nodes[0], payment_preimage, None, true, true); +} + +#[test] +fn test_reload_node_without_preimage_fails_htlc() { + // Test that if a forwarding node has an HTLC that was removed on the outbound edge via failure + // but is still forwarded-and-unresolved in the inbound edge, that HTLC will be correctly + // failed back on reload via the already_forwarded_htlcs mechanism. + // + // For context, the ChannelManager reconstructs the pending inbound HTLC set from Channel data + // on startup. If an inbound HTLC is present but flagged as already-forwarded, we check that + // the HTLC is either (a) still present in the outbound edge or (b) removed from the outbound + // edge but with a preimage present in the corresponding ChannelMonitor, indicating it was + // removed via claim. If neither, we infer the HTLC was removed via failure and fail it back. + // + // Here we test the failure case: no preimage is present, so the HTLC should be failed back. + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_0_id = nodes[0].node.get_our_node_id(); + let node_1_id = nodes[1].node.get_our_node_id(); + let node_2_id = nodes[2].node.get_our_node_id(); + + let chan_0_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + let chan_id_0_1 = chan_0_1.2; + let chan_id_1_2 = chan_1_2.2; + + // Send a payment from nodes[0] to nodes[2] via nodes[1]. + let (route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000); + send_along_route_with_secret( + &nodes[0], route, &[&[&nodes[1], &nodes[2]]], 1_000_000, payment_hash, payment_secret, + ); + + // Disconnect nodes[0] from nodes[1] BEFORE processing the failure. + // This prevents the fail from propagating back, leaving the inbound HTLC in ::Forwarded state. + nodes[0].node.peer_disconnected(node_1_id); + nodes[1].node.peer_disconnected(node_0_id); + + // Fail the payment on nodes[2] and process the failure to nodes[1]. + // This removes the outbound HTLC and queues a fail in the holding cell. 
+ nodes[2].node.fail_htlc_backwards(&payment_hash); + expect_and_process_pending_htlcs_and_htlc_handling_failed( + &nodes[2], &[HTLCHandlingFailureType::Receive { payment_hash }] + ); + check_added_monitors(&nodes[2], 1); + + let updates_2_1 = get_htlc_update_msgs(&nodes[2], &node_1_id); + nodes[1].node.handle_update_fail_htlc(node_2_id, &updates_2_1.update_fail_htlcs[0]); + do_commitment_signed_dance(&nodes[1], &nodes[2], &updates_2_1.commitment_signed, false, false); + expect_and_process_pending_htlcs_and_htlc_handling_failed( + &nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(node_2_id), channel_id: chan_id_1_2 }] + ); + + // Clear the holding cell's fail entry on chan_0_1 before serialization. + // This simulates a crash where the HTLC was fully removed from the outbound edge but is still + // present on the inbound edge without a resolution. Otherwise, we would not be able to exercise + // the desired failure paths due to the holding cell failure resolution being present. + nodes[1].node.test_clear_channel_holding_cell(node_0_id, chan_id_0_1); + + // Now serialize. The state has: + // - Inbound HTLC on chan_0_1 in ::Forwarded state + // - Outbound HTLC on chan_1_2 resolved (not present) + // - No preimage in monitors (it was a failure) + // - No holding cell entry for the fail (we cleared it) + let node_1_serialized = nodes[1].node.encode(); + let mon_0_1_serialized = get_monitor!(nodes[1], chan_id_0_1).encode(); + let mon_1_2_serialized = get_monitor!(nodes[1], chan_id_1_2).encode(); + + // Reload nodes[1]. + // The already_forwarded_htlcs mechanism should detect: + // - Inbound HTLC is in ::Forwarded state + // - Outbound HTLC is not present in outbound channel + // - No preimage in monitors + // Therefore it should fail the HTLC backwards. + reload_node!( + nodes[1], + node_1_serialized, + &[&mon_0_1_serialized, &mon_1_2_serialized], + persister, + new_chain_monitor, + nodes_1_deserialized + ); + + // After reload, nodes[1] should have generated an HTLCHandlingFailed event. + let events = nodes[1].node.get_and_clear_pending_events(); + assert!(!events.is_empty(), "Expected HTLCHandlingFailed event"); + for event in events { + match event { + Event::HTLCHandlingFailed { .. } => {}, + _ => panic!("Unexpected event {:?}", event), + } + } + + // Process the failure so it goes back into chan_0_1's holding cell. + nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors(&nodes[1], 0); // No monitor update yet (peer disconnected) + + // Reconnect nodes[1] to nodes[0]. The fail should be in nodes[1]'s holding cell. + let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[0]); + reconnect_args.pending_cell_htlc_fails = (0, 1); + reconnect_nodes(reconnect_args); + + // nodes[0] should now have received the failure and generate PaymentFailed. + expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new()); +} diff --git a/lightning/src/util/ser.rs b/lightning/src/util/ser.rs index f821aa5afc0..6579c0353a3 100644 --- a/lightning/src/util/ser.rs +++ b/lightning/src/util/ser.rs @@ -979,13 +979,15 @@ where } } -// Vectors +/// Write number of items in a vec followed by each element, without writing a length-prefix for +/// each element. +#[macro_export] macro_rules! 
impl_writeable_for_vec { ($ty: ty $(, $name: ident)*) => { impl<$($name : Writeable),*> Writeable for Vec<$ty> { #[inline] fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> { - CollectionLength(self.len() as u64).write(w)?; + $crate::util::ser::CollectionLength(self.len() as u64).write(w)?; for elem in self.iter() { elem.write(w)?; } @@ -994,15 +996,21 @@ macro_rules! impl_writeable_for_vec { } } } +/// Read the number of items in a vec followed by each element, without reading a length prefix for +/// each element. +/// +/// Each element is read with `MaybeReadable`, meaning if an element cannot be read then it is +/// skipped without returning `DecodeError::InvalidValue`. +#[macro_export] macro_rules! impl_readable_for_vec { ($ty: ty $(, $name: ident)*) => { impl<$($name : Readable),*> Readable for Vec<$ty> { #[inline] - fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> { - let len: CollectionLength = Readable::read(r)?; - let mut ret = Vec::with_capacity(cmp::min(len.0 as usize, MAX_BUF_SIZE / core::mem::size_of::<$ty>())); + fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> { + let len: $crate::util::ser::CollectionLength = Readable::read(r)?; + let mut ret = Vec::with_capacity(cmp::min(len.0 as usize, $crate::util::ser::MAX_BUF_SIZE / core::mem::size_of::<$ty>())); for _ in 0..len.0 { - if let Some(val) = MaybeReadable::read(r)? { + if let Some(val) = $crate::util::ser::MaybeReadable::read(r)? { ret.push(val); } } diff --git a/pending_changelog/upgrade-0.2-pending-htlc.txt b/pending_changelog/upgrade-0.2-pending-htlc.txt new file mode 100644 index 00000000000..6ca11755ee9 --- /dev/null +++ b/pending_changelog/upgrade-0.2-pending-htlc.txt @@ -0,0 +1,9 @@ +Backwards Compatibility +======================= + + * Upgrading directly from 0.2 to 0.5 with pending HTLC forwards is not supported. + If you have pending HTLC forwards when upgrading from 0.2, you must first upgrade + to 0.3, forward the HTLCs (completing at least the outbound commitment), then + upgrade to 0.5. Attempting to upgrade directly from 0.2 to 0.5 with pending HTLC + forwards will result in a `DecodeError::InvalidValue` when reading the + `ChannelManager`.