rust-lightning

Run rustfmt on `payment_tests` and `functional_tests` and split up the latter

Open TheBlueMatt opened this issue 7 months ago • 6 comments

functional_tests' time has come, and there are some easy batches of tests to move into separate files. Even at the end it's still 10k LoC, but that's better than the 14k it started as (let alone how long it'd be if it were naively rustfmt'd).

TheBlueMatt avatar Apr 28 '25 14:04 TheBlueMatt

👋 Thanks for assigning @joostjager as a reviewer! I'll wait for their review and will help manage the review process. Once they submit their review, I'll check if a second reviewer would be helpful.

ldk-reviews-bot avatar Apr 28 '25 14:04 ldk-reviews-bot

👋 The first review has been submitted!

Do you think this PR is ready for a second reviewer? If so, click here to assign a second reviewer.

ldk-reviews-bot avatar Apr 29 '25 09:04 ldk-reviews-bot

✅ Added second reviewer: @valentinewallace

ldk-reviews-bot avatar Apr 29 '25 11:04 ldk-reviews-bot

Needs rebase since #3700 landed

valentinewallace avatar Apr 29 '25 18:04 valentinewallace

Rebased and addressed feedback.

TheBlueMatt avatar May 01 '25 16:05 TheBlueMatt

FYI, looks like fixup commits weren't added prior to the rebase so it's hard to see what changed. I'm also generally fine with squashing in changes if the rebase is a separate push.

valentinewallace avatar May 01 '25 16:05 valentinewallace

Rebased.

FYI, looks like fixup commits weren't added prior to the rebase so it's hard to see what changed. I'm also generally fine with squashing in changes if the rebase is a separate push.

Apologies — not quite sure why I got overly eager and did that, but the full diff from a fresh naive rebase (since the first push of this commit) to the current contents follows (note that some of the diff is just different variable names picked during the rebase-redo).

diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index ab79aa70a..252958035 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -31,9 +31,7 @@ use crate::ln::chan_utils::{
 };
 use crate::ln::channel::{
-	get_holder_selected_channel_reserve_satoshis, Channel, InboundV1Channel, OutboundV1Channel,
-	COINBASE_MATURITY,
-};
-use crate::ln::channel::{
-	ChannelError, DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, MIN_CHAN_DUST_LIMIT_SATOSHIS,
+	get_holder_selected_channel_reserve_satoshis, Channel, ChannelError, InboundV1Channel,
+	OutboundV1Channel, COINBASE_MATURITY, DISCONNECT_PEER_AWAITING_RESPONSE_TICKS,
+	MIN_CHAN_DUST_LIMIT_SATOSHIS,
 };
 use crate::ln::channelmanager::{
@@ -775,14 +773,11 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac
 	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

-	let node_a_id = nodes[0].node.get_our_node_id();
-	let node_b_id = nodes[1].node.get_our_node_id();
-	let node_c_id = nodes[2].node.get_our_node_id();
-
 	for node in nodes.iter() {
 		*node.fee_estimator.sat_per_kw.lock().unwrap() = 2000;
 	}

-	let node_b_id = node_b_id;
-	let node_c_id = node_c_id;
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+	let node_c_id = nodes[2].node.get_our_node_id();

 	create_announced_chan_between_nodes(&nodes, 0, 1);
@@ -3741,7 +3736,7 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {

 	if forwarded_htlc {
-		let failure =
+		let fail_type =
 			HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 };
-		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![failure]);
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]);
 		check_added_monitors(&nodes[1], 1);
 		let fail_commit = nodes[1].node.get_and_clear_pending_msg_events();
@@ -9320,12 +9315,10 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

+	*nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
+
 	let node_a_id = nodes[0].node.get_our_node_id();
 	let node_b_id = nodes[1].node.get_our_node_id();
 	let node_c_id = nodes[2].node.get_our_node_id();

-	*nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
-
-	let node_c_id = node_c_id;
-
 	create_announced_chan_between_nodes(&nodes, 0, 1);
 	let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
@@ -9647,8 +9640,6 @@ pub fn test_inconsistent_mpp_params() {
 	expect_pending_htlcs_forwardable_ignore!(nodes[3]);
 	nodes[3].node.process_pending_htlc_forwards();
-	expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(
-		nodes[3],
-		vec![HTLCHandlingFailureType::Receive { payment_hash: hash }]
-	);
+	let fail_type = HTLCHandlingFailureType::Receive { payment_hash: hash };
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], [fail_type]);
 	nodes[3].node.process_pending_htlc_forwards();

@@ -10717,6 +10708,6 @@ fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash
 		claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
 	} else {
-		let failure = HTLCHandlingFailureType::Receive { payment_hash: hash };
-		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![failure]);
+		let fail_type = HTLCHandlingFailureType::Receive { payment_hash: hash };
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]);

 		check_added_monitors(&nodes[1], 1);
diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs
index 16de71b0e..aee764682 100644
--- a/lightning/src/ln/htlc_reserve_unit_tests.rs
+++ b/lightning/src/ln/htlc_reserve_unit_tests.rs
@@ -1,5 +1,5 @@
 //! Various unit tests covering HTLC handling as well as tests covering channel reserve tracking.

-use crate::events::{ClosureReason, Event, HTLCDestination, PaymentPurpose};
+use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PaymentPurpose};
 use crate::ln::chan_utils::{
 	self, commitment_tx_base_weight, htlc_success_tx_weight, CommitmentTransaction,
@@ -676,7 +676,6 @@ pub fn holding_cell_htlc_counting() {
 	// fails), the second will process the resulting failure and fail the HTLC backward.
 	expect_pending_htlcs_forwardable!(nodes[1]);
-	let failure =
-		HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 };
-	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![failure]);
+	let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 };
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![fail]);
 	check_added_monitors(&nodes[1], 1);

diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index eb06df55c..b185e7395 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -160,7 +160,6 @@ fn mpp_retry() {
 	// Attempt to forward the payment and complete the 2nd path's failure.
 	expect_pending_htlcs_forwardable!(&nodes[2]);
-	let failure =
-		HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_id };
-	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], vec![failure]);
+	let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_id };
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], [fail]);
 	let htlc_updates = get_htlc_update_msgs!(nodes[2], node_a_id);
 	assert!(htlc_updates.update_add_htlcs.is_empty());
@@ -283,7 +282,6 @@ fn mpp_retry_overpay() {
 	// Attempt to forward the payment and complete the 2nd path's failure.
 	expect_pending_htlcs_forwardable!(&nodes[2]);
-	let failure =
-		HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_id };
-	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], vec![failure]);
+	let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_id };
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], [fail]);

 	let htlc_updates = get_htlc_update_msgs!(nodes[2], node_a_id);
@@ -387,8 +385,8 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) {

 		// Failed HTLC from node 3 -> 1
-		let failure = HTLCHandlingFailureType::Receive { payment_hash };
-		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![failure]);
+		let fail = HTLCHandlingFailureType::Receive { payment_hash: hash };
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], [fail]);

-		let htlc_fail_updates = get_htlc_update_msgs!(nodes[3], nodes[1].node.get_our_node_id());
+		let htlc_fail_updates = get_htlc_update_msgs!(nodes[3], node_b_id);
 		assert_eq!(htlc_fail_updates.update_fail_htlcs.len(), 1);
 		nodes[1].node.handle_update_fail_htlc(node_d_id, &htlc_fail_updates.update_fail_htlcs[0]);
@@ -398,7 +396,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) {

 		// Failed HTLC from node 1 -> 0
-		let failure =
+		let fail_type =
 			HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_3_id };
-		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![failure]);
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]);

 		let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id);
@@ -550,5 +548,5 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() {
 	let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
 	let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
-	let (update_a, _, chan_4_channel_id, _) = create_announced_chan_between_nodes(&nodes, 2, 3);
+	let (update_a, _, chan_4_chan_id, _) = create_announced_chan_between_nodes(&nodes, 2, 3);
 	let chan_4_id = update_a.contents.short_channel_id;
 	let amount = 40_000;
@@ -655,6 +653,6 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() {
 	}
 	nodes[3].node.process_pending_htlc_forwards();
-	let failure = HTLCHandlingFailureType::Receive { payment_hash };
-	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![failure]);
+	let fail_type = HTLCHandlingFailureType::Receive { payment_hash };
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], [fail_type]);
 	check_added_monitors!(nodes[3], 1);

@@ -663,7 +661,8 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() {
 	nodes[2].node.handle_update_fail_htlc(node_d_id, &update_fail_0.update_fail_htlcs[0]);
 	commitment_signed_dance!(nodes[2], nodes[3], update_fail_0.commitment_signed, false);
-	let failure =
-		HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_channel_id };
-	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![failure]);
+
+	let fail_type =
+		HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_chan_id };
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [fail_type]);
 	check_added_monitors!(nodes[2], 1);

@@ -765,7 +764,11 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]);
 	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true);
+
 	expect_pending_htlcs_forwardable!(nodes[1]);
-	let hop = &HTLCDestination::NextHopChannel { node_id: Some(node_c_id), channel_id: chan_id_2 };
-	expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[hop]);
+	expect_htlc_handling_failed_destinations!(
+		nodes[1].node.get_and_clear_pending_events(),
+		[HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 }]
+	);
+
 	check_added_monitors(&nodes[1], 1);
 	// nodes[1] now immediately fails the HTLC as the next-hop channel is disconnected
@@ -1049,7 +1052,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	// previous hop channel is already on-chain, but it makes nodes[2] willing to see additional
 	// incoming HTLCs with the same payment hash later.
-	nodes[2].node.fail_htlc_backwards(&payment_hash);
-	let failure = HTLCHandlingFailureType::Receive { payment_hash };
-	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [failure]);
+	nodes[2].node.fail_htlc_backwards(&hash);
+	let fail_type = HTLCHandlingFailureType::Receive { payment_hash: hash };
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [fail_type]);
 	check_added_monitors!(nodes[2], 1);

@@ -1355,7 +1358,8 @@ fn test_fulfill_restart_failure() {

 	nodes[1].node.fail_htlc_backwards(&payment_hash);
-	let failure = HTLCHandlingFailureType::Receive { payment_hash };
-	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![failure]);
+	let fail_type = HTLCHandlingFailureType::Receive { payment_hash };
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]);
 	check_added_monitors!(nodes[1], 1);
+
 	let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id);
 	nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_fail_updates.update_fail_htlcs[0]);
@@ -1863,6 +1867,6 @@ fn abandoned_send_payment_idempotent() {

 	nodes[1].node.fail_htlc_backwards(&first_payment_hash);
-	let failure = HTLCHandlingFailureType::Receive { payment_hash: first_payment_hash };
-	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [failure]);
+	let fail_type = HTLCHandlingFailureType::Receive { payment_hash: first_payment_hash };
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]);

 	// Until we abandon the payment upon path failure, no matter how many timer ticks pass, we still cannot reuse the
@@ -2191,8 +2195,7 @@ fn do_test_intercepted_payment(test: InterceptTest) {
 		// Ensure we can fail the intercepted payment back.
 		nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap();
-		expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(
-			nodes[1],
-			vec![HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }]
-		);
+		let fail =
+			HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid };
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], [fail]);
 		nodes[1].node.process_pending_htlc_forwards();
 		let update_fail = get_htlc_update_msgs!(nodes[1], node_a_id);
@@ -2211,10 +2214,10 @@ fn do_test_intercepted_payment(test: InterceptTest) {
 	} else if test == InterceptTest::Forward {
 		// Check that we'll fail as expected when sending to a channel that isn't in `ChannelReady` yet.
-		let chan_id = nodes[1].node.create_channel(node_c_id, 100_000, 0, 42, None, None).unwrap();
+		let temp_id = nodes[1].node.create_channel(node_c_id, 100_000, 0, 42, None, None).unwrap();
 		let unusable_chan_err =
-			nodes[1].node.forward_intercepted_htlc(intercept_id, &chan_id, node_c_id, outbound_amt);
+			nodes[1].node.forward_intercepted_htlc(intercept_id, &temp_id, node_c_id, outbound_amt);
 		let err = format!(
 			"Channel with id {} for the passed counterparty node_id {} is still opening.",
-			chan_id, node_c_id,
+			temp_id, node_c_id,
 		);
 		assert_eq!(unusable_chan_err, Err(APIError::ChannelUnavailable { err }));
@@ -2277,7 +2280,7 @@ fn do_test_intercepted_payment(test: InterceptTest) {
 			connect_block(&nodes[1], &block);
 		}
-		let failure =
+		let fail_type =
 			HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid };
-		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![failure]);
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]);
 		check_added_monitors!(nodes[1], 1);

@@ -2540,5 +2543,5 @@ fn do_automatic_retries(test: AutoRetry) {
 			nodes[1].node.process_pending_htlc_forwards();
 			expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1],
-				vec![HTLCHandlingFailureType::Forward {
+				[HTLCHandlingFailureType::Forward {
 					node_id: Some(node_c_id),
 					channel_id: $failing_channel_id,
@@ -3095,7 +3098,6 @@ fn fails_paying_after_rejected_by_payee() {

 	nodes[1].node.fail_htlc_backwards(&payment_hash);
-
-	let failure = HTLCHandlingFailureType::Receive { payment_hash };
-	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [failure]);
+	let fail_type = HTLCHandlingFailureType::Receive { payment_hash };
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]);
 	let reason = PaymentFailureReason::RecipientRejected;
 	pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, payment_hash, reason);
@@ -4349,10 +4351,6 @@ fn do_test_custom_tlvs(spontaneous: bool, even_tlvs: bool, known_tlvs: bool) {
 		(false, true) => {
 			nodes[1].node.claim_funds(preimage);
-			let expected_destinations =
-				vec![HTLCHandlingFailureType::Receive { payment_hash: hash }];
-			expect_pending_htlcs_forwardable_and_htlc_handling_failed!(
-				nodes[1],
-				expected_destinations
-			);
+			let fail_type = HTLCHandlingFailureType::Receive { payment_hash: hash };
+			expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]);
 			let reason = PaymentFailureReason::RecipientRejected;
 			pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, hash, reason);
@@ -4404,7 +4402,6 @@ fn test_retry_custom_tlvs() {
 	// Attempt to forward the payment and complete the path's failure.
 	expect_pending_htlcs_forwardable!(&nodes[1]);
-	let failure =
-		HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2_id };
-	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![failure]);
+	let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2_id };
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], [fail]);
 	check_added_monitors!(nodes[1], 1);

@@ -4599,5 +4596,5 @@ fn do_test_custom_tlvs_consistency(
 	} else {
 		// Expect fail back
-		let expected_destinations = vec![HTLCHandlingFailureType::Receive { payment_hash: hash }];
+		let expected_destinations = [HTLCHandlingFailureType::Receive { payment_hash: hash }];
 		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], expected_destinations);
 		check_added_monitors!(nodes[3], 1);
@@ -4607,11 +4604,7 @@ fn do_test_custom_tlvs_consistency(
 		commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false);

-		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(
-			nodes[2],
-			vec![HTLCHandlingFailureType::Forward {
-				node_id: Some(node_d_id),
-				channel_id: chan_2_3.2
-			}]
-		);
+		let fail =
+			HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_2_3.2 };
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [fail]);
 		check_added_monitors!(nodes[2], 1);

@@ -4774,5 +4767,5 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) {
 		expect_pending_htlcs_forwardable_conditions(
 			nodes[3].node.get_and_clear_pending_events(),
-			&[HTLCHandlingFailureType::Receive {payment_hash}],
+			&[HTLCHandlingFailureType::Receive { payment_hash }],
 		);
 		nodes[3].node.process_pending_htlc_forwards();
@@ -4783,8 +4776,8 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) {
 		nodes[2].node.handle_update_fail_htlc(node_d_id, &ds_fail.update_fail_htlcs[0]);
 		commitment_signed_dance!(nodes[2], nodes[3], ds_fail.commitment_signed, false, true);
-		expect_pending_htlcs_forwardable_conditions(
-			nodes[2].node.get_and_clear_pending_events(),
-			&[HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: cd_channel_used }],
-		);
+		let events = nodes[2].node.get_and_clear_pending_events();
+		let fail_type =
+			HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: cd_chan_id };
+		expect_pending_htlcs_forwardable_conditions(events, &[fail_type]);
 	} else {
 		expect_pending_htlcs_forwardable!(nodes[3]);
@@ -4888,7 +4881,6 @@ fn test_htlc_forward_considers_anchor_outputs_value() {
 	// The forwarding node should reject forwarding it as expected.
 	expect_pending_htlcs_forwardable!(nodes[1]);
-	let failure =
-		HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 };
-	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![failure]);
+	let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 };
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], [fail]);
 	check_added_monitors(&nodes[1], 1);

@@ -5086,8 +5078,8 @@ fn test_non_strict_forwarding() {
 	};
 	// The failure to forward will refer to the channel given in the onion.
-	expect_pending_htlcs_forwardable_conditions(
-		nodes[1].node.get_and_clear_pending_events(),
-		&[HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: routed_channel_id }],
-	);
+	let events = nodes[1].node.get_and_clear_pending_events();
+	let fail =
+		HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: routed_chan_id };
+	expect_pending_htlcs_forwardable_conditions(events, &[fail]);

 	let updates = get_htlc_update_msgs!(nodes[1], node_a_id);
@@ -5242,7 +5234,6 @@ fn max_out_mpp_path() {
 	let chanmon_cfgs = create_chanmon_cfgs(3);
 	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
-	let node_chanmgrs = create_node_chanmgrs(
-		3, &node_cfgs, &[Some(user_cfg.clone()), Some(lsp_cfg.clone()), Some(user_cfg.clone())]
-	);
+	let configs = [Some(user_cfg.clone()), Some(lsp_cfg), Some(user_cfg)];
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs);
 	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

@@ -5259,5 +5250,7 @@ fn max_out_mpp_path() {
 	let route_params_cfg = crate::routing::router::RouteParametersConfig::default();

-	nodes[0].node.pay_for_bolt11_invoice(&invoice, PaymentId([42; 32]), None, route_params_cfg, Retry::Attempts(0)).unwrap();
+	let id = PaymentId([42; 32]);
+	let retry = Retry::Attempts(0);
+	nodes[0].node.pay_for_bolt11_invoice(&invoice, id, None, route_params_cfg, retry).unwrap();

 	assert!(nodes[0].node.list_recent_payments().len() == 1);

TheBlueMatt avatar May 05 '25 00:05 TheBlueMatt

Seems this needs a rebase now.

tnull avatar May 12 '25 08:05 tnull

Rebased and addressed new feedback. Didn't squash yet tho.

TheBlueMatt avatar May 12 '25 22:05 TheBlueMatt

Squashed without further changes.

TheBlueMatt avatar May 13 '25 15:05 TheBlueMatt

Going ahead landing this since it has 3 ACKs.

tnull avatar May 15 '25 09:05 tnull