`rustfmt`: Run on `peer_handler.rs`
👋 Thanks for assigning @TheBlueMatt as a reviewer! I'll wait for their review and will help manage the review process. Once they submit their review, I'll check if a second reviewer would be helpful.
Codecov Report
All modified and coverable lines are covered by tests ✅
Project coverage is 89.34%. Comparing base (46cb5ff) to head (6403b34). Report is 5 commits behind head on main.
Additional details and impacted files
@@            Coverage Diff             @@
##             main    #3725      +/-   ##
==========================================
+ Coverage   89.11%   89.34%   +0.23%
==========================================
  Files         156      156
  Lines      123435   126684    +3249
  Branches   123435   126684    +3249
==========================================
+ Hits       109995   113191    +3196
- Misses      10758    10828      +70
+ Partials     2682     2665      -17
Let me know if there's anything in particular left to address.
The tests could be cleaned up a bit more https://github.com/lightningdevkit/rust-lightning/pull/3725#discussion_r2035775461.
Excuse the delay here once more!
Now rebased and addressed this remaining comment by pulling out some more handlers into intermediate variables as a fixup. Let me know if there's something else you'd like to see.
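Concretely, the shape of the cleanup is to replace each long chained assertion with a scoped block and an intermediate variable, so the formatted code stays compact and the read guard is dropped at the end of the block. A condensed sketch of the pattern (taken from the diff below, not new test code):

// Before: a single chained expression that rustfmt splits across ~10 lines.
assert!(!peer_a.peers.read().unwrap().get(&fd_a).unwrap().lock().unwrap().handshake_complete());

// After: bind the read guard and the result to intermediate variables inside a
// scope, keeping the assertion on one short line and releasing the guard when
// the block ends.
{
	let peer_a_lock = peer_a.peers.read().unwrap();
	let handshake_complete =
		peer_a_lock.get(&fd_a).unwrap().lock().unwrap().handshake_complete();
	assert!(!handshake_complete);
}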
Feel free to squash, IMO
Squashed without further changes.
Force-pushed with the following changes:
diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index bcae7690f..73c831884 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -4420,48 +4420,40 @@ mod tests {
let act_three_with_init_b = fd_b.outbound_data.lock().unwrap().split_off(0);
- assert!(!peer_a
- .peers
- .read()
- .unwrap()
- .get(&fd_a)
- .unwrap()
- .lock()
- .unwrap()
- .handshake_complete());
+ {
+ let peer_a_lock = peer_a.peers.read().unwrap();
+ let handshake_complete =
+ peer_a_lock.get(&fd_a).unwrap().lock().unwrap().handshake_complete();
+ assert!(!handshake_complete);
+ }
+
assert_eq!(peer_a.read_event(&mut fd_a, &act_three_with_init_b).unwrap(), false);
peer_a.process_events();
- assert!(peer_a
- .peers
- .read()
- .unwrap()
- .get(&fd_a)
- .unwrap()
- .lock()
- .unwrap()
- .handshake_complete());
+
+ {
+ let peer_a_lock = peer_a.peers.read().unwrap();
+ let handshake_complete =
+ peer_a_lock.get(&fd_a).unwrap().lock().unwrap().handshake_complete();
+ assert!(handshake_complete);
+ }
let init_a = fd_a.outbound_data.lock().unwrap().split_off(0);
assert!(!init_a.is_empty());
- assert!(!peer_b
- .peers
- .read()
- .unwrap()
- .get(&fd_b)
- .unwrap()
- .lock()
- .unwrap()
- .handshake_complete());
+ {
+ let peer_b_lock = peer_b.peers.read().unwrap();
+ let handshake_complete =
+ peer_b_lock.get(&fd_b).unwrap().lock().unwrap().handshake_complete();
+ assert!(!handshake_complete);
+ }
+
assert_eq!(peer_b.read_event(&mut fd_b, &init_a).unwrap(), false);
peer_b.process_events();
- assert!(peer_b
- .peers
- .read()
- .unwrap()
- .get(&fd_b)
- .unwrap()
- .lock()
- .unwrap()
- .handshake_complete());
+
+ {
+ let peer_b_lock = peer_b.peers.read().unwrap();
+ let handshake_complete =
+ peer_b_lock.get(&fd_b).unwrap().lock().unwrap().handshake_complete();
+ assert!(handshake_complete);
+ }
// Make sure we're still connected.
@@ -4565,17 +4557,10 @@ mod tests {
}
- assert_eq!(
- peers[0]
- .peers
- .read()
- .unwrap()
- .get(&fd_a)
- .unwrap()
- .lock()
- .unwrap()
- .gossip_broadcast_buffer
- .len(),
- OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP
- );
+ {
+ let peer_a_lock = peers[0].peers.read().unwrap();
+ let buf_len =
+ peer_a_lock.get(&fd_a).unwrap().lock().unwrap().gossip_broadcast_buffer.len();
+ assert_eq!(buf_len, OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP);
+ }
// Check that if a broadcast message comes in from the channel handler (i.e. it is an
@@ -4583,17 +4568,11 @@ mod tests {
cfgs[0].chan_handler.pending_events.lock().unwrap().push(msg_ev);
peers[0].process_events();
- assert_eq!(
- peers[0]
- .peers
- .read()
- .unwrap()
- .get(&fd_a)
- .unwrap()
- .lock()
- .unwrap()
- .gossip_broadcast_buffer
- .len(),
- OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP + 1
- );
+
+ {
+ let peer_a_lock = peers[0].peers.read().unwrap();
+ let buf_len =
+ peer_a_lock.get(&fd_a).unwrap().lock().unwrap().gossip_broadcast_buffer.len();
+ assert_eq!(buf_len, OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP + 1);
+ }
// Finally, deliver all the messages and make sure we got the right count. Note that there
@@ -4605,14 +4584,11 @@ mod tests {
drain_queues!();
- assert!(peers[0]
- .peers
- .read()
- .unwrap()
- .get(&fd_a)
- .unwrap()
- .lock()
- .unwrap()
- .gossip_broadcast_buffer
- .is_empty());
+ {
+ let peer_a_lock = peers[0].peers.read().unwrap();
+ let empty =
+ peer_a_lock.get(&fd_a).unwrap().lock().unwrap().gossip_broadcast_buffer.is_empty();
+ assert!(empty);
+ }
+
assert_eq!(
cfgs[1].routing_handler.chan_anns_recvd.load(Ordering::Relaxed),
🔔 1st Reminder
Hey @TheBlueMatt! This PR has been waiting for your review. Please take a look when you have a chance. If you're unable to review, please let us know so we can find another reviewer.