@@ -892,7 +892,7 @@ pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
892892 /// The peer is currently connected (i.e. we've seen a
893893 /// [`ChannelMessageHandler::peer_connected`] and no corresponding
894894 /// [`ChannelMessageHandler::peer_disconnected`].
895- is_connected: bool,
895+ pub is_connected: bool,
896896}
897897
898898impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
@@ -1392,6 +1392,9 @@ where
13921392
13931393 pending_offers_messages: Mutex<Vec<PendingOnionMessage<OffersMessage>>>,
13941394
 1395+ /// Tracks the message events to be broadcast to all peers; they are held here until we are connected to at least one peer.
1396+ pending_broadcast_messages: Mutex<Vec<MessageSendEvent>>,
1397+
13951398 entropy_source: ES,
13961399 node_signer: NS,
13971400 signer_provider: SP,
@@ -1976,7 +1979,7 @@ macro_rules! handle_error {
19761979 match $internal {
19771980 Ok(msg) => Ok(msg),
19781981 Err(MsgHandleErrInternal { err, shutdown_finish, .. }) => {
1979- let mut msg_events = Vec::with_capacity(2) ;
1982+ let mut msg_event = None ;
19801983
19811984 if let Some((shutdown_res, update_option)) = shutdown_finish {
19821985 let counterparty_node_id = shutdown_res.counterparty_node_id;
@@ -1988,7 +1991,8 @@ macro_rules! handle_error {
19881991
19891992 $self.finish_close_channel(shutdown_res);
19901993 if let Some(update) = update_option {
1991- msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
1994+ let mut pending_broadcast_messages = $self.pending_broadcast_messages.lock().unwrap();
1995+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
19921996 msg: update
19931997 });
19941998 }
@@ -1998,17 +2002,17 @@ macro_rules! handle_error {
19982002
19992003 if let msgs::ErrorAction::IgnoreError = err.action {
20002004 } else {
2001- msg_events.push (events::MessageSendEvent::HandleError {
2005+ msg_event = Some (events::MessageSendEvent::HandleError {
20022006 node_id: $counterparty_node_id,
20032007 action: err.action.clone()
20042008 });
20052009 }
20062010
2007- if !msg_events.is_empty() {
2011+ if let Some(msg_event) = msg_event {
20082012 let per_peer_state = $self.per_peer_state.read().unwrap();
20092013 if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) {
20102014 let mut peer_state = peer_state_mutex.lock().unwrap();
2011- peer_state.pending_msg_events.append(&mut msg_events );
2015+ peer_state.pending_msg_events.push(msg_event );
20122016 }
20132017 }
20142018
@@ -2466,6 +2470,7 @@ where
24662470 funding_batch_states: Mutex::new(BTreeMap::new()),
24672471
24682472 pending_offers_messages: Mutex::new(Vec::new()),
2473+ pending_broadcast_messages: Mutex::new(Vec::new()),
24692474
24702475 entropy_source,
24712476 node_signer,
@@ -2957,17 +2962,11 @@ where
29572962 }
29582963 };
29592964 if let Some(update) = update_opt {
2960- // Try to send the `BroadcastChannelUpdate` to the peer we just force-closed on, but if
2961- // not try to broadcast it via whatever peer we have.
2962- let per_peer_state = self.per_peer_state.read().unwrap();
2963- let a_peer_state_opt = per_peer_state.get(peer_node_id)
2964- .ok_or(per_peer_state.values().next());
2965- if let Ok(a_peer_state_mutex) = a_peer_state_opt {
2966- let mut a_peer_state = a_peer_state_mutex.lock().unwrap();
2967- a_peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
2968- msg: update
2969- });
2970- }
2965+ // If we have some Channel Update to broadcast, we cache it and broadcast it later.
2966+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
2967+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
2968+ msg: update
2969+ });
29712970 }
29722971
29732972 Ok(counterparty_node_id)
@@ -4043,6 +4042,7 @@ where
40434042 .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
40444043 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
40454044 let peer_state = &mut *peer_state_lock;
4045+
40464046 for channel_id in channel_ids {
40474047 if !peer_state.has_channel(channel_id) {
40484048 return Err(APIError::ChannelUnavailable {
@@ -4059,7 +4059,8 @@ where
40594059 }
40604060 if let ChannelPhase::Funded(channel) = channel_phase {
40614061 if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
4062- peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
4062+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
4063+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
40634064 } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
40644065 peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
40654066 node_id: channel.context.get_counterparty_node_id(),
@@ -4969,7 +4970,8 @@ where
49694970 if n >= DISABLE_GOSSIP_TICKS {
49704971 chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
49714972 if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
4972- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
4973+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
4974+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
49734975 msg: update
49744976 });
49754977 }
@@ -4983,7 +4985,8 @@ where
49834985 if n >= ENABLE_GOSSIP_TICKS {
49844986 chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
49854987 if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
4986- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
4988+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
4989+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
49874990 msg: update
49884991 });
49894992 }
@@ -6642,9 +6645,8 @@ where
66426645 }
66436646 if let Some(ChannelPhase::Funded(chan)) = chan_option {
66446647 if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
6645- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
6646- let peer_state = &mut *peer_state_lock;
6647- peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
6648+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
6649+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
66486650 msg: update
66496651 });
66506652 }
@@ -7304,7 +7306,8 @@ where
73047306 if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
73057307 failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed));
73067308 if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
7307- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
7309+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
7310+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
73087311 msg: update
73097312 });
73107313 }
@@ -7489,7 +7492,8 @@ where
74897492 // We're done with this channel. We got a closing_signed and sent back
74907493 // a closing_signed with a closing transaction to broadcast.
74917494 if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
7492- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
7495+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
7496+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
74937497 msg: update
74947498 });
74957499 }
@@ -8209,7 +8213,7 @@ where
82098213 /// will randomly be placed first or last in the returned array.
82108214 ///
82118215 /// Note that even though `BroadcastChannelAnnouncement` and `BroadcastChannelUpdate`
8212- /// `MessageSendEvent`s are intended to be broadcasted to all peers, they will be pleaced among
8216+ /// `MessageSendEvent`s are intended to be broadcasted to all peers, they will be placed among
82138217 /// the `MessageSendEvent`s to the specific peer they were generated under.
82148218 fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
82158219 let events = RefCell::new(Vec::new());
@@ -8229,6 +8233,7 @@ where
82298233 result = NotifyOption::DoPersist;
82308234 }
82318235
8236+ let mut is_any_peer_connected = false;
82328237 let mut pending_events = Vec::new();
82338238 let per_peer_state = self.per_peer_state.read().unwrap();
82348239 for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
@@ -8237,6 +8242,15 @@ where
82378242 if peer_state.pending_msg_events.len() > 0 {
82388243 pending_events.append(&mut peer_state.pending_msg_events);
82398244 }
8245+ if peer_state.is_connected {
 8246+ is_any_peer_connected = true;
8247+ }
8248+ }
8249+
8250+ // Ensure that we are connected to some peers before getting broadcast messages.
8251+ if is_any_peer_connected {
8252+ let mut broadcast_msgs = self.pending_broadcast_messages.lock().unwrap();
8253+ pending_events.append(&mut broadcast_msgs);
82408254 }
82418255
82428256 if !pending_events.is_empty() {
@@ -8441,6 +8455,7 @@ where
84418455 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
84428456 let peer_state = &mut *peer_state_lock;
84438457 let pending_msg_events = &mut peer_state.pending_msg_events;
8458+
84448459 peer_state.channel_by_id.retain(|_, phase| {
84458460 match phase {
84468461 // Retain unfunded channels.
@@ -8513,7 +8528,8 @@ where
85138528 let reason_message = format!("{}", reason);
85148529 failed_channels.push(channel.context.force_shutdown(true, reason));
85158530 if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
8516- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
8531+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
8532+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
85178533 msg: update
85188534 });
85198535 }
@@ -8960,7 +8976,12 @@ where
89608976 // Gossip
89618977 &events::MessageSendEvent::SendChannelAnnouncement { .. } => false,
89628978 &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
8963- &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
 8979+ // [`ChannelManager::pending_broadcast_messages`] holds the [`BroadcastChannelUpdate`]
 8980+ // events. This check here is only to ensure exhaustiveness.
8981+ &events::MessageSendEvent::BroadcastChannelUpdate { .. } => {
8982+ debug_assert!(false, "This event shouldn't have been here");
8983+ false
8984+ },
89648985 &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
89658986 &events::MessageSendEvent::SendChannelUpdate { .. } => false,
89668987 &events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
@@ -11149,6 +11170,8 @@ where
1114911170
1115011171 pending_offers_messages: Mutex::new(Vec::new()),
1115111172
11173+ pending_broadcast_messages: Mutex::new(Vec::new()),
11174+
1115211175 entropy_source: args.entropy_source,
1115311176 node_signer: args.node_signer,
1115411177 signer_provider: args.signer_provider,
@@ -11678,6 +11701,61 @@ mod tests {
1167811701 }
1167911702 }
1168011703
11704+ #[test]
11705+ fn test_channel_update_cached() {
11706+ let chanmon_cfgs = create_chanmon_cfgs(3);
11707+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
11708+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
11709+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
11710+
11711+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
11712+
11713+ nodes[0].node.force_close_channel_with_peer(&chan.2, &nodes[1].node.get_our_node_id(), None, true).unwrap();
11714+ check_added_monitors!(nodes[0], 1);
11715+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
11716+
11717+ // Confirm that the channel_update was not sent immediately to node[1] but was cached.
11718+ let node_1_events = nodes[1].node.get_and_clear_pending_msg_events();
11719+ assert_eq!(node_1_events.len(), 0);
11720+
11721+ {
11722+ // Assert that ChannelUpdate message has been added to node[0] pending broadcast messages
 11723+ let pending_broadcast_messages = nodes[0].node.pending_broadcast_messages.lock().unwrap();
11724+ assert_eq!(pending_broadcast_messages.len(), 1);
11725+ }
11726+
11727+ // Test that we do not retrieve the pending broadcast messages when we are not connected to any peer
11728+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
11729+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
11730+
11731+ nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
11732+ nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());
11733+
11734+ let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
11735+ assert_eq!(node_0_events.len(), 0);
11736+
11737+ // Now we reconnect to a peer
11738+ nodes[0].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init {
11739+ features: nodes[2].node.init_features(), networks: None, remote_network_address: None
11740+ }, true).unwrap();
11741+ nodes[2].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
11742+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
11743+ }, false).unwrap();
11744+
11745+ // Confirm that get_and_clear_pending_msg_events correctly captures pending broadcast messages
11746+ let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
11747+ assert_eq!(node_0_events.len(), 1);
11748+ match &node_0_events[0] {
11749+ MessageSendEvent::BroadcastChannelUpdate { .. } => (),
11750+ _ => panic!("Unexpected event"),
11751+ }
11752+ {
11753+ // Assert that ChannelUpdate message has been cleared from nodes[0] pending broadcast messages
 11754+ let pending_broadcast_messages = nodes[0].node.pending_broadcast_messages.lock().unwrap();
11755+ assert_eq!(pending_broadcast_messages.len(), 0);
11756+ }
11757+ }
11758+
1168111759 #[test]
1168211760 fn test_drop_disconnected_peers_when_removing_channels() {
1168311761 let chanmon_cfgs = create_chanmon_cfgs(2);
0 commit comments