Commit f39a230
Always forward gossip for all our channels to all our peers
Sadly, the lightning gossip protocol operates by always flooding peers with all the latest gossip they receive. For nodes with many peers, this can result in lots of duplicative gossip as they receive every message from every peer. As a result, some lightning implementations disable gossip with new peers after some threshold.

This mostly works, as such peers can expect to receive the latest gossip from their many other peers. However, in some cases an LDK node may wish to open public channels while having only a single connection to the bulk of the network - and that one peer may request that it not receive any gossip. In that case, LDK would dutifully never send any gossip to its only connection to the outside world.

We would still send gossip for channels with that peer, as it is sent as unicast gossip, but if we then open another connection to a second peer which has no other connection to the outside world, information about that channel would never propagate. We've seen this setup on some LSPs, where they run a public node plus an LSP node which connects only through the public node, yet expects to open public channels to its LSP clients.

Here we finally forward all gossip concerning either side of any of our channels to all our peers, irrespective of their requested gossip filtering.
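In rough pseudocode, the new forwarding rule amounts to the sketch below (the function and parameter names are illustrative, not LDK's actual API; only `NodeId` is real):

```rust
use lightning::routing::gossip::NodeId;

/// Illustrative only: should a gossip message about the channel between
/// `endpoints` be relayed to a peer whose gossip preference is
/// `peer_wants_gossip`?
fn should_relay(
	our_node_id: &NodeId, endpoints: (&NodeId, &NodeId), peer_wants_gossip: bool,
) -> bool {
	// Gossip about one of our own channels is always relayed, even to
	// peers that asked not to receive gossip; third-party gossip still
	// honors the peer's preference.
	let our_channel = endpoints.0 == our_node_id || endpoints.1 == our_node_id;
	our_channel || peer_wants_gossip
}
```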
1 parent e0f53ac commit f39a230

File tree: 1 file changed (+109 −10 lines)

lightning/src/ln/peer_handler.rs

Lines changed: 109 additions & 10 deletions
@@ -1114,6 +1114,7 @@ pub struct PeerManager<
 	gossip_processing_backlog_lifted: AtomicBool,

 	node_signer: NS,
+	our_node_id: NodeId,

 	logger: L,
 	secp_ctx: Secp256k1<secp256k1::SignOnly>,
@@ -1328,6 +1329,9 @@ where
 		let ephemeral_hash = Sha256::from_engine(ephemeral_key_midstate.clone()).to_byte_array();
 		secp_ctx.seeded_randomize(&ephemeral_hash);

+		let our_node_pubkey =
+			node_signer.get_node_id(Recipient::Node).expect("node_id must be available");
+
 		PeerManager {
 			message_handler,
 			peers: FairRwLock::new(new_hash_map()),
@@ -1339,6 +1343,7 @@ where
 			gossip_processing_backlog_lifted: AtomicBool::new(false),
 			last_node_announcement_serial: AtomicU32::new(current_time),
 			logger,
+			our_node_id: NodeId::from_pubkey(&our_node_pubkey),
 			node_signer,
 			secp_ctx,
 		}
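Fetching the node id from the signer once in the constructor and caching it as a `NodeId` keeps the broadcast hot path to a plain byte comparison rather than a signer call per message. A minimal standalone sketch of the same caching pattern (not LDK's constructor itself):

```rust
use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
use lightning::routing::gossip::NodeId;

// Derive the public key once and keep its NodeId form around; every
// later `our_channel` check is then a cheap 33-byte equality test.
fn cached_node_id(node_seckey: &SecretKey) -> NodeId {
	let secp_ctx = Secp256k1::signing_only();
	NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, node_seckey))
}
```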
@@ -2667,12 +2672,16 @@ where
 			BroadcastGossipMessage::ChannelAnnouncement(ref msg) => {
 				log_gossip!(self.logger, "Sending message to all peers except {:?} or the announced channel's counterparties: {:?}", except_node, msg);
 				let encoded_msg = encode_msg!(msg);
+				let our_channel = self.our_node_id == msg.contents.node_id_1
+					|| self.our_node_id == msg.contents.node_id_2;

 				for (_, peer_mutex) in peers.iter() {
 					let mut peer = peer_mutex.lock().unwrap();
-					if !peer.handshake_complete()
-						|| !peer.should_forward_channel_announcement(msg.contents.short_channel_id)
-					{
+					if !peer.handshake_complete() {
+						continue;
+					}
+					let scid = msg.contents.short_channel_id;
+					if !our_channel && !peer.should_forward_channel_announcement(scid) {
 						continue;
 					}
 					debug_assert!(peer.their_node_id.is_some());
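Note the shape of the rewrite: the single combined conditional becomes two early `continue`s because the new `our_channel` override must only bypass the gossip filter, never the handshake check. A compact illustration of the difference (names are placeholders, not LDK's API):

```rust
/// Illustrative predicate: should this peer be skipped for a given
/// gossip message? `filter_allows` stands in for the peer's
/// `should_forward_channel_announcement` check.
fn skip_peer(handshake_complete: bool, filter_allows: bool, our_channel: bool) -> bool {
	// A naive `!our_channel && (!handshake_complete || !filter_allows)`
	// would let `our_channel == true` bypass the handshake check too,
	// queueing bytes for a peer that cannot decrypt them yet. Keeping
	// the handshake check unconditional avoids that:
	!handshake_complete || (!our_channel && !filter_allows)
}
```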
@@ -2711,12 +2720,15 @@ where
 					msg
 				);
 				let encoded_msg = encode_msg!(msg);
+				let our_announcement = self.our_node_id == msg.contents.node_id;

 				for (_, peer_mutex) in peers.iter() {
 					let mut peer = peer_mutex.lock().unwrap();
-					if !peer.handshake_complete()
-						|| !peer.should_forward_node_announcement(msg.contents.node_id)
-					{
+					if !peer.handshake_complete() {
+						continue;
+					}
+					let node_id = msg.contents.node_id;
+					if !our_announcement && !peer.should_forward_node_announcement(node_id) {
 						continue;
 					}
 					debug_assert!(peer.their_node_id.is_some());
@@ -2745,20 +2757,23 @@ where
 					peer.gossip_broadcast_buffer.push_back(encoded_message);
 				}
 			},
-			BroadcastGossipMessage::ChannelUpdate { msg, node_id_1: _, node_id_2: _ } => {
+			BroadcastGossipMessage::ChannelUpdate { msg, node_id_1, node_id_2 } => {
 				log_gossip!(
 					self.logger,
 					"Sending message to all peers except {:?}: {:?}",
 					except_node,
 					msg
 				);
 				let encoded_msg = encode_msg!(msg);
+				let our_channel = self.our_node_id == *node_id_1 || self.our_node_id == *node_id_2;

 				for (_, peer_mutex) in peers.iter() {
 					let mut peer = peer_mutex.lock().unwrap();
-					if !peer.handshake_complete()
-						|| !peer.should_forward_channel_announcement(msg.contents.short_channel_id)
-					{
+					if !peer.handshake_complete() {
+						continue;
+					}
+					let scid = msg.contents.short_channel_id;
+					if !our_channel && !peer.should_forward_channel_announcement(scid) {
 						continue;
 					}
 					debug_assert!(peer.their_node_id.is_some());
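A `channel_update` message identifies its channel only by `short_channel_id` plus a direction bit; it does not carry the endpoint node ids. That is why the broadcast event's previously ignored `node_id_1`/`node_id_2` fields are now bound: they let the ownership check run without a graph lookup. Roughly (enum shape illustrative; the real definition lives in `peer_handler.rs`):

```rust
use lightning::ln::msgs::ChannelUpdate;
use lightning::routing::gossip::NodeId;

// Sketch of the broadcast event for a channel_update: the endpoint ids
// travel alongside the wire message because the message itself only
// names the channel by scid.
#[allow(dead_code)]
enum BroadcastGossipMessageSketch {
	ChannelUpdate { msg: ChannelUpdate, node_id_1: NodeId, node_id_2: NodeId },
}
```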
@@ -4890,6 +4905,90 @@ mod tests {
 		assert_eq!(filter_addresses(None), None);
 	}

+	#[test]
+	fn test_forward_gossip_for_our_channels_ignores_peer_filter() {
+		// Tests that gossip for channels where we are one of the endpoints is forwarded to all
+		// peers, regardless of any gossip filters they may have set. This ensures that updates
+		// for our own channels always propagate to all connected peers.
+
+		let cfgs = create_peermgr_cfgs(2);
+		let peers = create_network(2, &cfgs);
+
+		let id_0 = peers[0].node_signer.get_node_id(Recipient::Node).unwrap();
+		let addr_0 = SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 1000 };
+		let addr_1 = SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 1001 };
+
+		let mut fd_0_1 = FileDescriptor::new(1);
+		let mut fd_1_0 = FileDescriptor::new(2);
+
+		// Connect the peers and exchange the initial connection handshake.
+		let initial_data =
+			peers[1].new_outbound_connection(id_0, fd_1_0.clone(), Some(addr_0)).unwrap();
+		peers[0].new_inbound_connection(fd_0_1.clone(), Some(addr_1)).unwrap();
+		peers[0].read_event(&mut fd_0_1, &initial_data).unwrap();
+
+		peers[0].process_events();
+		let data_0_1 = fd_0_1.outbound_data.lock().unwrap().split_off(0);
+		peers[1].read_event(&mut fd_1_0, &data_0_1).unwrap();
+
+		peers[1].process_events();
+		let data_1_0 = fd_1_0.outbound_data.lock().unwrap().split_off(0);
+		peers[0].read_event(&mut fd_0_1, &data_1_0).unwrap();
+
+		peers[0].process_events();
+		let data_0_1 = fd_0_1.outbound_data.lock().unwrap().split_off(0);
+		peers[1].read_event(&mut fd_1_0, &data_0_1).unwrap();
+
+		// Once peer 1 receives the Init message in the last read_event, it'll generate a
+		// `GossipTimestampFilter` which will request gossip. Instead we drop it here.
+		cfgs[1]
+			.routing_handler
+			.pending_events
+			.lock()
+			.unwrap()
+			.retain(|ev| !matches!(ev, MessageSendEvent::SendGossipTimestampFilter { .. }));
+
+		peers[1].process_events();
+		let data_1_0 = fd_1_0.outbound_data.lock().unwrap().split_off(0);
+		peers[0].read_event(&mut fd_0_1, &data_1_0).unwrap();
+
+		peers[0].process_events();
+		assert!(fd_0_1.outbound_data.lock().unwrap().is_empty());
+		assert!(fd_1_0.outbound_data.lock().unwrap().is_empty());
+
+		let mut check_message_received = |expected_received: bool| {
+			let initial_count = cfgs[1].routing_handler.chan_upds_recvd.load(Ordering::Acquire);
+
+			peers[0].process_events();
+			let data_0_1 = fd_0_1.outbound_data.lock().unwrap().split_off(0);
+			assert_eq!(data_0_1.is_empty(), !expected_received);
+			peers[1].read_event(&mut fd_1_0, &data_0_1).unwrap();
+
+			let final_count = cfgs[1].routing_handler.chan_upds_recvd.load(Ordering::Acquire);
+			assert_eq!(final_count > initial_count, expected_received);
+		};
+
+		// Broadcast a gossip message that is unrelated to us and check that it doesn't get relayed
+		let unrelated_msg_ev = MessageSendEvent::BroadcastChannelUpdate {
+			msg: test_utils::get_dummy_channel_update(43),
+			node_id_1: NodeId::from_slice(&[2; 33]).unwrap(),
+			node_id_2: NodeId::from_slice(&[3; 33]).unwrap(),
+		};
+		cfgs[0].routing_handler.pending_events.lock().unwrap().push(unrelated_msg_ev);
+
+		check_message_received(false);
+
+		// Broadcast a gossip message that we're a party to and check that it's relayed
+		let our_channel_msg_ev = MessageSendEvent::BroadcastChannelUpdate {
+			msg: test_utils::get_dummy_channel_update(43),
+			node_id_1: NodeId::from_pubkey(&id_0),
+			node_id_2: NodeId::from_slice(&[3; 33]).unwrap(),
+		};
+		cfgs[0].routing_handler.pending_events.lock().unwrap().push(our_channel_msg_ev);
+
+		check_message_received(true);
+	}
+
 	#[test]
 	#[cfg(feature = "std")]
 	fn test_process_events_multithreaded() {
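The `check_message_received` closure in the test asserts the outcome twice: on the raw bytes drained from the mock descriptor and on the receiving handler's `chan_upds_recvd` counter. The counter half is a snapshot/act/compare pattern; a minimal standalone version (hypothetical helper, not part of the test utilities):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

/// Returns true iff `act` caused `counter` to increase -- the same
/// delta check the test's closure performs around `read_event`.
fn delta_observed(counter: &AtomicUsize, act: impl FnOnce()) -> bool {
	let before = counter.load(Ordering::Acquire);
	act();
	counter.load(Ordering::Acquire) > before
}
```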
