// sc_network/litep2p/mod.rs

1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
5
6// This program is free software: you can redistribute it and/or modify
7// it under the terms of the GNU General Public License as published by
8// the Free Software Foundation, either version 3 of the License, or
9// (at your option) any later version.
10
11// This program is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// You should have received a copy of the GNU General Public License
17// along with this program. If not, see <https://www.gnu.org/licenses/>.
18
19//! `NetworkBackend` implementation for `litep2p`.
20
21use crate::{
22	config::{
23		FullNetworkConfiguration, IncomingRequest, NodeKeyConfig, NotificationHandshake, Params,
24		SetConfig, TransportConfig,
25	},
26	error::Error,
27	event::{DhtEvent, Event},
28	litep2p::{
29		bitswap::BitswapServer,
30		discovery::{Discovery, DiscoveryEvent},
31		ipfs_dht::IpfsDht,
32		peerstore::Peerstore,
33		service::{Litep2pNetworkService, NetworkServiceCommand},
34		shim::{
35			notification::{
36				config::{NotificationProtocolConfig, ProtocolControlHandle},
37				peerset::PeersetCommand,
38			},
39			request_response::{RequestResponseConfig, RequestResponseProtocol},
40		},
41	},
42	peer_store::PeerStoreProvider,
43	service::{
44		metrics::{register_without_sources, MetricSources, Metrics, NotificationMetrics},
45		out_events,
46		traits::{BandwidthSink, NetworkBackend, NetworkService},
47	},
48	NetworkStatus, NotificationService, ProtocolName,
49};
50
51use codec::Encode;
52use futures::StreamExt;
53use litep2p::{
54	config::ConfigBuilder,
55	crypto::ed25519::Keypair,
56	error::{DialError, NegotiationError},
57	executor::Executor,
58	protocol::{
59		libp2p::{
60			bitswap::Config as BitswapConfig,
61			kademlia::{QueryId, Record},
62		},
63		request_response::ConfigBuilder as RequestResponseConfigBuilder,
64	},
65	transport::{
66		tcp::config::Config as TcpTransportConfig,
67		websocket::config::Config as WebSocketTransportConfig, ConnectionLimitsConfig, Endpoint,
68	},
69	types::{
70		multiaddr::{Multiaddr, Protocol},
71		ConnectionId,
72	},
73	Litep2p, Litep2pEvent, ProtocolName as Litep2pProtocolName,
74};
75use prometheus_endpoint::Registry;
76use sc_network_types::kad::{Key as RecordKey, PeerRecord, Record as P2PRecord};
77
78use sc_client_api::BlockBackend;
79use sc_network_common::{role::Roles, ExHashT};
80use sc_network_types::PeerId;
81use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver};
82use sp_runtime::traits::Block as BlockT;
83
84use std::{
85	cmp,
86	collections::{hash_map::Entry, HashMap, HashSet},
87	fs,
88	future::Future,
89	iter,
90	pin::Pin,
91	sync::{
92		atomic::{AtomicUsize, Ordering},
93		Arc,
94	},
95	time::{Duration, Instant},
96};
97
98mod bitswap;
99mod discovery;
100mod ipfs_dht;
101mod peerstore;
102mod service;
103mod shim;
104
/// Litep2p bandwidth sink.
///
/// Wraps `litep2p`'s own bandwidth accounting and exposes it through the
/// backend-agnostic [`BandwidthSink`] trait.
struct Litep2pBandwidthSink {
	// Underlying `litep2p` bandwidth counter.
	sink: litep2p::BandwidthSink,
}
109
110impl BandwidthSink for Litep2pBandwidthSink {
111	fn total_inbound(&self) -> u64 {
112		self.sink.inbound() as u64
113	}
114
115	fn total_outbound(&self) -> u64 {
116		self.sink.outbound() as u64
117	}
118}
119
/// Litep2p task executor.
///
/// Adapts the spawner closure supplied through `Params::executor` to litep2p's
/// [`Executor`] trait so that litep2p can spawn its background tasks onto the
/// host's runtime.
struct Litep2pExecutor {
	// Closure used to spawn boxed futures onto the host's task executor.
	executor: Box<dyn Fn(Pin<Box<dyn Future<Output = ()> + Send>>) + Send + Sync>,
}
125
126impl Executor for Litep2pExecutor {
127	fn run(&self, future: Pin<Box<dyn Future<Output = ()> + Send>>) {
128		(self.executor)(future)
129	}
130
131	fn run_with_name(&self, _: &'static str, future: Pin<Box<dyn Future<Output = ()> + Send>>) {
132		(self.executor)(future)
133	}
134}
135
/// Logging target for the file.
///
/// NOTE(review): the target is `sub-libp2p` rather than `sub-litep2p`; presumably kept so that
/// existing log-filter directives apply to both backends — confirm before renaming.
const LOG_TARGET: &str = "sub-libp2p";
138
/// Peer context.
///
/// Per-peer bookkeeping of the currently open connections.
struct ConnectionContext {
	/// Peer endpoints, keyed by the connection they belong to.
	endpoints: HashMap<ConnectionId, Endpoint>,

	/// Number of active connections.
	num_connections: usize,
}
147
/// Kademlia query we are tracking.
///
/// Each variant carries the [`Instant`] at which the query was initiated; it is used to
/// report the query duration to Prometheus when the query concludes.
#[derive(Debug)]
enum KadQuery {
	/// `FIND_NODE` query for target and when it was initiated.
	FindNode(PeerId, Instant),
	/// `GET_VALUE` query for key and when it was initiated.
	GetValue(RecordKey, Instant),
	/// `PUT_VALUE` query for key and when it was initiated.
	PutValue(RecordKey, Instant),
	/// `GET_PROVIDERS` query for key and when it was initiated.
	GetProviders(RecordKey, Instant),
	/// `ADD_PROVIDER` query for key and when it was initiated.
	AddProvider(RecordKey, Instant),
}
162
/// Networking backend for `litep2p`.
///
/// Constructed by `NetworkBackend::new()` and driven to completion by `NetworkBackend::run()`.
pub struct Litep2pNetworkBackend {
	/// Main `litep2p` object.
	litep2p: Litep2p,

	/// `NetworkService` implementation for `Litep2pNetworkBackend`.
	network_service: Arc<dyn NetworkService>,

	/// RX channel for receiving commands from `Litep2pNetworkService`.
	cmd_rx: TracingUnboundedReceiver<NetworkServiceCommand>,

	/// `Peerset` handles to notification protocols.
	peerset_handles: HashMap<ProtocolName, ProtocolControlHandle>,

	/// Pending Kademlia queries, tracked together with the time they were started so the
	/// query duration can be reported to metrics.
	pending_queries: HashMap<QueryId, KadQuery>,

	/// Discovery subsystem, polled for `DiscoveryEvent`s in the event loop.
	discovery: Discovery,

	/// Number of connected peers, shared with the metrics sources and refreshed in the event
	/// loop from the block announce protocol's peerset.
	num_connected: Arc<AtomicUsize>,

	/// Connected peers.
	peers: HashMap<litep2p::PeerId, ConnectionContext>,

	/// Peerstore.
	peerstore_handle: Arc<dyn PeerStoreProvider>,

	/// Block announce protocol name.
	block_announce_protocol: ProtocolName,

	/// Channels used to forward events (currently DHT events) to subscribers.
	event_streams: out_events::OutChannels,

	/// Prometheus metrics.
	metrics: Option<Metrics>,
}
201
202impl Litep2pNetworkBackend {
203	/// From an iterator of multiaddress(es), parse and group all addresses of peers
204	/// so that litep2p can consume the information easily.
205	fn parse_addresses(
206		addresses: impl Iterator<Item = Multiaddr>,
207	) -> HashMap<PeerId, Vec<Multiaddr>> {
208		addresses
209			.into_iter()
210			.filter_map(|address| match address.iter().next() {
211				Some(
212					Protocol::Dns(_) |
213					Protocol::Dns4(_) |
214					Protocol::Dns6(_) |
215					Protocol::Ip6(_) |
216					Protocol::Ip4(_),
217				) => match address.iter().find(|protocol| std::matches!(protocol, Protocol::P2p(_)))
218				{
219					Some(Protocol::P2p(multihash)) => PeerId::from_multihash(multihash.into())
220						.map_or(None, |peer| Some((peer, Some(address)))),
221					_ => None,
222				},
223				Some(Protocol::P2p(multihash)) => {
224					PeerId::from_multihash(multihash.into()).map_or(None, |peer| Some((peer, None)))
225				},
226				_ => None,
227			})
228			.fold(HashMap::new(), |mut acc, (peer, maybe_address)| {
229				let entry = acc.entry(peer).or_default();
230				maybe_address.map(|address| entry.push(address));
231
232				acc
233			})
234	}
235
236	/// Add new known addresses to `litep2p` and return the parsed peer IDs.
237	fn add_addresses(&mut self, peers: impl Iterator<Item = Multiaddr>) -> HashSet<PeerId> {
238		Self::parse_addresses(peers.into_iter())
239			.into_iter()
240			.filter_map(|(peer, addresses)| {
241				// `peers` contained multiaddress in the form `/p2p/<peer ID>`
242				if addresses.is_empty() {
243					return Some(peer);
244				}
245
246				if self.litep2p.add_known_address(peer.into(), addresses.clone().into_iter()) == 0 {
247					log::warn!(
248						target: LOG_TARGET,
249						"couldn't add any addresses for {peer:?} and it won't be added as reserved peer",
250					);
251					return None;
252				}
253
254				self.peerstore_handle.add_known_peer(peer);
255				Some(peer)
256			})
257			.collect()
258	}
259}
260
261impl Litep2pNetworkBackend {
262	/// Get `litep2p` keypair from `NodeKeyConfig`.
263	fn get_keypair(node_key: &NodeKeyConfig) -> Result<(Keypair, litep2p::PeerId), Error> {
264		let secret: litep2p::crypto::ed25519::SecretKey =
265			node_key.clone().into_keypair()?.secret().into();
266
267		let local_identity = Keypair::from(secret);
268		let local_public = local_identity.public();
269		let local_peer_id = local_public.to_peer_id();
270
271		Ok((local_identity, local_peer_id))
272	}
273
274	/// Configure transport protocols for `Litep2pNetworkBackend`.
275	fn configure_transport<B: BlockT + 'static, H: ExHashT>(
276		config: &FullNetworkConfiguration<B, H, Self>,
277	) -> ConfigBuilder {
278		let _ = match config.network_config.transport {
279			TransportConfig::MemoryOnly => panic!("memory transport not supported"),
280			TransportConfig::Normal { .. } => false,
281		};
282		let config_builder = ConfigBuilder::new();
283
284		let (tcp, websocket): (Vec<Option<_>>, Vec<Option<_>>) = config
285			.network_config
286			.listen_addresses
287			.iter()
288			.filter_map(|address| {
289				use sc_network_types::multiaddr::Protocol;
290
291				let mut iter = address.iter();
292
293				match iter.next() {
294					Some(Protocol::Ip4(_) | Protocol::Ip6(_)) => {},
295					protocol => {
296						log::error!(
297							target: LOG_TARGET,
298							"unknown protocol {protocol:?}, ignoring {address:?}",
299						);
300
301						return None;
302					},
303				}
304
305				match iter.next() {
306					Some(Protocol::Tcp(_)) => match iter.next() {
307						Some(Protocol::Ws(_) | Protocol::Wss(_)) => {
308							Some((None, Some(address.clone())))
309						},
310						Some(Protocol::P2p(_)) | None => Some((Some(address.clone()), None)),
311						protocol => {
312							log::error!(
313								target: LOG_TARGET,
314								"unknown protocol {protocol:?}, ignoring {address:?}",
315							);
316							None
317						},
318					},
319					protocol => {
320						log::error!(
321							target: LOG_TARGET,
322							"unknown protocol {protocol:?}, ignoring {address:?}",
323						);
324						None
325					},
326				}
327			})
328			.unzip();
329
330		config_builder
331			.with_websocket(WebSocketTransportConfig {
332				listen_addresses: websocket.into_iter().flatten().map(Into::into).collect(),
333				yamux_config: litep2p::yamux::Config::default(),
334				nodelay: true,
335				..Default::default()
336			})
337			.with_tcp(TcpTransportConfig {
338				listen_addresses: tcp.into_iter().flatten().map(Into::into).collect(),
339				yamux_config: litep2p::yamux::Config::default(),
340				nodelay: true,
341				..Default::default()
342			})
343	}
344}
345
346#[async_trait::async_trait]
347impl<B: BlockT + 'static, H: ExHashT> NetworkBackend<B, H> for Litep2pNetworkBackend {
348	type NotificationProtocolConfig = NotificationProtocolConfig;
349	type RequestResponseProtocolConfig = RequestResponseConfig;
350	type NetworkService<Block, Hash> = Arc<Litep2pNetworkService>;
351	type PeerStore = Peerstore;
352	type BitswapConfig = BitswapConfig;
353
354	fn new(mut params: Params<B, H, Self>) -> Result<Self, Error>
355	where
356		Self: Sized,
357	{
358		let (keypair, local_peer_id) =
359			Self::get_keypair(&params.network_config.network_config.node_key)?;
360		let (cmd_tx, cmd_rx) = tracing_unbounded("mpsc_network_worker", 100_000);
361
362		params.network_config.network_config.boot_nodes = params
363			.network_config
364			.network_config
365			.boot_nodes
366			.into_iter()
367			.filter(|boot_node| boot_node.peer_id != local_peer_id.into())
368			.collect();
369		params.network_config.network_config.default_peers_set.reserved_nodes = params
370			.network_config
371			.network_config
372			.default_peers_set
373			.reserved_nodes
374			.into_iter()
375			.filter(|reserved_node| {
376				if reserved_node.peer_id == local_peer_id.into() {
377					log::warn!(
378						target: LOG_TARGET,
379						"Local peer ID used in reserved node, ignoring: {reserved_node}",
380					);
381					false
382				} else {
383					true
384				}
385			})
386			.collect();
387
388		if let Some(path) = &params.network_config.network_config.net_config_path {
389			fs::create_dir_all(path)?;
390		}
391
392		log::info!(target: LOG_TARGET, "Local node identity is: {local_peer_id}");
393		log::info!(target: LOG_TARGET, "Running litep2p network backend");
394
395		params.network_config.sanity_check_addresses()?;
396		params.network_config.sanity_check_bootnodes()?;
397
398		let mut config_builder =
399			Self::configure_transport(&params.network_config).with_keypair(keypair.clone());
400		let known_addresses = params.network_config.known_addresses();
401		let peer_store_handle = params.network_config.peer_store_handle();
402		let executor = Arc::new(Litep2pExecutor { executor: params.executor });
403
404		let FullNetworkConfiguration {
405			notification_protocols,
406			request_response_protocols,
407			network_config,
408			..
409		} = params.network_config;
410
411		// initialize notification protocols
412		//
413		// pass the protocol configuration to `Litep2pConfigBuilder` and save the TX channel
414		// to the protocol's `Peerset` together with the protocol name to allow other subsystems
415		// of Polkadot SDK to control connectivity of the notification protocol
416		let block_announce_protocol = params.block_announce_config.protocol_name().clone();
417		let mut notif_protocols = HashMap::from_iter([(
418			params.block_announce_config.protocol_name().clone(),
419			params.block_announce_config.handle,
420		)]);
421
422		// handshake for all but the syncing protocol is set to node role
423		config_builder = notification_protocols
424			.into_iter()
425			.fold(config_builder, |config_builder, mut config| {
426				config.config.set_handshake(Roles::from(&params.role).encode());
427				notif_protocols.insert(config.protocol_name, config.handle);
428
429				config_builder.with_notification_protocol(config.config)
430			})
431			.with_notification_protocol(params.block_announce_config.config);
432
433		// initialize request-response protocols
434		let metrics = match &params.metrics_registry {
435			Some(registry) => Some(register_without_sources(registry)?),
436			None => None,
437		};
438
439		// create channels that are used to send request before initializing protocols so the
440		// senders can be passed onto all request-response protocols
441		//
442		// all protocols must have each others' senders so they can send the fallback request in
443		// case the main protocol is not supported by the remote peer and user specified a fallback
444		let (mut request_response_receivers, request_response_senders): (
445			HashMap<_, _>,
446			HashMap<_, _>,
447		) = request_response_protocols
448			.iter()
449			.map(|config| {
450				let (tx, rx) = tracing_unbounded("outbound-requests", 10_000);
451				((config.protocol_name.clone(), rx), (config.protocol_name.clone(), tx))
452			})
453			.unzip();
454
455		config_builder = request_response_protocols.into_iter().fold(
456			config_builder,
457			|config_builder, config| {
458				let (protocol_config, handle) = RequestResponseConfigBuilder::new(
459					Litep2pProtocolName::from(config.protocol_name.clone()),
460				)
461				.with_max_size(cmp::max(config.max_request_size, config.max_response_size) as usize)
462				.with_fallback_names(config.fallback_names.into_iter().map(From::from).collect())
463				.with_timeout(config.request_timeout)
464				.build();
465
466				let protocol = RequestResponseProtocol::new(
467					config.protocol_name.clone(),
468					handle,
469					Arc::clone(&peer_store_handle),
470					config.inbound_queue,
471					request_response_receivers
472						.remove(&config.protocol_name)
473						.expect("receiver exists as it was just added and there are no duplicate protocols; qed"),
474					request_response_senders.clone(),
475					metrics.clone(),
476				);
477
478				executor.run(Box::pin(async move {
479					protocol.run().await;
480				}));
481
482				config_builder.with_request_response_protocol(protocol_config)
483			},
484		);
485
486		// collect known addresses
487		let known_addresses: HashMap<litep2p::PeerId, Vec<Multiaddr>> =
488			known_addresses.into_iter().fold(HashMap::new(), |mut acc, (peer, address)| {
489				use sc_network_types::multiaddr::Protocol;
490
491				let address = match address.iter().last() {
492					Some(Protocol::Ws(_) | Protocol::Wss(_) | Protocol::Tcp(_)) => {
493						address.with(Protocol::P2p(peer.into()))
494					},
495					Some(Protocol::P2p(_)) => address,
496					_ => return acc,
497				};
498
499				acc.entry(peer.into()).or_default().push(address.into());
500				peer_store_handle.add_known_peer(peer);
501
502				acc
503			});
504
505		// enable ipfs ping, identify and kademlia, and potentially mdns if user enabled it
506		let listen_addresses = Arc::new(Default::default());
507		let (discovery, ping_config, identify_config, kademlia_config, maybe_mdns_config) =
508			Discovery::new(
509				local_peer_id,
510				&network_config,
511				params.genesis_hash,
512				params.fork_id.as_deref(),
513				&params.protocol_id,
514				known_addresses.clone(),
515				Arc::clone(&listen_addresses),
516				Arc::clone(&peer_store_handle),
517			);
518
519		// enable Bitswap & IPFS DHT
520		if let Some(config) = params.ipfs_config {
521			config_builder = config_builder.with_libp2p_bitswap(config.bitswap_config);
522
523			if !config.bootnodes.is_empty() {
524				let (ipfs_dht, kad_config) = IpfsDht::new(config.bootnodes, config.block_provider);
525				config_builder = config_builder.with_libp2p_kademlia(kad_config);
526				executor.run(Box::pin(ipfs_dht.run()));
527			} else {
528				log::warn!(
529					target: LOG_TARGET,
530					"Not starting IPFS DHT publisher because no IPFS bootnodes are configured. \
531					 Only direct Bitswap requests will be handled.",
532				);
533			}
534		}
535
536		config_builder = config_builder
537			.with_known_addresses(known_addresses.clone().into_iter())
538			.with_libp2p_ping(ping_config)
539			.with_libp2p_identify(identify_config)
540			.with_libp2p_kademlia(kademlia_config)
541			.with_connection_limits(ConnectionLimitsConfig::default().max_incoming_connections(
542				Some(crate::MAX_CONNECTIONS_ESTABLISHED_INCOMING as usize),
543			))
544			.with_keep_alive_timeout(network_config.idle_connection_timeout)
545			// Use system DNS resolver to enable intranet domain resolution and administrator
546			// control over DNS lookup.
547			.with_system_resolver()
548			.with_executor(executor);
549
550		if let Some(config) = maybe_mdns_config {
551			config_builder = config_builder.with_mdns(config);
552		}
553
554		let litep2p =
555			Litep2p::new(config_builder.build()).map_err(|error| Error::Litep2p(error))?;
556
557		litep2p.listen_addresses().for_each(|address| {
558			log::debug!(target: LOG_TARGET, "listening on: {address}");
559
560			listen_addresses.write().insert(address.clone());
561		});
562
563		let public_addresses = litep2p.public_addresses();
564		for address in network_config.public_addresses.iter() {
565			if let Err(err) = public_addresses.add_address(address.clone().into()) {
566				log::warn!(
567					target: LOG_TARGET,
568					"failed to add public address {address:?}: {err:?}",
569				);
570			}
571		}
572
573		let network_service = Arc::new(Litep2pNetworkService::new(
574			local_peer_id,
575			keypair.clone(),
576			cmd_tx,
577			Arc::clone(&peer_store_handle),
578			notif_protocols.clone(),
579			block_announce_protocol.clone(),
580			request_response_senders,
581			Arc::clone(&listen_addresses),
582			public_addresses,
583		));
584
585		// register rest of the metrics now that `Litep2p` has been created
586		let num_connected = Arc::new(Default::default());
587		let bandwidth: Arc<dyn BandwidthSink> =
588			Arc::new(Litep2pBandwidthSink { sink: litep2p.bandwidth_sink() });
589
590		if let Some(registry) = &params.metrics_registry {
591			MetricSources::register(registry, bandwidth, Arc::clone(&num_connected))?;
592		}
593
594		Ok(Self {
595			network_service,
596			cmd_rx,
597			metrics,
598			peerset_handles: notif_protocols,
599			num_connected,
600			discovery,
601			pending_queries: HashMap::new(),
602			peerstore_handle: peer_store_handle,
603			block_announce_protocol,
604			event_streams: out_events::OutChannels::new(None)?,
605			peers: HashMap::new(),
606			litep2p,
607		})
608	}
609
	/// Get a handle to the backend-agnostic `NetworkService` implementation.
	fn network_service(&self) -> Arc<dyn NetworkService> {
		Arc::clone(&self.network_service)
	}
613
	/// Create the [`Peerstore`] for the given bootnodes, optionally registering its metrics
	/// in `metrics_registry`.
	fn peer_store(
		bootnodes: Vec<sc_network_types::PeerId>,
		metrics_registry: Option<Registry>,
	) -> Self::PeerStore {
		Peerstore::new(bootnodes, metrics_registry)
	}
620
	/// Register notification-protocol metrics in `registry`, if one is provided.
	fn register_notification_metrics(registry: Option<&Registry>) -> NotificationMetrics {
		NotificationMetrics::new(registry)
	}
624
	/// Create Bitswap server.
	///
	/// Returns the future driving the server (to be spawned by the caller) together with the
	/// Bitswap protocol configuration to be passed to the backend.
	fn bitswap_server(
		client: Arc<dyn BlockBackend<B> + Send + Sync>,
	) -> (Pin<Box<dyn Future<Output = ()> + Send>>, Self::BitswapConfig) {
		BitswapServer::new(client)
	}
631
	/// Create notification protocol configuration for `protocol`.
	///
	/// Delegates to [`NotificationProtocolConfig::new`], returning the backend-specific
	/// protocol configuration together with the [`NotificationService`] handle for the
	/// protocol.
	fn notification_config(
		protocol_name: ProtocolName,
		fallback_names: Vec<ProtocolName>,
		max_notification_size: u64,
		handshake: Option<NotificationHandshake>,
		set_config: SetConfig,
		metrics: NotificationMetrics,
		peerstore_handle: Arc<dyn PeerStoreProvider>,
	) -> (Self::NotificationProtocolConfig, Box<dyn NotificationService>) {
		Self::NotificationProtocolConfig::new(
			protocol_name,
			fallback_names,
			// the config API takes the size as `usize`
			max_notification_size as usize,
			handshake,
			set_config,
			metrics,
			peerstore_handle,
		)
	}
652
	/// Create request-response protocol configuration.
	///
	/// Delegates to [`RequestResponseConfig::new`]; `inbound_queue` is where inbound requests
	/// are forwarded, or `None` if the protocol only sends outbound requests.
	fn request_response_config(
		protocol_name: ProtocolName,
		fallback_names: Vec<ProtocolName>,
		max_request_size: u64,
		max_response_size: u64,
		request_timeout: Duration,
		inbound_queue: Option<async_channel::Sender<IncomingRequest>>,
	) -> Self::RequestResponseProtocolConfig {
		Self::RequestResponseProtocolConfig::new(
			protocol_name,
			fallback_names,
			max_request_size,
			max_response_size,
			request_timeout,
			inbound_queue,
		)
	}
671
672	/// Start [`Litep2pNetworkBackend`] event loop.
673	async fn run(mut self) {
674		log::debug!(target: LOG_TARGET, "starting litep2p network backend");
675
676		loop {
677			let num_connected_peers = self
678				.peerset_handles
679				.get(&self.block_announce_protocol)
680				.map_or(0usize, |handle| handle.connected_peers.load(Ordering::Relaxed));
681			self.num_connected.store(num_connected_peers, Ordering::Relaxed);
682
683			tokio::select! {
684				command = self.cmd_rx.next() => match command {
685					None => return,
686					Some(command) => match command {
687						NetworkServiceCommand::FindClosestPeers { target } => {
688							let query_id = self.discovery.find_node(target.into()).await;
689							self.pending_queries.insert(query_id, KadQuery::FindNode(target, Instant::now()));
690						}
691						NetworkServiceCommand::GetValue{ key } => {
692							let query_id = self.discovery.get_value(key.clone()).await;
693							self.pending_queries.insert(query_id, KadQuery::GetValue(key, Instant::now()));
694						}
695						NetworkServiceCommand::PutValue { key, value } => {
696							let query_id = self.discovery.put_value(key.clone(), value).await;
697							self.pending_queries.insert(query_id, KadQuery::PutValue(key, Instant::now()));
698						}
699						NetworkServiceCommand::PutValueTo { record, peers, update_local_storage} => {
700							let kademlia_key = record.key.clone();
701							let query_id = self.discovery.put_value_to_peers(record.into(), peers, update_local_storage).await;
702							self.pending_queries.insert(query_id, KadQuery::PutValue(kademlia_key, Instant::now()));
703						}
704						NetworkServiceCommand::StoreRecord { key, value, publisher, expires } => {
705							self.discovery.store_record(key, value, publisher.map(Into::into), expires).await;
706						}
707						NetworkServiceCommand::StartProviding { key } => {
708							let query_id = self.discovery.start_providing(key.clone()).await;
709							self.pending_queries.insert(query_id, KadQuery::AddProvider(key, Instant::now()));
710						}
711						NetworkServiceCommand::StopProviding { key } => {
712							self.discovery.stop_providing(key).await;
713						}
714						NetworkServiceCommand::GetProviders { key } => {
715							let query_id = self.discovery.get_providers(key.clone()).await;
716							self.pending_queries.insert(query_id, KadQuery::GetProviders(key, Instant::now()));
717						}
718						NetworkServiceCommand::EventStream { tx } => {
719							self.event_streams.push(tx);
720						}
721						NetworkServiceCommand::Status { tx } => {
722							let _ = tx.send(NetworkStatus {
723								num_connected_peers: self
724									.peerset_handles
725									.get(&self.block_announce_protocol)
726									.map_or(0usize, |handle| handle.connected_peers.load(Ordering::Relaxed)),
727								total_bytes_inbound: self.litep2p.bandwidth_sink().inbound() as u64,
728								total_bytes_outbound: self.litep2p.bandwidth_sink().outbound() as u64,
729							});
730						}
731						NetworkServiceCommand::AddPeersToReservedSet {
732							protocol,
733							peers,
734						} => {
735							let peers = self.add_addresses(peers.into_iter().map(Into::into));
736
737							match self.peerset_handles.get(&protocol) {
738								Some(handle) => {
739									let _ = handle.tx.unbounded_send(PeersetCommand::AddReservedPeers { peers });
740								}
741								None => log::warn!(target: LOG_TARGET, "protocol {protocol} doens't exist"),
742							};
743						}
744						NetworkServiceCommand::AddKnownAddress { peer, address } => {
745							let mut address: Multiaddr = address.into();
746
747							if !address.iter().any(|protocol| std::matches!(protocol, Protocol::P2p(_))) {
748								address.push(Protocol::P2p(litep2p::PeerId::from(peer).into()));
749							}
750
751							if self.litep2p.add_known_address(peer.into(), iter::once(address.clone())) > 0 {
752								// libp2p backend generates `DiscoveryOut::Discovered(peer_id)`
753								// event when a new address is added for a peer, which leads to the
754								// peer being added to peerstore. Do the same directly here.
755								self.peerstore_handle.add_known_peer(peer);
756							} else {
757								log::debug!(
758									target: LOG_TARGET,
759									"couldn't add known address ({address}) for {peer:?}, unsupported transport"
760								);
761							}
762						},
763						NetworkServiceCommand::SetReservedPeers { protocol, peers } => {
764							let peers = self.add_addresses(peers.into_iter().map(Into::into));
765
766							match self.peerset_handles.get(&protocol) {
767								Some(handle) => {
768									let _ = handle.tx.unbounded_send(PeersetCommand::SetReservedPeers { peers });
769								}
770								None => log::warn!(target: LOG_TARGET, "protocol {protocol} doens't exist"),
771							}
772
773						},
774						NetworkServiceCommand::DisconnectPeer {
775							protocol,
776							peer,
777						} => {
778							let Some(handle) = self.peerset_handles.get(&protocol) else {
779								log::warn!(target: LOG_TARGET, "protocol {protocol} doens't exist");
780								continue
781							};
782
783							let _ = handle.tx.unbounded_send(PeersetCommand::DisconnectPeer { peer });
784						}
785						NetworkServiceCommand::SetReservedOnly {
786							protocol,
787							reserved_only,
788						} => {
789							let Some(handle) = self.peerset_handles.get(&protocol) else {
790								log::warn!(target: LOG_TARGET, "protocol {protocol} doens't exist");
791								continue
792							};
793
794							let _ = handle.tx.unbounded_send(PeersetCommand::SetReservedOnly { reserved_only });
795						}
796						NetworkServiceCommand::RemoveReservedPeers {
797							protocol,
798							peers,
799						} => {
800							let Some(handle) = self.peerset_handles.get(&protocol) else {
801								log::warn!(target: LOG_TARGET, "protocol {protocol} doens't exist");
802								continue
803							};
804
805							let _ = handle.tx.unbounded_send(PeersetCommand::RemoveReservedPeers { peers });
806						}
807					}
808				},
809				event = self.discovery.next() => match event {
810					None => return,
811					Some(DiscoveryEvent::Discovered { addresses }) => {
812						// if at least one address was added for the peer, report the peer to `Peerstore`
813						for (peer, addresses) in Litep2pNetworkBackend::parse_addresses(addresses.into_iter()) {
814							if self.litep2p.add_known_address(peer.into(), addresses.clone().into_iter()) > 0 {
815								self.peerstore_handle.add_known_peer(peer);
816							}
817						}
818					}
819					Some(DiscoveryEvent::RoutingTableUpdate { peers }) => {
820						for peer in peers {
821							self.peerstore_handle.add_known_peer(peer.into());
822						}
823					}
824					Some(DiscoveryEvent::FindNodeSuccess { query_id, target, peers }) => {
825						match self.pending_queries.remove(&query_id) {
826							Some(KadQuery::FindNode(_, started)) => {
827								log::trace!(
828									target: LOG_TARGET,
829									"`FIND_NODE` for {target:?} ({query_id:?}) succeeded",
830								);
831
832								self.event_streams.send(
833									Event::Dht(
834										DhtEvent::ClosestPeersFound(
835											target.into(),
836											peers
837												.into_iter()
838												.map(|(peer, addrs)| (
839													peer.into(),
840													addrs.into_iter().map(Into::into).collect(),
841												))
842												.collect(),
843										)
844									)
845								);
846
847								if let Some(ref metrics) = self.metrics {
848									metrics
849										.kademlia_query_duration
850										.with_label_values(&["node-find"])
851										.observe(started.elapsed().as_secs_f64());
852								}
853							},
854							query => {
855								log::error!(
856									target: LOG_TARGET,
857									"Missing/invalid pending query for `FIND_NODE`: {query:?}"
858								);
859								debug_assert!(false);
860							}
861						}
862					},
863					Some(DiscoveryEvent::GetRecordPartialResult { query_id, record }) => {
864						if !self.pending_queries.contains_key(&query_id) {
865							log::error!(
866								target: LOG_TARGET,
867								"Missing/invalid pending query for `GET_VALUE` partial result: {query_id:?}"
868							);
869
870							continue
871						}
872
873						let peer_id: sc_network_types::PeerId = record.peer.into();
874						let record = PeerRecord {
875							record: P2PRecord {
876								key: record.record.key.to_vec().into(),
877								value: record.record.value,
878								publisher: record.record.publisher.map(|peer_id| {
879									let peer_id: sc_network_types::PeerId = peer_id.into();
880									peer_id.into()
881								}),
882								expires: record.record.expires,
883							},
884							peer: Some(peer_id.into()),
885						};
886
887						self.event_streams.send(
888							Event::Dht(
889								DhtEvent::ValueFound(
890									record.into()
891								)
892							)
893						);
894					}
895					Some(DiscoveryEvent::GetRecordSuccess { query_id }) => {
896						match self.pending_queries.remove(&query_id) {
897							Some(KadQuery::GetValue(key, started)) => {
898								log::trace!(
899									target: LOG_TARGET,
900									"`GET_VALUE` for {key:?} ({query_id:?}) succeeded",
901								);
902
903								if let Some(ref metrics) = self.metrics {
904									metrics
905										.kademlia_query_duration
906										.with_label_values(&["value-get"])
907										.observe(started.elapsed().as_secs_f64());
908								}
909							},
910							query => {
911								log::error!(
912									target: LOG_TARGET,
913									"Missing/invalid pending query for `GET_VALUE`: {query:?}"
914								);
915								debug_assert!(false);
916							},
917						}
918					}
919					Some(DiscoveryEvent::PutRecordSuccess { query_id }) => {
920						match self.pending_queries.remove(&query_id) {
921							Some(KadQuery::PutValue(key, started)) => {
922								log::trace!(
923									target: LOG_TARGET,
924									"`PUT_VALUE` for {key:?} ({query_id:?}) succeeded",
925								);
926
927								self.event_streams.send(Event::Dht(
928									DhtEvent::ValuePut(key)
929								));
930
931								if let Some(ref metrics) = self.metrics {
932									metrics
933										.kademlia_query_duration
934										.with_label_values(&["value-put"])
935										.observe(started.elapsed().as_secs_f64());
936								}
937							},
938							query => {
939								log::error!(
940									target: LOG_TARGET,
941									"Missing/invalid pending query for `PUT_VALUE`: {query:?}"
942								);
943								debug_assert!(false);
944							}
945						}
946					}
947					Some(DiscoveryEvent::GetProvidersSuccess { query_id, providers }) => {
948						match self.pending_queries.remove(&query_id) {
949							Some(KadQuery::GetProviders(key, started)) => {
950								log::trace!(
951									target: LOG_TARGET,
952									"`GET_PROVIDERS` for {key:?} ({query_id:?}) succeeded",
953								);
954
955								// We likely requested providers to connect to them,
956								// so let's add their addresses to litep2p's transport manager.
957								// Consider also looking the addresses of providers up with `FIND_NODE`
958								// query, as it can yield more up to date addresses.
959								providers.iter().for_each(|p| {
960									self.litep2p.add_known_address(p.peer, p.addresses.clone().into_iter());
961								});
962
963								self.event_streams.send(Event::Dht(
964									DhtEvent::ProvidersFound(
965										key.clone().into(),
966										providers.into_iter().map(|p| p.peer.into()).collect()
967									)
968								));
969
970								// litep2p returns all providers in a single event, so we let
971								// subscribers know no more providers will be yielded.
972								self.event_streams.send(Event::Dht(
973									DhtEvent::NoMoreProviders(key.into())
974								));
975
976								if let Some(ref metrics) = self.metrics {
977									metrics
978										.kademlia_query_duration
979										.with_label_values(&["providers-get"])
980										.observe(started.elapsed().as_secs_f64());
981								}
982							},
983							query => {
984								log::error!(
985									target: LOG_TARGET,
986									"Missing/invalid pending query for `GET_PROVIDERS`: {query:?}"
987								);
988								debug_assert!(false);
989							}
990						}
991					}
992					Some(DiscoveryEvent::AddProviderSuccess { query_id, provided_key }) => {
993						match self.pending_queries.remove(&query_id) {
994							Some(KadQuery::AddProvider(key, started)) => {
995								debug_assert_eq!(key, provided_key.into());
996
997								log::trace!(
998									target: LOG_TARGET,
999									"`ADD_PROVIDER` for {key:?} ({query_id:?}) succeeded",
1000								);
1001
1002								self.event_streams.send(Event::Dht(
1003									DhtEvent::StartedProviding(key.into())
1004								));
1005
1006								if let Some(ref metrics) = self.metrics {
1007									metrics
1008										.kademlia_query_duration
1009										.with_label_values(&["provider-add"])
1010										.observe(started.elapsed().as_secs_f64());
1011								}
1012							}
1013							Some(_) => {
1014								log::error!(
1015									target: LOG_TARGET,
1016									"Invalid pending query for `ADD_PROVIDER`: {query_id:?}"
1017								);
1018								debug_assert!(false);
1019							}
1020							None => {
1021								log::trace!(
1022									target: LOG_TARGET,
1023									"`ADD_PROVIDER` for key {provided_key:?} ({query_id:?}) succeeded (republishing)",
1024								);
1025							}
1026						}
1027					}
1028					Some(DiscoveryEvent::QueryFailed { query_id }) => {
1029						match self.pending_queries.remove(&query_id) {
1030							Some(KadQuery::FindNode(peer_id, started)) => {
1031								log::debug!(
1032									target: LOG_TARGET,
1033									"`FIND_NODE` ({query_id:?}) failed for target {peer_id:?}",
1034								);
1035
1036								self.event_streams.send(Event::Dht(
1037									DhtEvent::ClosestPeersNotFound(peer_id.into())
1038								));
1039
1040								if let Some(ref metrics) = self.metrics {
1041									metrics
1042										.kademlia_query_duration
1043										.with_label_values(&["node-find-failed"])
1044										.observe(started.elapsed().as_secs_f64());
1045								}
1046							},
1047							Some(KadQuery::GetValue(key, started)) => {
1048								log::debug!(
1049									target: LOG_TARGET,
1050									"`GET_VALUE` ({query_id:?}) failed for key {key:?}",
1051								);
1052
1053								self.event_streams.send(Event::Dht(
1054									DhtEvent::ValueNotFound(key)
1055								));
1056
1057								if let Some(ref metrics) = self.metrics {
1058									metrics
1059										.kademlia_query_duration
1060										.with_label_values(&["value-get-failed"])
1061										.observe(started.elapsed().as_secs_f64());
1062								}
1063							},
1064							Some(KadQuery::PutValue(key, started)) => {
1065								log::debug!(
1066									target: LOG_TARGET,
1067									"`PUT_VALUE` ({query_id:?}) failed for key {key:?}",
1068								);
1069
1070								self.event_streams.send(Event::Dht(
1071									DhtEvent::ValuePutFailed(key)
1072								));
1073
1074								if let Some(ref metrics) = self.metrics {
1075									metrics
1076										.kademlia_query_duration
1077										.with_label_values(&["value-put-failed"])
1078										.observe(started.elapsed().as_secs_f64());
1079								}
1080							},
1081							Some(KadQuery::GetProviders(key, started)) => {
1082								log::debug!(
1083									target: LOG_TARGET,
1084									"`GET_PROVIDERS` ({query_id:?}) failed for key {key:?}"
1085								);
1086
1087								self.event_streams.send(Event::Dht(
1088									DhtEvent::ProvidersNotFound(key)
1089								));
1090
1091								if let Some(ref metrics) = self.metrics {
1092									metrics
1093										.kademlia_query_duration
1094										.with_label_values(&["providers-get-failed"])
1095										.observe(started.elapsed().as_secs_f64());
1096								}
1097							},
1098							Some(KadQuery::AddProvider(key, started)) => {
1099								log::debug!(
1100									target: LOG_TARGET,
1101									"`ADD_PROVIDER` ({query_id:?}) failed with key {key:?}",
1102								);
1103
1104								self.event_streams.send(Event::Dht(
1105									DhtEvent::StartProvidingFailed(key)
1106								));
1107
1108								if let Some(ref metrics) = self.metrics {
1109									metrics
1110										.kademlia_query_duration
1111										.with_label_values(&["provider-add-failed"])
1112										.observe(started.elapsed().as_secs_f64());
1113								}
1114							},
1115							None => {
1116								log::debug!(
1117									target: LOG_TARGET,
1118									"non-existent query (likely republishing a provider) failed ({query_id:?})",
1119								);
1120							}
1121						}
1122					}
1123					Some(DiscoveryEvent::Identified { peer, listen_addresses, supported_protocols, .. }) => {
1124						self.discovery.add_self_reported_address(peer, supported_protocols, listen_addresses).await;
1125					}
1126					Some(DiscoveryEvent::ExternalAddressDiscovered { address }) => {
1127						match self.litep2p.public_addresses().add_address(address.clone().into()) {
1128							Ok(inserted) => if inserted {
1129								log::info!(target: LOG_TARGET, "🔍 Discovered new external address for our node: {address}");
1130							},
1131							Err(err) => {
1132								log::warn!(
1133									target: LOG_TARGET,
1134									"🔍 Failed to add discovered external address {address:?}: {err:?}",
1135								);
1136							},
1137						}
1138					}
1139					Some(DiscoveryEvent::ExternalAddressExpired{ address }) => {
1140						let local_peer_id = self.litep2p.local_peer_id();
1141
1142						// Litep2p requires the peer ID to be present in the address.
1143						let address = if !std::matches!(address.iter().last(), Some(Protocol::P2p(_))) {
1144							address.with(Protocol::P2p(*local_peer_id.as_ref()))
1145						} else {
1146							address
1147						};
1148
1149						if self.litep2p.public_addresses().remove_address(&address) {
1150							log::info!(target: LOG_TARGET, "🔍 Expired external address for our node: {address}");
1151						} else {
1152							log::warn!(
1153								target: LOG_TARGET,
1154								"🔍 Failed to remove expired external address {address:?}"
1155							);
1156						}
1157					}
1158					Some(DiscoveryEvent::Ping { peer, rtt }) => {
1159						log::trace!(
1160							target: LOG_TARGET,
1161							"ping time with {peer:?}: {rtt:?}",
1162						);
1163					}
1164					Some(DiscoveryEvent::IncomingRecord { record: Record { key, value, publisher, expires }} ) => {
1165						self.event_streams.send(Event::Dht(
1166							DhtEvent::PutRecordRequest(
1167								key.into(),
1168								value,
1169								publisher.map(Into::into),
1170								expires,
1171							)
1172						));
1173					},
1174
1175					Some(DiscoveryEvent::RandomKademliaStarted) => {
1176						if let Some(metrics) = self.metrics.as_ref() {
1177							metrics.kademlia_random_queries_total.inc();
1178						}
1179					}
1180				},
				// Raw connection-level events from litep2p itself. These arms exist purely
				// for Prometheus accounting of connections and dial failures; when metrics
				// are disabled most of the work is skipped.
				event = self.litep2p.next_event() => match event {
					Some(Litep2pEvent::ConnectionEstablished { peer, endpoint }) => {
						// Connection bookkeeping below is only needed when metrics are enabled.
						let Some(metrics) = &self.metrics else {
							continue;
						};

						let direction = match endpoint {
							Endpoint::Dialer { .. } => "out",
							Endpoint::Listener { .. } => {
								// Increment incoming connections counter.
								//
								// Note: For litep2p these are represented by established negotiated connections,
								// while for libp2p (legacy) these represent not-yet-negotiated connections.
								metrics.incoming_connections_total.inc();

								"in"
							},
						};
						metrics.connections_opened_total.with_label_values(&[direction]).inc();

						// Remember each connection's endpoint so `ConnectionClosed` can
						// attribute the direction, and count peers going 0 -> 1 connections.
						match self.peers.entry(peer) {
							Entry::Vacant(entry) => {
								entry.insert(ConnectionContext {
									endpoints: HashMap::from_iter([(endpoint.connection_id(), endpoint)]),
									num_connections: 1usize,
								});
								metrics.distinct_peers_connections_opened_total.inc();
							}
							Entry::Occupied(entry) => {
								let entry = entry.into_mut();
								entry.num_connections += 1;
								entry.endpoints.insert(endpoint.connection_id(), endpoint);
							}
						}
					}
					Some(Litep2pEvent::ConnectionClosed { peer, connection_id }) => {
						let Some(metrics) = &self.metrics else {
							continue;
						};

						let Some(context) = self.peers.get_mut(&peer) else {
							log::debug!(target: LOG_TARGET, "unknown peer disconnected: {peer:?} ({connection_id:?})");
							continue
						};

						// Recover the direction recorded at `ConnectionEstablished` time.
						let direction = match context.endpoints.remove(&connection_id) {
							None => {
								log::debug!(target: LOG_TARGET, "connection {connection_id:?} doesn't exist for {peer:?} ");
								continue
							}
							Some(endpoint) => {
								context.num_connections -= 1;

								match endpoint {
									Endpoint::Dialer { .. } => "out",
									Endpoint::Listener { .. } => "in",
								}
							}
						};

						metrics.connections_closed_total.with_label_values(&[direction, "actively-closed"]).inc();

						// Last connection to this peer is gone: drop the per-peer context and
						// count the 1 -> 0 transition.
						if context.num_connections == 0 {
							self.peers.remove(&peer);
							metrics.distinct_peers_connections_closed_total.inc();
						}
					}
					Some(Litep2pEvent::DialFailure { address, error }) => {
						log::debug!(
							target: LOG_TARGET,
							"failed to dial peer at {address:?}: {error:?}",
						);

						if let Some(metrics) = &self.metrics {
							// Map the dial error to a stable metric label value.
							//
							// NOTE(review): "peer-id-missmatch" and "webscoket-error" are
							// misspelled, but these are emitted metric label values — fixing the
							// spelling would silently rename the exported label series, so they
							// are deliberately left as-is.
							let reason = match error {
								DialError::Timeout => "timeout",
								DialError::AddressError(_) => "invalid-address",
								DialError::DnsError(_) => "cannot-resolve-dns",
								DialError::NegotiationError(error) => match error {
									NegotiationError::Timeout => "timeout",
									NegotiationError::PeerIdMissing => "missing-peer-id",
									NegotiationError::StateMismatch => "state-mismatch",
									NegotiationError::PeerIdMismatch(_,_) => "peer-id-missmatch",
									NegotiationError::MultistreamSelectError(_) => "multistream-select-error",
									NegotiationError::SnowError(_) => "noise-error",
									NegotiationError::ParseError(_) => "parse-error",
									NegotiationError::IoError(_) => "io-error",
									NegotiationError::WebSocket(_) => "webscoket-error",
									NegotiationError::BadSignature => "bad-signature",
								}
							};

							metrics.pending_connections_errors_total.with_label_values(&[&reason]).inc();
						}
					}
					// Dialing a peer failed on every candidate address; counted under a
					// single aggregate "transport-errors" label.
					Some(Litep2pEvent::ListDialFailures { errors }) => {
						log::debug!(
							target: LOG_TARGET,
							"failed to dial peer on multiple addresses {errors:?}",
						);

						if let Some(metrics) = &self.metrics {
							metrics.pending_connections_errors_total.with_label_values(&["transport-errors"]).inc();
						}
					}
					// The litep2p event stream ended: the backend cannot make progress
					// anymore, so exit the whole event loop.
					None => {
						log::error!(
								target: LOG_TARGET,
								"Litep2p backend terminated"
						);
						return
					}
				},
1294			}
1295		}
1296	}
1297}