#![cfg_attr(not(any(test, feature = "std")), no_std)]
#![recursion_limit = "512"]
#![deny(rustdoc::broken_intra_doc_links)]
extern crate alloc;
use alloc::{
borrow::ToOwned as _,
boxed::Box,
format,
string::{String, ToString as _},
sync::Arc,
vec,
vec::Vec,
};
use core::{cmp, num::NonZeroU32, pin::Pin};
use futures::{channel::mpsc, prelude::*};
use hashbrown::{hash_map::Entry, HashMap};
use itertools::Itertools as _;
use smoldot::{
chain, chain_spec,
database::finalized_serialize,
header,
informant::HashDisplay,
libp2p::{connection, multiaddr, peer_id},
};
mod json_rpc_service;
mod network_service;
mod runtime_service;
mod sync_service;
mod transactions_service;
mod util;
pub mod platform;
pub use json_rpc_service::HandleRpcError;
pub use peer_id::PeerId;
/// Configuration used when creating a [`Client`] through [`Client::new`].
pub struct ClientConfig {
    /// Callback invoked whenever the client needs to spawn a background task. The first
    /// parameter is a human-readable name for the task, the second is the task itself.
    pub tasks_spawner: Box<dyn Fn(String, future::BoxFuture<'static, ()>) + Send + Sync>,
    /// Name of the client; forwarded to the JSON-RPC service when a chain is added.
    pub system_name: String,
    /// Version of the client; forwarded to the JSON-RPC service when a chain is added.
    pub system_version: String,
}
/// Configuration of a chain to add through [`Client::add_chain`].
#[derive(Debug, Clone)]
pub struct AddChainConfig<'a, TChain, TRelays> {
    /// Opaque user data stored alongside the chain and returned by
    /// [`Client::remove_chain`].
    pub user_data: TChain,
    /// JSON chain specification describing the chain to add.
    pub specification: &'a str,
    /// Previously-saved database content (as produced by [`Client::database_content`]).
    /// An invalid or empty string is silently ignored.
    pub database_content: &'a str,
    /// Chains that can serve as the relay chain if `specification` describes a parachain.
    pub potential_relay_chains: TRelays,
    /// Channel where JSON-RPC responses are pushed. If `None`, no JSON-RPC service is
    /// started for this chain and [`Client::json_rpc_request`] must not be called.
    pub json_rpc_responses: Option<mpsc::Sender<String>>,
}
/// Identifier of a chain within a [`Client`], as returned by [`Client::add_chain`].
/// Wraps the index of the chain within the client's internal slab.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct ChainId(usize);
/// A light client instance managing a collection of chains.
pub struct Client<TPlat: platform::Platform, TChain = ()> {
    /// Callback used to spawn background tasks (see [`ClientConfig::tasks_spawner`]).
    spawn_new_task: Arc<dyn Fn(String, future::BoxFuture<'static, ()>) + Send + Sync>,
    /// Chains visible through the public API, indexed by the `usize` inside [`ChainId`].
    public_api_chains: slab::Slab<PublicApiChain<TChain>>,
    /// Running service bundles, keyed by [`ChainKey`]. Multiple public API chains with
    /// the same key share one entry (reference-counted via `RunningChain::num_references`).
    chains_by_key: HashMap<ChainKey, RunningChain<TPlat>, fnv::FnvBuildHasher>,
    /// Client name, forwarded to each chain's JSON-RPC service.
    system_name: String,
    /// Client version, forwarded to each chain's JSON-RPC service.
    system_version: String,
}
/// One chain as exposed through the public API.
struct PublicApiChain<TChain> {
    /// Opaque user data provided in [`AddChainConfig::user_data`].
    user_data: TChain,
    /// Key of the services backing this chain in `Client::chains_by_key`.
    key: ChainKey,
    /// `id` field of the chain specification; used to match parachains against their
    /// relay chain candidates.
    chain_spec_chain_id: String,
    /// Handle used to queue JSON-RPC requests; `Some` only if the chain was added with a
    /// JSON-RPC responses channel.
    json_rpc_sender: Option<json_rpc_service::Sender>,
}
/// Identifies a chain for the purpose of de-duplicating services: two added chains with
/// an equal `ChainKey` share the same [`RunningChain`] entry.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct ChainKey {
    /// Hash of the genesis block of the chain.
    genesis_block_hash: [u8; 32],
    /// For a parachain, the key of its relay chain plus the parachain id.
    relay_chain: Option<(Box<ChainKey>, u32)>,
    /// Networking protocol id from the chain specification.
    protocol_id: String,
}
/// Services of a chain, shared between every public API chain with the same [`ChainKey`].
struct RunningChain<TPlat: platform::Platform> {
    /// The services, either still initializing (`Future` variant) or ready (`Done`).
    /// The `Shared`/`RemoteHandle` combination lets multiple consumers wait on the same
    /// background initialization.
    services: future::MaybeDone<future::Shared<future::RemoteHandle<ChainServices<TPlat>>>>,
    /// Name under which this chain appears in log messages; unique within the client.
    log_name: String,
    /// Number of public API chains referencing this entry; the entry is removed from
    /// `Client::chains_by_key` when this would reach zero.
    num_references: NonZeroU32,
}
/// Bundle of the services that drive a single chain.
struct ChainServices<TPlat: platform::Platform> {
    network_service: Arc<network_service::NetworkService<TPlat>>,
    /// libp2p identity of the local node, derived from the network noise key.
    network_identity: peer_id::PeerId,
    sync_service: Arc<sync_service::SyncService<TPlat>>,
    runtime_service: Arc<runtime_service::RuntimeService<TPlat>>,
    transactions_service: Arc<transactions_service::TransactionsService<TPlat>>,
    /// Number of bytes used to encode block numbers for this chain.
    block_number_bytes: usize,
}
impl<TPlat: platform::Platform> Clone for ChainServices<TPlat> {
fn clone(&self) -> Self {
ChainServices {
network_service: self.network_service.clone(),
network_identity: self.network_identity.clone(),
sync_service: self.sync_service.clone(),
runtime_service: self.runtime_service.clone(),
transactions_service: self.transactions_service.clone(),
block_number_bytes: self.block_number_bytes,
}
}
}
impl<TPlat: platform::Platform, TChain> Client<TPlat, TChain> {
/// Initializes a new client from the given configuration.
pub fn new(config: ClientConfig) -> Self {
    // Pre-allocate room for a handful of chains; collections grow on demand past this.
    const EXPECTED_CHAINS: usize = 8;
    Client {
        spawn_new_task: config.tasks_spawner.into(),
        public_api_chains: slab::Slab::with_capacity(EXPECTED_CHAINS),
        chains_by_key: HashMap::with_capacity_and_hasher(EXPECTED_CHAINS, Default::default()),
        system_name: config.system_name,
        system_version: config.system_version,
    }
}
/// Adds a new chain to the client and returns its identifier.
///
/// The starting point of the sync is determined from three sources combined: the
/// genesis storage of the chain specification, the optional checkpoint ("light sync
/// state") of the chain specification, and the optional database content. If the chain
/// specification designates a relay chain, exactly one matching chain must be present
/// in `config.potential_relay_chains`.
///
/// If an identical chain (same [`ChainKey`]) is already running, its services are
/// reused and reference-counted instead of being started again.
///
/// # Errors
///
/// Returns a human-readable error string if the chain specification cannot be decoded,
/// if no valid starting point can be determined, or if zero or several relay-chain
/// candidates match.
pub fn add_chain(
    &mut self,
    config: AddChainConfig<'_, TChain, impl Iterator<Item = ChainId>>,
) -> Result<ChainId, String> {
    // Decode the chain specification JSON.
    let chain_spec = match chain_spec::ChainSpec::from_json_bytes(&config.specification) {
        Ok(cs) => cs,
        Err(err) => {
            return Err(format!("Failed to decode chain specification: {}", err));
        }
    };

    // Determine the chain information to start syncing from, the genesis block header,
    // and the list of nodes recovered from the database (if any).
    let (chain_information, genesis_block_header, checkpoint_nodes) = {
        // The tuple being matched is:
        // 0: chain information built from the genesis storage,
        // 1: chain information from the checkpoint in the spec (if any),
        // 2: chain information + known nodes decoded from the database content.
        match (
            chain_spec
                .as_chain_information()
                .map(|(ci, _)| chain::chain_information::ValidChainInformation::try_from(ci)),
            chain_spec.light_sync_state().map(|s| {
                chain::chain_information::ValidChainInformation::try_from(
                    s.as_chain_information(),
                )
            }),
            decode_database(
                config.database_content,
                chain_spec.block_number_bytes().into(),
            ),
        ) {
            // Usable database that is at least as recent as the checkpoint: prefer the
            // database. Genesis header is taken from the genesis storage.
            (Ok(Ok(genesis_ci)), checkpoint, Ok((database, checkpoint_nodes)))
                if checkpoint
                    .as_ref()
                    .and_then(|r| r.as_ref().ok())
                    .map_or(true, |cp| {
                        cp.as_ref().finalized_block_header.number
                            < database.as_ref().finalized_block_header.number
                    }) =>
            {
                let genesis_header = genesis_ci.as_ref().finalized_block_header.clone();
                (database, genesis_header.into(), checkpoint_nodes)
            }
            // Same as above, but the genesis storage is incomplete: build a minimal
            // genesis header from the trie root hash found in the spec.
            (
                Err(chain_spec::FromGenesisStorageError::UnknownStorageItems),
                checkpoint,
                Ok((database, checkpoint_nodes)),
            ) if checkpoint
                .as_ref()
                .and_then(|r| r.as_ref().ok())
                .map_or(true, |cp| {
                    cp.as_ref().finalized_block_header.number
                        < database.as_ref().finalized_block_header.number
                }) =>
            {
                let genesis_header = header::Header {
                    parent_hash: [0; 32],
                    number: 0,
                    state_root: *chain_spec.genesis_storage().into_trie_root_hash().unwrap(),
                    extrinsics_root: smoldot::trie::empty_trie_merkle_value(),
                    digest: header::DigestRef::empty().into(),
                };
                (database, genesis_header, checkpoint_nodes)
            }
            // No genesis storage and no checkpoint at all: nowhere to start from.
            (Err(chain_spec::FromGenesisStorageError::UnknownStorageItems), None, _) => {
                return Err(
                    "Either a checkpoint or the genesis storage must be provided".to_string(),
                );
            }
            // Incomplete genesis storage but a valid checkpoint: start at the checkpoint.
            (
                Err(chain_spec::FromGenesisStorageError::UnknownStorageItems),
                Some(Ok(checkpoint)),
                _,
            ) => {
                let genesis_header = header::Header {
                    parent_hash: [0; 32],
                    number: 0,
                    state_root: *chain_spec.genesis_storage().into_trie_root_hash().unwrap(),
                    extrinsics_root: smoldot::trie::empty_trie_merkle_value(),
                    digest: header::DigestRef::empty().into(),
                };
                (checkpoint, genesis_header, Default::default())
            }
            // Any other error while building the genesis chain information is fatal.
            (Err(err), _, _) => {
                return Err(format!(
                    "Failed to build genesis chain information: {}",
                    err
                ));
            }
            (Ok(Err(err)), _, _) => {
                return Err(format!("Invalid genesis chain information: {}", err));
            }
            (_, Some(Err(err)), _) => {
                return Err(format!(
                    "Invalid checkpoint in chain specification: {}",
                    err
                ));
            }
            // Valid genesis and checkpoint but no usable database: use the checkpoint.
            (Ok(Ok(genesis_ci)), Some(Ok(checkpoint)), _) => {
                let genesis_header = genesis_ci.as_ref().finalized_block_header.clone();
                (checkpoint, genesis_header.into(), Default::default())
            }
            // Only the genesis is available: start syncing from the genesis block.
            (Ok(Ok(genesis_ci)), None, _) => {
                let genesis_header =
                    header::Header::from(genesis_ci.as_ref().finalized_block_header.clone());
                (genesis_ci, genesis_header, Default::default())
            }
        }
    };

    // If the spec designates a relay chain, pick exactly one matching candidate among
    // the chains the caller offered.
    let relay_chain_id = if let Some((relay_chain_id, _para_id)) = chain_spec.relay_chain() {
        let chain = config
            .potential_relay_chains
            .filter(|c| {
                self.public_api_chains
                    .get(c.0)
                    .map_or(false, |chain| chain.chain_spec_chain_id == relay_chain_id)
            })
            .exactly_one();
        match chain {
            Ok(c) => Some(c),
            Err(mut iter) => {
                // `exactly_one()` hands back the remaining iterator on failure, which
                // lets us distinguish "no candidate" from "more than one candidate".
                return Err(if iter.next().is_none() {
                    "Couldn't find any valid relay chain".to_string()
                } else {
                    debug_assert!(iter.next().is_some());
                    "Multiple valid relay chains found".to_string()
                });
            }
        }
    } else {
        None
    };

    // Parse the bootnodes of the chain spec, separating valid from invalid entries.
    let (bootstrap_nodes, invalid_bootstrap_nodes_sanitized) = {
        let mut valid_list = Vec::with_capacity(chain_spec.boot_nodes().len());
        let mut invalid_list = Vec::with_capacity(0);
        for node in chain_spec.boot_nodes() {
            match node {
                chain_spec::Bootnode::Parsed { multiaddr, peer_id } => {
                    if let Ok(multiaddr) = multiaddr.parse::<multiaddr::Multiaddr>() {
                        let peer_id = peer_id::PeerId::from_bytes(peer_id).unwrap();
                        valid_list.push((peer_id, vec![multiaddr]));
                    } else {
                        invalid_list.push(multiaddr)
                    }
                }
                // Keep only ASCII characters so the entry can be safely logged later.
                chain_spec::Bootnode::UnrecognizedFormat(unparsed) => invalid_list.push(
                    unparsed
                        .chars()
                        .filter(|c| c.is_ascii())
                        .collect::<String>(),
                ),
            }
        }
        (valid_list, invalid_list)
    };

    let chain_spec_chain_id = chain_spec.id().to_owned();
    let genesis_block_hash = genesis_block_header.hash(chain_spec.block_number_bytes().into());
    let genesis_block_state_root = genesis_block_header.state_root;

    // Key that de-duplicates chains: two chains with an equal key share their services.
    let new_chain_key = ChainKey {
        genesis_block_hash,
        relay_chain: relay_chain_id.map(|ck| {
            (
                Box::new(self.public_api_chains.get(ck.0).unwrap().key.clone()),
                chain_spec.relay_chain().unwrap().1,
            )
        }),
        protocol_id: chain_spec.protocol_id().to_owned(),
    };

    // Grab a handle to the (possibly still initializing) services of the relay chain,
    // if any, plus its log name, for use inside the initialization task below.
    let relay_chain_ready_future: Option<(future::MaybeDone<future::Shared<_>>, String)> =
        relay_chain_id.map(|relay_chain| {
            let relay_chain = &self
                .chains_by_key
                .get(&self.public_api_chains.get(relay_chain.0).unwrap().key)
                .unwrap();
            let future = match &relay_chain.services {
                future::MaybeDone::Done(d) => future::MaybeDone::Done(d.clone()),
                future::MaybeDone::Future(d) => future::MaybeDone::Future(d.clone()),
                future::MaybeDone::Gone => unreachable!(),
            };
            (future, relay_chain.log_name.clone())
        });

    // Choose a log name unique among running chains, appending `-1`, `-2`, … on clash.
    let log_name = {
        let base = chain_spec
            .id()
            .chars()
            .filter(|c| c.is_ascii_graphic())
            .collect::<String>();
        let mut suffix = None;
        loop {
            let attempt = if let Some(suffix) = suffix {
                format!("{}-{}", base, suffix)
            } else {
                base.clone()
            };
            if !self.chains_by_key.values().any(|c| *c.log_name == attempt) {
                break attempt;
            }
            match &mut suffix {
                Some(v) => *v += 1,
                v @ None => *v = Some(1),
            }
        }
    };

    // Start the services of the chain, or reuse the existing ones if an identical chain
    // has already been added.
    let (services_init, log_name) = match self.chains_by_key.entry(new_chain_key.clone()) {
        Entry::Occupied(mut entry) => {
            // Identical chain already running: bump the reference count and reuse it.
            entry.get_mut().num_references =
                NonZeroU32::new(entry.get_mut().num_references.get() + 1).unwrap();
            let entry = entry.into_mut();
            (&mut entry.services, &entry.log_name)
        }
        Entry::Vacant(entry) => {
            let network_noise_key = connection::NoiseKey::new(&rand::random());
            // Spawn a background task that performs the (asynchronous) services
            // initialization, keeping a shared handle to its eventual output.
            let running_chain_init_future: future::RemoteHandle<ChainServices<TPlat>> = {
                let spawn_new_task = self.spawn_new_task.clone();
                let chain_spec = chain_spec.clone();
                let log_name = log_name.clone();
                let future = async move {
                    // For a parachain, wait for the relay chain services to be ready
                    // before initializing our own.
                    let relay_chain =
                        if let Some((mut relay_chain_ready_future, relay_chain_log_name)) =
                            relay_chain_ready_future
                        {
                            (&mut relay_chain_ready_future).await;
                            let running_relay_chain = Pin::new(&mut relay_chain_ready_future)
                                .take_output()
                                .unwrap();
                            Some((running_relay_chain, relay_chain_log_name))
                        } else {
                            None
                        };
                    let chain_name = chain_spec.name().to_owned();
                    let relay_chain_para_id = chain_spec.relay_chain().map(|(_, id)| id);
                    let starting_block_number =
                        chain_information.as_ref().finalized_block_header.number;
                    let starting_block_hash = chain_information
                        .as_ref()
                        .finalized_block_header
                        .hash(chain_spec.block_number_bytes().into());
                    let has_bad_blocks = chain_spec.bad_blocks_hashes().count() != 0;
                    let running_chain = start_services(
                        log_name.clone(),
                        spawn_new_task,
                        chain_information,
                        genesis_block_header
                            .scale_encoding_vec(chain_spec.block_number_bytes().into()),
                        chain_spec,
                        relay_chain.as_ref().map(|(r, _)| r),
                        network_noise_key,
                    )
                    .await;
                    // Log a one-line summary of the freshly-initialized chain.
                    if let Some((_, relay_chain_log_name)) = relay_chain.as_ref() {
                        log::info!(
                            target: "smoldot",
                            "Parachain initialization complete for {}. Name: {:?}. Genesis \
                            hash: {}. State root hash: 0x{}. Network identity: {}. Relay \
                            chain: {} (id: {})",
                            log_name,
                            chain_name,
                            HashDisplay(&genesis_block_hash),
                            hex::encode(&genesis_block_state_root),
                            running_chain.network_identity,
                            relay_chain_log_name,
                            relay_chain_para_id.unwrap(),
                        );
                    } else {
                        log::info!(
                            target: "smoldot",
                            "Chain initialization complete for {}. Name: {:?}. Genesis \
                            hash: {}. State root hash: 0x{}. Network identity: {}. Chain \
                            specification or database starting at: {} (#{})",
                            log_name,
                            chain_name,
                            HashDisplay(&genesis_block_hash),
                            hex::encode(&genesis_block_state_root),
                            running_chain.network_identity,
                            HashDisplay(&starting_block_hash),
                            starting_block_number
                        );
                    }
                    if has_bad_blocks {
                        log::warn!(
                            target: "smoldot",
                            "Chain specification of {} contains a list of bad blocks. Bad \
                            blocks are not implemented in the light client.", log_name
                        );
                    }
                    running_chain
                };
                let (background_future, output_future) = future.remote_handle();
                (self.spawn_new_task)(
                    "services-initialization".to_owned(),
                    background_future.boxed(),
                );
                output_future
            };
            let entry = entry.insert(RunningChain {
                services: future::maybe_done(running_chain_init_future.shared()),
                log_name,
                num_references: NonZeroU32::new(1).unwrap(),
            });
            (&mut entry.services, &entry.log_name)
        }
    };

    // Emit bootnode-related warnings now that the log name is settled.
    if !invalid_bootstrap_nodes_sanitized.is_empty() {
        log::warn!(
            target: "smoldot",
            "Failed to parse some of the bootnodes of {}. \
            These bootnodes have been ignored. List: {}",
            log_name, invalid_bootstrap_nodes_sanitized.join(", ")
        );
    }
    if bootstrap_nodes.is_empty() {
        log::warn!(
            target: "smoldot",
            "Newly-added chain {} has an empty list of bootnodes. Smoldot will likely fail \
            to connect to its peer-to-peer network.",
            log_name
        );
    }

    // Reserve the public API slot, fixing the chain's identifier.
    let public_api_chains_entry = self.public_api_chains.vacant_entry();
    let new_chain_id = ChainId(public_api_chains_entry.key());

    // Spawn a task that, once the services are ready, seeds the network service with the
    // nodes recovered from the database and with the bootnodes.
    (self.spawn_new_task)("network-service-add-initial-topology".to_owned(), {
        let mut running_chain_init = match services_init {
            future::MaybeDone::Done(d) => future::MaybeDone::Done(d.clone()),
            future::MaybeDone::Future(d) => future::MaybeDone::Future(d.clone()),
            future::MaybeDone::Gone => unreachable!(),
        };
        async move {
            (&mut running_chain_init).await;
            let running_chain = Pin::new(&mut running_chain_init).take_output().unwrap();
            running_chain
                .network_service
                .discover(&TPlat::now(), 0, checkpoint_nodes, false)
                .await;
            running_chain
                .network_service
                .discover(&TPlat::now(), 0, bootstrap_nodes, true)
                .await;
        }
        .boxed()
    });

    // If the caller provided a responses channel, start a JSON-RPC service.
    let json_rpc_sender = if let Some(json_rpc_responses) = config.json_rpc_responses {
        let mut running_chain_init = match services_init {
            future::MaybeDone::Done(d) => future::MaybeDone::Done(d.clone()),
            future::MaybeDone::Future(d) => future::MaybeDone::Future(d.clone()),
            future::MaybeDone::Gone => unreachable!(),
        };
        let (sender, service_starter) = json_rpc_service::service(json_rpc_service::Config {
            log_name: log_name.clone(),
            max_pending_requests: NonZeroU32::new(128).unwrap(),
            max_subscriptions: 1024,
        });
        let spawn_new_task = self.spawn_new_task.clone();
        let system_name = self.system_name.clone();
        let system_version = self.system_version.clone();
        let init_future = async move {
            // The JSON-RPC service can only start once the other services are up.
            (&mut running_chain_init).await;
            let running_chain = Pin::new(&mut running_chain_init).take_output().unwrap();
            service_starter.start(json_rpc_service::StartConfig {
                tasks_executor: Box::new(move |name, task| spawn_new_task(name, task)),
                sync_service: running_chain.sync_service,
                network_service: (running_chain.network_service, 0),
                transactions_service: running_chain.transactions_service,
                runtime_service: running_chain.runtime_service,
                chain_spec: &chain_spec,
                peer_id: &running_chain.network_identity,
                system_name,
                system_version,
                genesis_block_hash,
                genesis_block_state_root,
                responses_sender: json_rpc_responses,
                max_parallel_requests: NonZeroU32::new(24).unwrap(),
            })
        };
        (self.spawn_new_task)("json-rpc-service-init".to_owned(), init_future.boxed());
        Some(sender)
    } else {
        None
    };

    // Success: record the chain in the public API list and hand out its id.
    public_api_chains_entry.insert(PublicApiChain {
        user_data: config.user_data,
        key: new_chain_key,
        chain_spec_chain_id,
        json_rpc_sender,
    });
    Ok(new_chain_id)
}
/// Removes the chain from the client and returns the user data that was stored for it.
///
/// The underlying services are shut down only when this was the last public API chain
/// referencing them.
#[must_use]
pub fn remove_chain(&mut self, id: ChainId) -> TChain {
    let removed_chain = self.public_api_chains.remove(id.0);
    let running_chain = self.chains_by_key.get_mut(&removed_chain.key).unwrap();
    // Decrement the reference count; `None` means it would hit zero.
    match NonZeroU32::new(running_chain.num_references.get() - 1) {
        Some(decremented) => running_chain.num_references = decremented,
        None => {
            // Last reference: drop the whole service bundle.
            log::info!(target: "smoldot", "Shutting down chain {}", running_chain.log_name);
            self.chains_by_key.remove(&removed_chain.key);
        }
    }
    self.public_api_chains.shrink_to_fit();
    removed_chain.user_data
}
/// Returns a mutable reference to the user data associated with the given chain.
///
/// Panics if the chain id is not valid.
pub fn chain_user_data_mut(&mut self, chain_id: ChainId) -> &mut TChain {
    let chain = self.public_api_chains.get_mut(chain_id.0).unwrap();
    &mut chain.user_data
}
/// Queues a JSON-RPC request towards the given chain.
///
/// The response will be delivered on the channel passed in
/// [`AddChainConfig::json_rpc_responses`].
pub fn json_rpc_request(
    &mut self,
    json_rpc_request: impl Into<String>,
    chain_id: ChainId,
) -> Result<(), HandleRpcError> {
    // Convert at this monomorphized boundary, then delegate to the non-generic inner
    // function to avoid duplicating its body per caller type.
    let request = json_rpc_request.into();
    self.json_rpc_request_inner(request, chain_id)
}
fn json_rpc_request_inner(
&mut self,
json_rpc_request: String,
chain_id: ChainId,
) -> Result<(), HandleRpcError> {
let json_rpc_sender = match self
.public_api_chains
.get_mut(chain_id.0)
.unwrap()
.json_rpc_sender
{
Some(ref mut json_rpc_sender) => json_rpc_sender,
_ => panic!(),
};
json_rpc_sender.queue_rpc_request(json_rpc_request)
}
/// Returns a future that resolves to the serialized database content of the chain,
/// guaranteed to be at most `max_size` bytes.
///
/// Panics if the chain id is not valid.
pub fn database_content(
    &self,
    chain_id: ChainId,
    max_size: usize,
) -> impl Future<Output = String> {
    // Clone a handle to the (possibly still initializing) services of this chain so the
    // returned future doesn't borrow `self`.
    let chain_key = &self.public_api_chains.get(chain_id.0).unwrap().key;
    let mut services_init = match &self.chains_by_key.get(chain_key).unwrap().services {
        future::MaybeDone::Future(f) => future::MaybeDone::Future(f.clone()),
        future::MaybeDone::Done(d) => future::MaybeDone::Done(d.clone()),
        future::MaybeDone::Gone => unreachable!(),
    };
    async move {
        // Wait for the services to be ready, then serialize their state.
        (&mut services_init).await;
        let services = Pin::new(&mut services_init).take_output().unwrap();
        encode_database(&services, max_size).await
    }
}
}
/// Spawns the background services of a chain (network, sync, runtime, transactions) and
/// returns handles to all of them.
///
/// If `relay_chain` is `Some`, the chain is set up as a parachain whose sync follows the
/// given relay chain's runtime service.
async fn start_services<TPlat: platform::Platform>(
    log_name: String,
    spawn_new_task: Arc<
        dyn Fn(String, Pin<Box<dyn Future<Output = ()> + Send + 'static>>) + Send + Sync,
    >,
    chain_information: chain::chain_information::ValidChainInformation,
    genesis_block_scale_encoded_header: Vec<u8>,
    chain_spec: chain_spec::ChainSpec,
    relay_chain: Option<&ChainServices<TPlat>>,
    network_noise_key: connection::NoiseKey,
) -> ChainServices<TPlat> {
    // The local libp2p identity is derived from the Ed25519 key of the noise key.
    let network_identity =
        peer_id::PublicKey::Ed25519(*network_noise_key.libp2p_public_ed25519_key()).into_peer_id();

    // Network service: handles the peer-to-peer connections for (here) a single chain.
    let (network_service, mut network_event_receivers) =
        network_service::NetworkService::new(network_service::Config {
            tasks_executor: Box::new({
                let spawn_new_task = spawn_new_task.clone();
                move |name, fut| spawn_new_task(name, fut)
            }),
            // A single events receiver, consumed by the sync service below.
            num_events_receivers: 1,
            noise_key: network_noise_key,
            chains: vec![network_service::ConfigChain {
                log_name: log_name.clone(),
                has_grandpa_protocol: matches!(
                    chain_information.as_ref().finality,
                    chain::chain_information::ChainInformationFinalityRef::Grandpa { .. }
                ),
                genesis_block_hash: header::hash_from_scale_encoded_header(
                    &genesis_block_scale_encoded_header,
                ),
                finalized_block_height: chain_information.as_ref().finalized_block_header.number,
                // Before syncing starts, the finalized block doubles as the best block.
                best_block: (
                    chain_information.as_ref().finalized_block_header.number,
                    chain_information
                        .as_ref()
                        .finalized_block_header
                        .hash(chain_spec.block_number_bytes().into()),
                ),
                protocol_id: chain_spec.protocol_id().to_string(),
                block_number_bytes: usize::from(chain_spec.block_number_bytes()),
            }],
        })
        .await;

    // Sync and runtime services, configured differently depending on whether the chain
    // is a parachain (relay chain present) or not.
    let (sync_service, runtime_service) = if let Some(relay_chain) = relay_chain {
        let sync_service = Arc::new(
            sync_service::SyncService::new(sync_service::Config {
                log_name: log_name.clone(),
                chain_information: chain_information.clone(),
                block_number_bytes: usize::from(chain_spec.block_number_bytes()),
                tasks_executor: Box::new({
                    let spawn_new_task = spawn_new_task.clone();
                    move |name, fut| spawn_new_task(name, fut)
                }),
                network_service: (network_service.clone(), 0),
                network_events_receiver: network_event_receivers.pop().unwrap(),
                // Parachain mode: follow the relay chain's runtime service.
                parachain: Some(sync_service::ConfigParachain {
                    parachain_id: chain_spec.relay_chain().unwrap().1,
                    relay_chain_sync: relay_chain.runtime_service.clone(),
                    relay_chain_block_number_bytes: relay_chain.block_number_bytes,
                }),
            })
            .await,
        );
        let runtime_service = Arc::new(
            runtime_service::RuntimeService::new(runtime_service::Config {
                log_name: log_name.clone(),
                tasks_executor: Box::new({
                    let spawn_new_task = spawn_new_task.clone();
                    move |name, fut| spawn_new_task(name, fut)
                }),
                sync_service: sync_service.clone(),
                genesis_block_scale_encoded_header,
            })
            .await,
        );
        (sync_service, runtime_service)
    } else {
        // Standalone / relay chain mode: same services but no parachain configuration.
        let sync_service = Arc::new(
            sync_service::SyncService::new(sync_service::Config {
                log_name: log_name.clone(),
                chain_information: chain_information.clone(),
                block_number_bytes: usize::from(chain_spec.block_number_bytes()),
                tasks_executor: Box::new({
                    let spawn_new_task = spawn_new_task.clone();
                    move |name, fut| spawn_new_task(name, fut)
                }),
                network_service: (network_service.clone(), 0),
                network_events_receiver: network_event_receivers.pop().unwrap(),
                parachain: None,
            })
            .await,
        );
        let runtime_service = Arc::new(
            runtime_service::RuntimeService::new(runtime_service::Config {
                log_name: log_name.clone(),
                tasks_executor: Box::new({
                    let spawn_new_task = spawn_new_task.clone();
                    move |name, fut| spawn_new_task(name, fut)
                }),
                sync_service: sync_service.clone(),
                genesis_block_scale_encoded_header,
            })
            .await,
        );
        (sync_service, runtime_service)
    };

    // Transactions service: submits and tracks the user's transactions.
    let transactions_service = Arc::new(
        transactions_service::TransactionsService::new(transactions_service::Config {
            log_name,
            tasks_executor: Box::new(move |name, fut| spawn_new_task(name, fut)),
            sync_service: sync_service.clone(),
            runtime_service: runtime_service.clone(),
            network_service: (network_service.clone(), 0),
            max_pending_transactions: NonZeroU32::new(64).unwrap(),
            max_concurrent_downloads: NonZeroU32::new(3).unwrap(),
            max_concurrent_validations: NonZeroU32::new(2).unwrap(),
        })
        .await,
    );

    ChainServices {
        network_service,
        network_identity,
        runtime_service,
        sync_service,
        transactions_service,
        block_number_bytes: usize::from(chain_spec.block_number_bytes()),
    }
}
/// Serializes the state of the given chain services (chain information plus the nodes
/// discovered on the network) into a JSON string suitable for later restoration.
///
/// The returned string is guaranteed to be at most `max_size` bytes. If the full
/// serialization is too large, nodes are dropped (half of the remainder at a time) until
/// it fits; if even a placeholder message doesn't fit, an empty string is returned.
async fn encode_database<TPlat: platform::Platform>(
    services: &ChainServices<TPlat>,
    max_size: usize,
) -> String {
    // Build the first draft of the database.
    let mut database_draft = SerdeDatabase {
        chain: match services.sync_service.serialize_chain_information().await {
            Some(ci) => {
                let encoded = finalized_serialize::encode_chain(&ci, services.block_number_bytes);
                serde_json::from_str(&encoded).unwrap()
            }
            None => {
                // The sync service can't provide chain information right now; return a
                // placeholder, provided it fits within `max_size`.
                let dummy_message = "<unknown>";
                return if dummy_message.len() > max_size {
                    String::new()
                } else {
                    dummy_message.to_owned()
                };
            }
        },
        nodes: services
            .network_service
            .discovered_nodes(0)
            .await
            .map(|(peer_id, addrs)| {
                (
                    peer_id.to_base58(),
                    addrs.map(|a| a.to_string()).collect::<Vec<_>>(),
                )
            })
            .collect(),
    };

    // Shrink the list of nodes until the serialization fits in `max_size`.
    loop {
        let serialized = serde_json::to_string(&database_draft).unwrap();
        if serialized.len() <= max_size {
            return serialized;
        }
        if database_draft.nodes.is_empty() {
            // Even with no nodes at all the serialization is too large; fall back to a
            // placeholder. Fix: previously compared with `>=`, inconsistent with the
            // `<unknown>` branch above — a message of exactly `max_size` bytes fits and
            // is now returned instead of an empty string.
            let dummy_message = "<too-large>";
            return if dummy_message.len() > max_size {
                String::new()
            } else {
                dummy_message.to_owned()
            };
        }
        // Drop half of the remaining nodes (at least one) and try again.
        let mut nodes_to_remove = cmp::max(1, database_draft.nodes.len() / 2);
        database_draft.nodes.retain(|_, _| {
            if nodes_to_remove > 0 {
                nodes_to_remove -= 1;
                false
            } else {
                true
            }
        });
    }
}
/// Decodes a database string previously produced by [`encode_database`].
///
/// Returns the chain information to resume syncing from, plus the list of nodes stored
/// in the database. Individual node entries with an unparsable peer ID are skipped, and
/// unparsable multiaddresses within an entry are dropped, rather than failing the whole
/// decoding.
///
/// # Errors
///
/// Returns `Err(())` if the string is not valid JSON or if the chain information cannot
/// be decoded.
fn decode_database(
    encoded: &str,
    block_number_bytes: usize,
) -> Result<
    (
        chain::chain_information::ValidChainInformation,
        Vec<(PeerId, Vec<multiaddr::Multiaddr>)>,
    ),
    (),
> {
    let decoded: SerdeDatabase = serde_json::from_str(encoded).map_err(|_| ())?;

    let (chain, _) = finalized_serialize::decode_chain(
        &serde_json::to_string(&decoded.chain).unwrap(),
        block_number_bytes,
    )
    .map_err(|_| ())?;

    let nodes = decoded
        .nodes
        .iter()
        .filter_map(|(peer_id, addrs)| {
            let addrs = addrs
                .iter()
                // `.ok()` alone does the filtering; the former `Some(…?)` wrapper was
                // redundant (clippy: needless_question_mark).
                .filter_map(|a| a.parse::<multiaddr::Multiaddr>().ok())
                .collect();
            Some((peer_id.parse::<PeerId>().ok()?, addrs))
        })
        .collect::<Vec<_>>();

    Ok((chain, nodes))
}
/// On-disk/JSON layout of the database produced by `encode_database` and consumed by
/// `decode_database`.
#[derive(serde::Serialize, serde::Deserialize)]
struct SerdeDatabase {
    /// Chain information, kept as the raw JSON produced by
    /// `finalized_serialize::encode_chain`.
    chain: Box<serde_json::value::RawValue>,
    /// Known nodes: base58-encoded peer id -> multiaddresses (as strings).
    nodes: hashbrown::HashMap<String, Vec<String>, fnv::FnvBuildHasher>,
}