ant_node/
lib.rs

1// Copyright 2024 MaidSafe.net limited.
2//
3// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
4// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
5// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
6// KIND, either express or implied. Please review the Licences for the specific language governing
7// permissions and limitations relating to use of the SAFE Network Software.
8
9//! Implementation of the Node in SAFE Network.
10
11// For quick_error
12#![recursion_limit = "256"]
13#![doc(
14    html_logo_url = "https://github.com/maidsafe/QA/raw/master/Images/maidsafe_logo.png",
15    html_favicon_url = "https://maidsafe.net/img/favicon.ico",
16    test(attr(deny(warnings)))
17)]
18// Additional warnings and allows specific to this crate
19// missing_docs and unwrap_used are already covered by workspace lints
20#![warn(unreachable_pub, unused_results)]
21#![allow(clippy::large_enum_variant)]
22#![allow(clippy::result_large_err)]
23// Allow expect and panic usage - to be refactored
24#![allow(clippy::expect_used)]
25#![allow(clippy::panic)]
26
27#[macro_use]
28extern crate tracing;
29
30mod error;
31mod event;
32mod log_markers;
33#[cfg(feature = "open-metrics")]
34mod metrics;
35mod networking;
36mod node;
37mod put_validation;
38#[cfg(feature = "extension-module")]
39mod python;
40mod quote;
41mod replication;
42#[allow(missing_docs)]
43pub mod spawn;
44#[allow(missing_docs)]
45pub mod utils;
46
47pub use self::{
48    error::{Error, PutValidationError},
49    event::{NodeEvent, NodeEventsChannel, NodeEventsReceiver},
50    log_markers::Marker,
51    networking::sort_peers_by_key,
52    node::{NodeBuilder, PERIODIC_REPLICATION_INTERVAL_MAX_S},
53};
54pub use ant_bootstrap::{Bootstrap, BootstrapCacheStore, BootstrapConfig, InitialPeersConfig};
55
56use crate::error::Result;
57
58use crate::networking::Network;
59pub use crate::networking::SwarmLocalState;
60use ant_evm::RewardsAddress;
61use ant_protocol::{NetworkAddress, get_port_from_multiaddr};
62use libp2p::{Multiaddr, PeerId};
63use std::{
64    collections::{BTreeMap, HashSet},
65    path::PathBuf,
66};
67use tokio::sync::watch;
68
/// Once a node is started and running, the user obtains
/// a `RunningNode` object which can be used to interact with it.
#[derive(Clone)]
pub struct RunningNode {
    // Watch channel used by `shutdown` to signal the swarm driver and node loops to stop.
    shutdown_sender: watch::Sender<bool>,
    // Handle to the networking layer; backs `peer_id`, swarm-state queries and kbucket lookups.
    network: Network,
    // Channel publishing `NodeEvent`s; exposed read-only via `node_events_channel`.
    node_events_channel: NodeEventsChannel,
    // Root directory holding this node's data on disk; returned by `root_dir_path`.
    root_dir_path: PathBuf,
    // Address that receives this node's rewards; returned by `reward_address`.
    rewards_address: RewardsAddress,
}
79
80impl RunningNode {
81    /// Returns this node's `PeerId`
82    pub fn peer_id(&self) -> PeerId {
83        self.network.peer_id()
84    }
85
86    /// Returns the root directory path for the node.
87    ///
88    /// This will either be a value defined by the user, or a default location, plus the peer ID
89    /// appended. The default location is platform specific:
90    ///  - Linux: $HOME/.local/share/autonomi/node/<peer-id>
91    ///  - macOS: $HOME/Library/Application Support/autonomi/node/<peer-id>
92    ///  - Windows: C:\Users\<username>\AppData\Roaming\autonomi\node\<peer-id>
93    #[expect(rustdoc::invalid_html_tags)]
94    pub fn root_dir_path(&self) -> PathBuf {
95        self.root_dir_path.clone()
96    }
97
98    /// Returns a `SwarmLocalState` with some information obtained from swarm's local state.
99    pub async fn get_swarm_local_state(&self) -> Result<SwarmLocalState> {
100        let state = self.network.get_swarm_local_state().await?;
101        Ok(state)
102    }
103
104    /// Return the node's listening addresses.
105    pub async fn get_listen_addrs(&self) -> Result<Vec<Multiaddr>> {
106        let listeners = self.network.get_swarm_local_state().await?.listeners;
107        Ok(listeners)
108    }
109
110    /// Return the node's listening addresses with the peer id appended.
111    pub async fn get_listen_addrs_with_peer_id(&self) -> Result<Vec<Multiaddr>> {
112        let listeners = self.get_listen_addrs().await?;
113
114        let multi_addrs: Vec<Multiaddr> = listeners
115            .into_iter()
116            .filter_map(|listen_addr| listen_addr.with_p2p(self.peer_id()).ok())
117            .collect();
118
119        Ok(multi_addrs)
120    }
121
122    /// Return the node's listening port
123    pub async fn get_node_listening_port(&self) -> Result<u16> {
124        let listen_addrs = self.network.get_swarm_local_state().await?.listeners;
125        for addr in listen_addrs {
126            if let Some(port) = get_port_from_multiaddr(&addr) {
127                return Ok(port);
128            }
129        }
130        Err(Error::FailedToGetNodePort)
131    }
132
133    /// Returns the node events channel where to subscribe to receive `NodeEvent`s
134    pub fn node_events_channel(&self) -> &NodeEventsChannel {
135        &self.node_events_channel
136    }
137
138    /// Returns the list of all the RecordKeys held by the node
139    pub async fn get_all_record_addresses(&self) -> Result<HashSet<NetworkAddress>> {
140        #[allow(clippy::mutable_key_type)] // for Bytes in NetworkAddress
141        let addresses: HashSet<_> = self
142            .network
143            .get_all_local_record_addresses()
144            .await?
145            .keys()
146            .cloned()
147            .collect();
148        Ok(addresses)
149    }
150
151    /// Returns a map where each key is the ilog2 distance of that Kbucket and each value is a vector of peers in that
152    /// bucket.
153    pub async fn get_kbuckets(&self) -> Result<BTreeMap<u32, Vec<PeerId>>> {
154        let kbuckets = self.network.get_kbuckets().await?;
155        Ok(kbuckets)
156    }
157
158    /// Returns the node's reward address
159    pub fn reward_address(&self) -> &RewardsAddress {
160        &self.rewards_address
161    }
162
163    /// Shutdown the network driver loop and the node (NetworkEvents) loop.
164    pub fn shutdown(self) {
165        // Send the shutdown signal to the swarm driver and node loop
166        let _ = self.shutdown_sender.send(true);
167    }
168}