use ant_quic::host_identity::{HostIdentity, auto_storage};
use ant_quic::transport::TransportAddr;
use ant_quic::unified_config::{AutoConnectPolicy, MdnsConfig, MdnsMode};
use ant_quic::{MtuConfig, P2pConfig, P2pEndpoint, P2pEvent, PeerId, TraversalPhase};
use clap::{Parser, Subcommand, ValueEnum};
use sha2::{Digest, Sha256};
use std::collections::HashMap;
use std::net::SocketAddr;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
use tokio::sync::RwLock;
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, warn};
// Default Saorsa Labs bootstrap nodes. Used (after DNS resolution) only when
// the user supplies neither --known-peers nor --bootstrap and has not passed
// the hidden --no-default-bootstrap flag.
const DEFAULT_BOOTSTRAP_NODES: &[&str] = &[
    "saorsa-1.saorsalabs.com:9000",
    "saorsa-2.saorsalabs.com:9000",
];
// Command-line arguments for the ant-quic node binary.
//
// NOTE: plain `//` comments are used on purpose throughout this struct —
// clap's derive turns `///` doc comments on fields into user-visible
// `--help` text, which would change the program's output.
#[derive(Parser, Debug)]
#[command(name = "ant-quic")]
#[command(author, version, about, long_about = None)]
struct Args {
    // Optional maintenance subcommand (identity / cache / doctor). When
    // present, main() dispatches to handle_command() and exits without
    // starting a node.
    #[command(subcommand)]
    command: Option<Command>,
    // Local bind address; default is an OS-assigned port on all interfaces.
    #[arg(short, long, default_value = "[::]:0")]
    listen: SocketAddr,
    // Known peers and bootstrap peers are merged into one list in main().
    #[arg(short = 'k', long, value_delimiter = ',')]
    known_peers: Vec<SocketAddr>,
    #[arg(short, long, value_delimiter = ',')]
    bootstrap: Vec<SocketAddr>,
    // Connect by address or by hex peer ID — mutually exclusive.
    #[arg(short, long, conflicts_with = "connect_peer_id")]
    connect: Option<SocketAddr>,
    #[arg(long, value_name = "HEX", conflicts_with = "connect")]
    connect_peer_id: Option<String>,
    // Run a throughput test after a successful --connect/--connect-peer-id.
    #[arg(long)]
    throughput_test: bool,
    // Periodically send an incrementing u64 counter to all connected peers.
    #[arg(long)]
    counter_test: bool,
    // Counter send interval in milliseconds.
    #[arg(long, default_value = "1000")]
    counter_interval: u64,
    // Echo received payloads back to the sender.
    #[arg(long)]
    echo: bool,
    // Payload size (bytes) for the throughput test.
    #[arg(long, default_value = "1048576")]
    test_size: usize,
    // Raises the log filter from info to debug.
    #[arg(short, long)]
    verbose: bool,
    // Print periodic statistics every --stats-interval seconds.
    #[arg(long)]
    stats: bool,
    #[arg(long, default_value = "5")]
    stats_interval: u64,
    // Run duration in seconds; 0 means run until Ctrl+C.
    #[arg(long, default_value = "0")]
    duration: u64,
    // Use PQC-optimized MTU settings.
    #[arg(long)]
    pqc_mtu: bool,
    // Emit machine-readable JSON event lines on stdout.
    #[arg(long)]
    json: bool,
    // Hidden escape hatch to skip the default bootstrap nodes.
    #[arg(long, hide = true)]
    no_default_bootstrap: bool,
    // Disable best-effort router port mapping.
    #[arg(long)]
    no_port_mapping: bool,
    // mDNS toggles — --mdns and --no-mdns are mutually exclusive; the
    // remaining mDNS flags refine the MdnsConfig built in main().
    #[arg(long, conflicts_with = "no_mdns")]
    mdns: bool,
    #[arg(long, conflicts_with = "mdns")]
    no_mdns: bool,
    #[arg(long)]
    mdns_service: Option<String>,
    #[arg(long)]
    mdns_namespace: Option<String>,
    #[arg(long, value_enum)]
    mdns_mode: Option<CliMdnsMode>,
    #[arg(long, value_enum)]
    mdns_auto_connect: Option<CliMdnsAutoConnect>,
    // Print the full (untruncated) hex peer ID at startup.
    #[arg(long)]
    full_key: bool,
    // Base URL of an HTTP metrics collector; reports POSTed to /api/metrics.
    #[arg(long)]
    metrics_server: Option<String>,
    #[arg(long, default_value = "5")]
    metrics_interval: u64,
    // Node metadata attached to metrics reports.
    #[arg(long, default_value = "unknown")]
    node_location: String,
    #[arg(long)]
    node_id: Option<String>,
    // Total bytes of verified test data for --send-to (default 64 MiB).
    #[arg(long)]
    generate_data: Option<u64>,
    // Enable the receive/verify loop for checksummed data chunks.
    #[arg(long)]
    verify_data: bool,
    #[arg(long, default_value = "65536")]
    chunk_size: usize,
    // Hex peer ID to stream verified data chunks to.
    #[arg(long)]
    send_to: Option<String>,
    // Seconds to wait for the --send-to target to connect.
    #[arg(long, default_value = "30")]
    send_to_timeout: u64,
}
// Top-level maintenance subcommands; handled by handle_command() and exiting
// before any node is started. (`//` comments used so clap's help text is
// unchanged — doc comments on subcommands become help strings.)
#[derive(Subcommand, Debug)]
enum Command {
    // Manage the persistent host identity.
    Identity {
        #[command(subcommand)]
        action: IdentityAction,
    },
    // Inspect or clear the on-disk cache.
    Cache {
        #[command(subcommand)]
        action: CacheAction,
    },
    // Run environment diagnostics.
    Doctor,
}
// Actions for `ant-quic identity`. (`//` comments — see note on Command.)
#[derive(Subcommand, Debug)]
enum IdentityAction {
    // Display the stored identity; --all-networks shows every network's key.
    Show {
        #[arg(long)]
        all_networks: bool,
        #[arg(long, default_value = "~/.ant-quic")]
        data_dir: PathBuf,
    },
    // Delete the stored identity; --force skips confirmation.
    Wipe {
        #[arg(long)]
        force: bool,
        #[arg(long, default_value = "~/.ant-quic")]
        data_dir: PathBuf,
    },
    // Print the identity fingerprint only.
    Fingerprint,
}
// Actions for `ant-quic cache`. (`//` comments — see note on Command.)
#[derive(Subcommand, Debug)]
enum CacheAction {
    // Show cache statistics for the given data directory.
    Stats {
        #[arg(long, default_value = "~/.ant-quic")]
        data_dir: PathBuf,
    },
    // Clear the cache; --force skips confirmation.
    Clear {
        #[arg(long)]
        force: bool,
        #[arg(long, default_value = "~/.ant-quic")]
        data_dir: PathBuf,
    },
}
// CLI-facing mDNS mode; mapped onto the library's MdnsMode via From below.
// (`//` comments — clap ValueEnum doc comments become help text.)
#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
enum CliMdnsMode {
    Browse,
    Advertise,
    Both,
}
impl From<CliMdnsMode> for MdnsMode {
fn from(value: CliMdnsMode) -> Self {
match value {
CliMdnsMode::Browse => Self::BrowseOnly,
CliMdnsMode::Advertise => Self::AdvertiseOnly,
CliMdnsMode::Both => Self::Both,
}
}
}
// CLI-facing mDNS auto-connect policy; mapped onto AutoConnectPolicy below.
// (`//` comments — clap ValueEnum doc comments become help text.)
#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
enum CliMdnsAutoConnect {
    Disabled,
    ApprovalRequired,
    Enabled,
}
impl From<CliMdnsAutoConnect> for AutoConnectPolicy {
fn from(value: CliMdnsAutoConnect) -> Self {
match value {
CliMdnsAutoConnect::Disabled => Self::Disabled,
CliMdnsAutoConnect::ApprovalRequired => Self::ApprovalRequired,
CliMdnsAutoConnect::Enabled => Self::Enabled,
}
}
}
/// A self-verifying unit of test data exchanged between nodes
/// (JSON-serialized on the wire via serde).
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
struct VerifiedDataChunk {
    /// Zero-based chunk index within a transfer.
    sequence: u64,
    /// Raw payload bytes.
    data: Vec<u8>,
    /// Hex-encoded SHA-256 of `data`, set at construction.
    checksum: String,
    /// Creation time as Unix seconds (0 if the clock predates the epoch).
    timestamp: u64,
}
impl VerifiedDataChunk {
fn new(sequence: u64, data: Vec<u8>) -> Self {
let checksum = compute_sha256(&data);
let timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH)
.map(|d| d.as_secs())
.unwrap_or(0);
Self {
sequence,
data,
checksum,
timestamp,
}
}
fn verify(&self) -> bool {
compute_sha256(&self.data) == self.checksum
}
}
/// Return the hex-encoded SHA-256 digest of `data`.
fn compute_sha256(data: &[u8]) -> String {
    // `Digest::digest` is the one-shot convenience form of new/update/finalize.
    let digest = Sha256::digest(data);
    hex::encode(digest)
}
/// Split `total_size` bytes of deterministic test data into checksummed
/// chunks of at most `chunk_size` payload bytes.
///
/// Each payload byte follows the pattern `(sequence + offset) & 0xff`, so a
/// receiver can regenerate and verify it. Fixes two loop-termination bugs in
/// the original: a `chunk_size` of 0 (reachable via `--chunk-size 0`) never
/// decremented `remaining` and spun forever, and `remaining as usize` could
/// truncate to 0 on 32-bit targets for totals above `usize::MAX`.
fn generate_verified_chunks(total_size: u64, chunk_size: usize) -> Vec<VerifiedDataChunk> {
    // Clamp to at least 1 byte per chunk so the loop always makes progress.
    let chunk_size = chunk_size.max(1);
    let mut chunks = Vec::new();
    let mut remaining = total_size;
    let mut sequence = 0u64;
    while remaining > 0 {
        // Take the min in u64 to avoid lossy `remaining as usize` on 32-bit
        // targets; the result is <= chunk_size, so the usize cast is exact.
        let this_chunk = remaining.min(chunk_size as u64) as usize;
        let payload: Vec<u8> = (0..this_chunk)
            .map(|i| ((sequence as usize).wrapping_add(i) & 0xff) as u8)
            .collect();
        chunks.push(VerifiedDataChunk::new(sequence, payload));
        remaining -= this_chunk as u64;
        sequence += 1;
    }
    chunks
}
/// Process-wide counters shared across the spawned tasks (event handler,
/// counter/echo/metrics loops). All fields are atomics so tasks update them
/// without locking; the code reads/writes them with `Ordering::SeqCst`.
#[derive(Debug, Default)]
struct RuntimeStats {
    bytes_sent: AtomicU64,
    bytes_received: AtomicU64,
    connections_accepted: AtomicU64,
    connections_initiated: AtomicU64,
    nat_traversals_completed: AtomicU64,
    nat_traversals_failed: AtomicU64,
    external_addresses_discovered: AtomicU64,
    // Counter-test protocol counters (8-byte big-endian u64 payloads).
    counters_sent: AtomicU64,
    counters_received: AtomicU64,
    echoes_sent: AtomicU64,
    // Verified data-chunk transfer accounting (--send-to / --verify-data).
    data_chunks_sent: AtomicU64,
    data_chunks_verified: AtomicU64,
    data_verification_failures: AtomicU64,
    // Connection-type breakdown, classified from the traversal method.
    direct_connections: AtomicU64,
    relayed_connections: AtomicU64,
}
/// Serializable per-peer snapshot included in a `NodeMetricsReport`.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct PeerInfo {
    /// Shortened hex peer ID (see `format_peer_id`).
    pub peer_id: String,
    pub remote_addr: String,
    /// Connection timestamp, Unix seconds.
    pub connected_at: u64,
    pub bytes_sent: u64,
    pub bytes_received: u64,
    /// One of "direct", "nat_traversed", or "relayed".
    pub connection_type: String,
}
/// Periodic report POSTed as JSON to `--metrics-server`/api/metrics.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct NodeMetricsReport {
    pub node_id: String,
    pub location: String,
    /// Report time, Unix seconds.
    pub timestamp: u64,
    pub uptime_secs: u64,
    pub active_connections: usize,
    pub bytes_sent_total: u64,
    pub bytes_received_total: u64,
    /// Throughput over the last reporting interval, megabits per second.
    pub current_throughput_mbps: f64,
    pub nat_traversal_successes: u64,
    pub nat_traversal_failures: u64,
    pub direct_connections: u64,
    pub relayed_connections: u64,
    pub data_chunks_sent: u64,
    pub data_chunks_verified: u64,
    pub data_verification_failures: u64,
    pub external_addresses: Vec<String>,
    pub connected_peers: Vec<PeerInfo>,
    pub local_addr: String,
}
/// In-memory per-peer state, keyed by PeerId in the shared `peer_states` map.
/// Inserted on PeerConnected, removed on PeerDisconnected; byte counters are
/// updated from DataReceived events.
#[derive(Debug, Clone)]
#[allow(dead_code)] // some fields are only read when metrics reporting is on
struct PeerState {
    peer_id: PeerId,
    remote_addr: TransportAddr,
    connected_at: Instant,
    bytes_sent: u64,
    bytes_received: u64,
    /// "direct", "nat_traversed", or "relayed" (may be upgraded later by
    /// a NatTraversalProgress event).
    connection_type: String,
}
/// Entry point: parses CLI args, optionally dispatches a maintenance
/// subcommand, otherwise builds a P2P endpoint, spawns the background tasks
/// (event handler, optional receive/verify, stats, metrics, counter, echo),
/// performs any requested outbound connections / data transfer, then runs
/// the accept loop until Ctrl+C or the --duration limit.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let args = Args::parse();
    let log_level = if args.verbose { "debug" } else { "info" };
    tracing_subscriber::fmt()
        // NOTE(review): both directives target "ant_quic" — the second is
        // redundant; was a different crate/module target intended? TODO confirm.
        .with_env_filter(format!("ant_quic={log_level},ant_quic={log_level}"))
        .init();
    // Maintenance subcommands run and exit without starting a node.
    if let Some(command) = args.command {
        return handle_command(command).await;
    }
    info!("ant-quic v{}", env!("CARGO_PKG_VERSION"));
    info!("Symmetric P2P node starting...");
    // Merge --known-peers and --bootstrap into one peer list.
    let mut all_peers: Vec<SocketAddr> = args
        .known_peers
        .iter()
        .chain(args.bootstrap.iter())
        .copied()
        .collect();
    // Fall back to the default bootstrap nodes (resolved via DNS) when the
    // user gave no peers; a resolution failure is logged, not fatal.
    if all_peers.is_empty() && !args.no_default_bootstrap {
        info!("No peers specified, using default Saorsa Labs bootstrap nodes");
        for addr_str in DEFAULT_BOOTSTRAP_NODES {
            match tokio::net::lookup_host(addr_str).await {
                Ok(mut addrs) => {
                    // Only the first resolved address of each host is used.
                    if let Some(addr) = addrs.next() {
                        all_peers.push(addr);
                        info!("  - {} -> {}", addr_str, addr);
                    }
                }
                Err(e) => {
                    warn!("Failed to resolve {}: {}", addr_str, e);
                }
            }
        }
    }
    // Assemble the endpoint configuration from the CLI flags.
    let mut builder = P2pConfig::builder().bind_addr(args.listen);
    for addr in &all_peers {
        builder = builder.known_peer(*addr);
    }
    if args.pqc_mtu {
        builder = builder.mtu(MtuConfig::pqc_optimized());
        info!("Using PQC-optimized MTU settings");
    }
    if args.no_port_mapping {
        builder = builder.port_mapping_enabled(false);
        info!("Best-effort router port mapping disabled");
    } else {
        info!("Best-effort router port mapping enabled");
    }
    // Any mDNS refinement flag implies mDNS was requested, so combining one
    // with --no-mdns is rejected (clap only checks --mdns vs --no-mdns).
    let mdns_requested = args.mdns
        || args.mdns_service.is_some()
        || args.mdns_namespace.is_some()
        || args.mdns_mode.is_some()
        || args.mdns_auto_connect.is_some();
    if args.no_mdns && mdns_requested {
        anyhow::bail!("--no-mdns cannot be combined with other mDNS configuration flags");
    }
    if args.no_mdns {
        builder = builder.mdns_enabled(false);
        info!("First-party mDNS disabled");
    } else {
        // Start from defaults and overlay only the flags the user provided.
        let mut mdns_config = MdnsConfig::default();
        if let Some(service) = args.mdns_service.clone() {
            mdns_config.service = Some(service);
        }
        if let Some(namespace) = args.mdns_namespace.clone() {
            mdns_config.namespace = Some(namespace);
        }
        if let Some(mode) = args.mdns_mode {
            mdns_config.mode = mode.into();
        }
        if let Some(auto_connect) = args.mdns_auto_connect {
            mdns_config.auto_connect = auto_connect.into();
        }
        builder = builder.mdns(mdns_config.clone());
        info!(
            service = mdns_config.service.as_deref().unwrap_or_default(),
            namespace = mdns_config.namespace.as_deref().unwrap_or_default(),
            mode = ?mdns_config.mode,
            auto_connect = ?mdns_config.auto_connect,
            "First-party mDNS enabled"
        );
    }
    let config = builder.build()?;
    info!("Creating P2P endpoint...");
    let endpoint = P2pEndpoint::new(config).await?;
    let peer_id = endpoint.peer_id();
    let public_key = endpoint.public_key_bytes();
    // Print the node identity banner (full key only with --full-key).
    info!("═══════════════════════════════════════════════════════════════");
    info!("  NODE IDENTITY");
    info!("═══════════════════════════════════════════════════════════════");
    if args.full_key {
        info!("Peer ID (full): {}", hex::encode(peer_id.0));
    } else {
        info!("Peer ID: {}", format_peer_id(&peer_id));
    }
    info!("Public Key (ML-DSA-65): {}", hex::encode(public_key));
    if let Some(addr) = endpoint.local_addr() {
        info!("Local Address: {}", addr);
    }
    info!("═══════════════════════════════════════════════════════════════");
    // In JSON mode, also emit the identity as a machine-readable stdout line.
    if args.json {
        if let Some(addr) = endpoint.local_addr() {
            println!(
                r#"{{"event":"local_identity","peer_id":"{}","addr":"{}"}}"#,
                hex::encode(peer_id.0),
                addr
            );
        } else {
            println!(
                r#"{{"event":"local_identity","peer_id":"{}"}}"#,
                hex::encode(peer_id.0)
            );
        }
    }
    // Ctrl+C cancels this token; every background task polls/selects on it.
    let shutdown = CancellationToken::new();
    let shutdown_clone = shutdown.clone();
    tokio::spawn(async move {
        if let Err(e) = tokio::signal::ctrl_c().await {
            error!("Failed to listen for ctrl-c: {}", e);
        }
        info!("Shutdown signal received");
        shutdown_clone.cancel();
    });
    // Shared state: atomic counters, per-peer states, discovered addresses.
    let stats = Arc::new(RuntimeStats::default());
    let stats_clone = stats.clone();
    let peer_states: Arc<RwLock<HashMap<PeerId, PeerState>>> =
        Arc::new(RwLock::new(HashMap::new()));
    let external_addrs: Arc<RwLock<Vec<TransportAddr>>> = Arc::new(RwLock::new(Vec::new()));
    let endpoint_clone = endpoint.clone();
    let shutdown_events = shutdown.clone();
    let json_output = args.json;
    let peer_states_events = peer_states.clone();
    let external_addrs_events = external_addrs.clone();
    // Event pump: drains endpoint events with a 100 ms poll so the
    // cancellation token is re-checked regularly.
    let event_handle = tokio::spawn(async move {
        let mut events = endpoint_clone.subscribe();
        while !shutdown_events.is_cancelled() {
            match tokio::time::timeout(Duration::from_millis(100), events.recv()).await {
                Ok(Ok(event)) => {
                    handle_event_with_state(
                        &event,
                        &stats_clone,
                        &peer_states_events,
                        &external_addrs_events,
                        json_output,
                    )
                    .await;
                }
                // Closed channel ends the task; a timeout just re-polls.
                Ok(Err(_)) => break,
                Err(_) => continue,
            }
        }
    });
    // Receive/verify loop: decodes incoming payloads as VerifiedDataChunk
    // and reports sha_match. Active for --verify-data and also on the sender
    // side of --send-to.
    let recv_decode_handle = if args.verify_data || args.send_to.is_some() {
        let endpoint_recv = endpoint.clone();
        let shutdown_recv = shutdown.clone();
        let stats_recv = stats.clone();
        Some(tokio::spawn(async move {
            while !shutdown_recv.is_cancelled() {
                match tokio::time::timeout(Duration::from_millis(200), endpoint_recv.recv()).await {
                    Ok(Ok((peer_id, data))) => {
                        stats_recv
                            .bytes_received
                            .fetch_add(data.len() as u64, Ordering::SeqCst);
                        // Non-chunk payloads are counted above but otherwise ignored.
                        if let Ok(chunk) = serde_json::from_slice::<VerifiedDataChunk>(&data) {
                            let sha_ok = chunk.verify();
                            if sha_ok {
                                stats_recv
                                    .data_chunks_verified
                                    .fetch_add(1, Ordering::SeqCst);
                            } else {
                                stats_recv
                                    .data_verification_failures
                                    .fetch_add(1, Ordering::SeqCst);
                            }
                            println!(
                                r#"{{"event":"data_received","peer_id":"{}","sequence":{},"bytes":{},"sha_match":{}}}"#,
                                format_peer_id(&peer_id),
                                chunk.sequence,
                                chunk.data.len(),
                                sha_ok
                            );
                        }
                    }
                    Ok(Err(_)) => break,
                    Err(_) => continue,
                }
            }
        }))
    } else {
        None
    };
    let stats_clone2 = stats.clone();
    let shutdown_stats = shutdown.clone();
    // Periodic statistics printer (--stats).
    let stats_handle = if args.stats {
        let endpoint_stats = endpoint.clone();
        let interval = args.stats_interval;
        let json = args.json;
        Some(tokio::spawn(async move {
            let mut interval_timer = tokio::time::interval(Duration::from_secs(interval));
            while !shutdown_stats.is_cancelled() {
                interval_timer.tick().await;
                print_stats(&endpoint_stats, &stats_clone2, json).await;
            }
        }))
    } else {
        None
    };
    // Metrics reporter (--metrics-server): periodically POSTs a
    // NodeMetricsReport as JSON to <server>/api/metrics.
    let metrics_handle = if let Some(ref server) = args.metrics_server {
        let endpoint_metrics = endpoint.clone();
        let shutdown_metrics = shutdown.clone();
        let stats_metrics = stats.clone();
        let peer_states_metrics = peer_states.clone();
        let external_addrs_metrics = external_addrs.clone();
        let interval_secs = args.metrics_interval;
        let server_url = server.clone();
        let node_id = args
            .node_id
            .clone()
            .unwrap_or_else(|| format_peer_id(&peer_id));
        let location = args.node_location.clone();
        let start_time = Instant::now();
        info!(
            "Metrics reporting enabled: {} every {}s",
            server_url, interval_secs
        );
        Some(tokio::spawn(async move {
            // Fall back to a default client if the 10s-timeout builder fails.
            let client = reqwest::Client::builder()
                .timeout(Duration::from_secs(10))
                .build()
                .unwrap_or_else(|_| reqwest::Client::new());
            let mut interval = tokio::time::interval(Duration::from_secs(interval_secs));
            // prev_bytes/prev_time carry state between intervals so
            // build_metrics_report can compute interval throughput.
            let mut prev_bytes: u64 = 0;
            let mut prev_time = Instant::now();
            while !shutdown_metrics.is_cancelled() {
                interval.tick().await;
                let report = build_metrics_report(
                    &node_id,
                    &location,
                    start_time,
                    &endpoint_metrics,
                    &stats_metrics,
                    &peer_states_metrics,
                    &external_addrs_metrics,
                    &mut prev_bytes,
                    &mut prev_time,
                )
                .await;
                let url = format!("{}/api/metrics", server_url);
                // Delivery failures are logged and the loop continues.
                match client.post(&url).json(&report).send().await {
                    Ok(response) => {
                        if response.status().is_success() {
                            debug!("Metrics sent successfully to {}", url);
                        } else {
                            warn!(
                                "Metrics server returned status {}: {}",
                                response.status(),
                                url
                            );
                        }
                    }
                    Err(e) => {
                        warn!("Failed to send metrics to {}: {}", url, e);
                    }
                }
            }
        }))
    } else {
        None
    };
    // Counter test (--counter-test): every interval, send an incrementing
    // big-endian u64 to every connected peer, fanning out one task per peer.
    let counter_handle = if args.counter_test {
        let endpoint_counter = endpoint.clone();
        let shutdown_counter = shutdown.clone();
        let interval_ms = args.counter_interval;
        let stats_counter = stats.clone();
        let json = args.json;
        Some(tokio::spawn(async move {
            let mut counter: u64 = 0;
            let mut interval = tokio::time::interval(Duration::from_millis(interval_ms));
            while !shutdown_counter.is_cancelled() {
                interval.tick().await;
                counter += 1;
                let peers = endpoint_counter.connected_peers().await;
                let mut send_tasks = Vec::with_capacity(peers.len());
                for peer in peers {
                    let endpoint_send = endpoint_counter.clone();
                    let stats_send = stats_counter.clone();
                    let peer_id = peer.peer_id;
                    send_tasks.push(tokio::spawn(async move {
                        let data = counter.to_be_bytes();
                        match endpoint_send.send(&peer_id, &data).await {
                            Ok(()) => {
                                stats_send.counters_sent.fetch_add(1, Ordering::SeqCst);
                                stats_send
                                    .bytes_sent
                                    .fetch_add(data.len() as u64, Ordering::SeqCst);
                                if json {
                                    println!(
                                        r#"{{"event":"counter_sent","counter":{},"peer":"{}"}}"#,
                                        counter,
                                        hex::encode(&peer_id.0[..8])
                                    );
                                } else {
                                    info!(
                                        "Sent counter {} to peer {}",
                                        counter,
                                        hex::encode(&peer_id.0[..8])
                                    );
                                }
                            }
                            Err(e) => {
                                debug!("Failed to send counter to {:?}: {}", peer_id, e);
                            }
                        }
                    }));
                }
                // Wait for all per-peer sends before the next tick.
                for task in send_tasks {
                    if let Err(e) = task.await {
                        debug!("Counter send task join error: {}", e);
                    }
                }
            }
        }))
    } else {
        None
    };
    // Receive loop: always runs; logs incoming data, decodes 8-byte payloads
    // as counters, and (with --echo) sends payloads back to the sender.
    let echo_handle = {
        let endpoint_echo = endpoint.clone();
        let shutdown_echo = shutdown.clone();
        let echo_enabled = args.echo;
        let stats_echo = stats.clone();
        let json = args.json;
        tokio::spawn(async move {
            loop {
                let result = tokio::select! {
                    r = endpoint_echo.recv() => r,
                    _ = shutdown_echo.cancelled() => break,
                };
                match result {
                    Ok((peer_id, data)) => {
                        stats_echo
                            .bytes_received
                            .fetch_add(data.len() as u64, Ordering::SeqCst);
                        // Exactly 8 bytes is treated as a counter-test payload.
                        if data.len() == 8 {
                            if let Ok(bytes) = data[..8].try_into() {
                                let counter = u64::from_be_bytes(bytes);
                                stats_echo.counters_received.fetch_add(1, Ordering::SeqCst);
                                if json {
                                    println!(
                                        r#"{{"event":"counter_received","counter":{},"peer":"{}"}}"#,
                                        counter,
                                        hex::encode(&peer_id.0[..8])
                                    );
                                } else {
                                    info!(
                                        "Received counter {} from peer {}",
                                        counter,
                                        hex::encode(&peer_id.0[..8])
                                    );
                                }
                            }
                        } else if json {
                            println!(
                                r#"{{"event":"data_received","bytes":{},"peer":"{}"}}"#,
                                data.len(),
                                hex::encode(&peer_id.0[..8])
                            );
                        } else {
                            info!(
                                "Received {} bytes from peer {}",
                                data.len(),
                                hex::encode(&peer_id.0[..8])
                            );
                        }
                        if echo_enabled {
                            // Echo asynchronously so slow sends don't stall the
                            // receive loop.
                            let endpoint_send = endpoint_echo.clone();
                            let stats_send = stats_echo.clone();
                            tokio::spawn(async move {
                                if let Err(e) = endpoint_send.send(&peer_id, &data).await {
                                    debug!("Failed to echo: {}", e);
                                } else {
                                    stats_send.echoes_sent.fetch_add(1, Ordering::SeqCst);
                                    stats_send
                                        .bytes_sent
                                        .fetch_add(data.len() as u64, Ordering::SeqCst);
                                }
                            });
                        }
                    }
                    Err(_) => {
                        // Receive errors are ignored; the loop keeps polling.
                    }
                }
            }
        })
    };
    // Outbound connections requested via the CLI.
    if !all_peers.is_empty() {
        info!("Connecting to {} known peer(s)...", all_peers.len());
        match endpoint.connect_known_peers().await {
            Ok(count) => {
                info!("Connected to {} known peer(s)", count);
                stats
                    .connections_initiated
                    .fetch_add(count as u64, Ordering::SeqCst);
            }
            Err(e) => {
                error!("Failed to connect to known peers: {}", e);
            }
        }
    }
    if let Some(peer_addr) = args.connect {
        info!(
            "Connecting to peer at {} via unified connectivity path...",
            peer_addr
        );
        match endpoint.connect_addr(peer_addr).await {
            Ok(peer) => {
                info!("Connected to peer: {}", format_peer_id(&peer.peer_id));
                stats.connections_initiated.fetch_add(1, Ordering::SeqCst);
                if args.throughput_test {
                    run_throughput_test(&endpoint, &peer.peer_id, args.test_size).await?;
                }
            }
            Err(e) => {
                error!("Failed to connect to peer {}: {}", peer_addr, e);
            }
        }
    }
    if let Some(peer_id_hex) = &args.connect_peer_id {
        match parse_peer_id_hex(peer_id_hex) {
            Ok(peer_id) => {
                info!(
                    "Connecting to peer ID {} via unified peer-oriented path...",
                    hex::encode(peer_id.0)
                );
                match endpoint.connect_peer(peer_id).await {
                    Ok(peer) => {
                        info!("Connected to peer: {}", format_peer_id(&peer.peer_id));
                        stats.connections_initiated.fetch_add(1, Ordering::SeqCst);
                        if args.throughput_test {
                            run_throughput_test(&endpoint, &peer.peer_id, args.test_size).await?;
                        }
                    }
                    Err(e) => {
                        error!("Failed to connect to peer {}: {}", peer_id_hex, e);
                    }
                }
            }
            Err(e) => {
                error!("Invalid --connect-peer-id value {}: {}", peer_id_hex, e);
            }
        }
    }
    // --send-to: wait (up to the timeout) for the target peer to appear in
    // peer_states, then stream checksummed chunks and print a JSON summary.
    if let Some(target_hex) = args.send_to.clone() {
        let target_id = match parse_peer_id_hex(&target_hex) {
            Ok(p) => p,
            Err(e) => {
                error!("Invalid --send-to value {}: {}", target_hex, e);
                return Err(anyhow::anyhow!("invalid --send-to peer id"));
            }
        };
        // Default transfer size is 64 MiB unless --generate-data is given.
        let total_bytes = args.generate_data.unwrap_or(64 * 1024 * 1024);
        let chunk_size = args.chunk_size;
        let target_short = format_peer_id(&target_id);
        let timeout = Duration::from_secs(args.send_to_timeout);
        info!(
            "send-to: target={} bytes={} chunk_size={} timeout={}s",
            target_short,
            total_bytes,
            chunk_size,
            timeout.as_secs()
        );
        // Poll the peer map every 250 ms until the target connects.
        let wait_start = Instant::now();
        let mut connected = false;
        while wait_start.elapsed() < timeout {
            if peer_states.read().await.contains_key(&target_id) {
                connected = true;
                break;
            }
            tokio::time::sleep(Duration::from_millis(250)).await;
        }
        if !connected {
            println!(
                r#"{{"event":"send_to_complete","target":"{}","bytes":0,"chunks":0,"duration_ms":{},"throughput_mbps":0.0,"sha_ok":false,"error":"target not connected within timeout"}}"#,
                target_short,
                wait_start.elapsed().as_millis()
            );
            error!(
                "send-to: target {} not connected within {}s",
                target_short,
                timeout.as_secs()
            );
        } else {
            let chunks = generate_verified_chunks(total_bytes, chunk_size);
            let chunk_count = chunks.len();
            let send_start = Instant::now();
            let mut chunks_sent = 0u64;
            let mut send_failures = 0u64;
            // Counts serialized wire bytes, not raw payload bytes.
            let mut bytes_sent_wire = 0u64;
            for chunk in &chunks {
                if shutdown.is_cancelled() {
                    break;
                }
                let bytes = match serde_json::to_vec(chunk) {
                    Ok(b) => b,
                    Err(e) => {
                        error!(
                            "send-to: serialise failure for chunk {}: {}",
                            chunk.sequence, e
                        );
                        send_failures += 1;
                        continue;
                    }
                };
                match endpoint.send(&target_id, &bytes).await {
                    Ok(()) => {
                        chunks_sent += 1;
                        bytes_sent_wire += bytes.len() as u64;
                        stats
                            .bytes_sent
                            .fetch_add(bytes.len() as u64, Ordering::SeqCst);
                        stats.data_chunks_sent.fetch_add(1, Ordering::SeqCst);
                    }
                    Err(e) => {
                        send_failures += 1;
                        warn!("send-to: chunk {} failed: {}", chunk.sequence, e);
                    }
                }
            }
            let duration_ms = send_start.elapsed().as_millis();
            // bits / (ms * 1000) == bits / microseconds-of-a-second == Mbps.
            let throughput_mbps = if duration_ms > 0 {
                (bytes_sent_wire as f64 * 8.0) / (duration_ms as f64 * 1_000.0)
            } else {
                0.0
            };
            println!(
                r#"{{"event":"send_to_complete","target":"{}","bytes":{},"chunks":{},"chunks_total":{},"failures":{},"duration_ms":{},"throughput_mbps":{:.2},"sha_ok":true}}"#,
                target_short,
                bytes_sent_wire,
                chunks_sent,
                chunk_count,
                send_failures,
                duration_ms,
                throughput_mbps
            );
            info!(
                "send-to: complete — {} bytes in {} ms ({:.2} Mbps), {} chunks ok / {} failed",
                bytes_sent_wire, duration_ms, throughput_mbps, chunks_sent, send_failures
            );
        }
    }
    // Main accept loop: runs until Ctrl+C or the --duration limit.
    let start_time = Instant::now();
    let duration = if args.duration > 0 {
        Some(Duration::from_secs(args.duration))
    } else {
        None
    };
    info!("Ready. Press Ctrl+C to shutdown.");
    while !shutdown.is_cancelled() {
        if let Some(max_duration) = duration
            && start_time.elapsed() > max_duration
        {
            info!("Duration limit reached");
            break;
        }
        // 100 ms timeout so the cancellation token is re-checked regularly.
        match tokio::time::timeout(Duration::from_millis(100), endpoint.accept()).await {
            Ok(Some(peer)) => {
                info!(
                    "Accepted connection from peer: {} at {}",
                    format_peer_id(&peer.peer_id),
                    peer.remote_addr
                );
                stats.connections_accepted.fetch_add(1, Ordering::SeqCst);
            }
            Ok(None) => {
                // Endpoint yielded no connection; keep polling.
            }
            Err(_) => {
                // Timeout; keep polling.
            }
        }
    }
    // Orderly shutdown: cancel tasks, close the endpoint, abort the handles.
    info!("Shutting down...");
    shutdown.cancel();
    endpoint.shutdown().await;
    event_handle.abort();
    echo_handle.abort();
    if let Some(h) = stats_handle {
        h.abort();
    }
    if let Some(h) = counter_handle {
        h.abort();
    }
    if let Some(h) = metrics_handle {
        h.abort();
    }
    if let Some(h) = recv_decode_handle {
        h.abort();
    }
    print_final_stats(&stats, start_time.elapsed(), args.json);
    info!("Goodbye!");
    Ok(())
}
/// Process one P2P event: update the shared counters and per-peer state,
/// then report the event either as a JSON line on stdout (`json == true`)
/// or through tracing.
async fn handle_event_with_state(
    event: &P2pEvent,
    stats: &RuntimeStats,
    peer_states: &RwLock<HashMap<PeerId, PeerState>>,
    external_addrs: &RwLock<Vec<TransportAddr>>,
    json: bool,
) {
    match event {
        // New connection: record PeerState, classify the connection type
        // from the traversal method, and bump the matching counter.
        P2pEvent::PeerConnected {
            peer_id,
            addr,
            side,
            traversal_method,
        } => {
            let direction = if side.is_client() {
                "outbound"
            } else {
                "inbound"
            };
            let connection_type = match traversal_method {
                ant_quic::TraversalMethod::Direct => "direct",
                ant_quic::TraversalMethod::HolePunch
                | ant_quic::TraversalMethod::PortPrediction => "nat_traversed",
                ant_quic::TraversalMethod::Relay => "relayed",
            };
            let state = PeerState {
                peer_id: *peer_id,
                remote_addr: addr.clone(),
                connected_at: Instant::now(),
                bytes_sent: 0,
                bytes_received: 0,
                connection_type: connection_type.to_string(),
            };
            peer_states.write().await.insert(*peer_id, state);
            // NAT-traversed connections are counted separately, via the
            // NatTraversalProgress::Connected event below.
            match traversal_method {
                ant_quic::TraversalMethod::Direct => {
                    stats.direct_connections.fetch_add(1, Ordering::SeqCst);
                }
                ant_quic::TraversalMethod::Relay => {
                    stats.relayed_connections.fetch_add(1, Ordering::SeqCst);
                }
                ant_quic::TraversalMethod::HolePunch
                | ant_quic::TraversalMethod::PortPrediction => {}
            }
            if json {
                println!(
                    r#"{{"event":"peer_connected","peer_id":"{}","addr":"{}","direction":"{}","connection_type":"{}"}}"#,
                    format_peer_id(peer_id),
                    addr,
                    direction,
                    connection_type
                );
            } else {
                info!(
                    "Peer connected: {} at {} ({} / {})",
                    format_peer_id(peer_id),
                    addr,
                    direction,
                    connection_type
                );
            }
        }
        // Disconnect: drop the peer's state entry.
        P2pEvent::PeerDisconnected { peer_id, reason } => {
            peer_states.write().await.remove(peer_id);
            if json {
                println!(
                    r#"{{"event":"peer_disconnected","peer_id":"{}","reason":"{:?}"}}"#,
                    format_peer_id(peer_id),
                    reason
                );
            } else {
                info!(
                    "Peer disconnected: {} ({:?})",
                    format_peer_id(peer_id),
                    reason
                );
            }
        }
        // Track every discovery event in the counter, but only store
        // unique addresses in the shared list.
        P2pEvent::ExternalAddressDiscovered { addr } => {
            stats
                .external_addresses_discovered
                .fetch_add(1, Ordering::SeqCst);
            let mut addrs = external_addrs.write().await;
            if !addrs.contains(addr) {
                addrs.push(addr.clone());
            }
            if json {
                println!(
                    r#"{{"event":"external_address_discovered","addr":"{}"}}"#,
                    addr
                );
            } else {
                info!("External address discovered: {}", addr);
            }
        }
        // NAT traversal: on the terminal Connected phase, count the success
        // and upgrade the stored connection type.
        P2pEvent::NatTraversalProgress { peer_id, phase } => {
            if matches!(phase, TraversalPhase::Connected) {
                stats
                    .nat_traversals_completed
                    .fetch_add(1, Ordering::SeqCst);
                if let Some(state) = peer_states.write().await.get_mut(peer_id) {
                    state.connection_type = "nat_traversed".to_string();
                }
            }
            if json {
                println!(
                    r#"{{"event":"nat_traversal_progress","peer_id":"{}","phase":"{:?}"}}"#,
                    format_peer_id(peer_id),
                    phase
                );
            } else {
                info!(
                    "NAT traversal progress: {} - {:?}",
                    format_peer_id(peer_id),
                    phase
                );
            }
        }
        // Port-mapping lifecycle events: reporting only, no state changes.
        P2pEvent::PortMappingEstablished { external_addr } => {
            if json {
                println!(
                    r#"{{"event":"port_mapping_established","external_addr":"{}"}}"#,
                    external_addr
                );
            } else {
                info!("Port mapping established: {}", external_addr);
            }
        }
        P2pEvent::PortMappingRenewed { external_addr } => {
            if json {
                println!(
                    r#"{{"event":"port_mapping_renewed","external_addr":"{}"}}"#,
                    external_addr
                );
            } else {
                info!("Port mapping renewed: {}", external_addr);
            }
        }
        P2pEvent::PortMappingAddressChanged {
            previous_addr,
            external_addr,
        } => {
            if json {
                println!(
                    r#"{{"event":"port_mapping_address_changed","previous_addr":"{}","external_addr":"{}"}}"#,
                    previous_addr, external_addr
                );
            } else {
                info!(
                    "Port mapping address changed: {} -> {}",
                    previous_addr, external_addr
                );
            }
        }
        P2pEvent::PortMappingFailed { error } => {
            if json {
                println!(r#"{{"event":"port_mapping_failed","error":"{}"}}"#, error);
            } else {
                warn!("Port mapping failed: {}", error);
            }
        }
        P2pEvent::PortMappingRemoved { external_addr } => {
            if json {
                // external_addr is optional, hence the unquoted placeholder:
                // it becomes either a quoted address or a JSON null.
                println!(
                    r#"{{"event":"port_mapping_removed","external_addr":{}}}"#,
                    external_addr
                        .map(|addr| format!("\"{}\"", addr))
                        .unwrap_or_else(|| "null".to_string())
                );
            } else if let Some(addr) = external_addr {
                info!("Port mapping removed: {}", addr);
            } else {
                info!("Port mapping removed");
            }
        }
        // mDNS lifecycle events: reporting only, no state changes.
        P2pEvent::MdnsServiceAdvertised {
            service,
            namespace,
            instance_fullname,
        } => {
            if json {
                println!(
                    r#"{{"event":"mdns_service_advertised","service":"{}","namespace":{},"instance_fullname":"{}"}}"#,
                    service,
                    namespace
                        .as_ref()
                        .map(|value| format!("\"{}\"", value))
                        .unwrap_or_else(|| "null".to_string()),
                    instance_fullname
                );
            } else {
                info!(
                    "mDNS service advertised: {} ({})",
                    instance_fullname,
                    namespace
                        .clone()
                        .unwrap_or_else(|| "no namespace".to_string())
                );
            }
        }
        P2pEvent::MdnsPeerDiscovered { peer } => {
            if json {
                println!(
                    r#"{{"event":"mdns_peer_discovered","fullname":"{}","addresses":"{}"}}"#,
                    peer.fullname,
                    peer.addresses
                        .iter()
                        .map(SocketAddr::to_string)
                        .collect::<Vec<_>>()
                        .join(",")
                );
            } else {
                info!(
                    "mDNS peer discovered: {} -> {:?}",
                    peer.fullname, peer.addresses
                );
            }
        }
        P2pEvent::MdnsPeerUpdated { peer } => {
            if json {
                println!(
                    r#"{{"event":"mdns_peer_updated","fullname":"{}","addresses":"{}"}}"#,
                    peer.fullname,
                    peer.addresses
                        .iter()
                        .map(SocketAddr::to_string)
                        .collect::<Vec<_>>()
                        .join(",")
                );
            } else {
                info!(
                    "mDNS peer updated: {} -> {:?}",
                    peer.fullname, peer.addresses
                );
            }
        }
        P2pEvent::MdnsPeerRemoved { peer } => {
            if json {
                println!(
                    r#"{{"event":"mdns_peer_removed","fullname":"{}"}}"#,
                    peer.fullname
                );
            } else {
                info!("mDNS peer removed: {}", peer.fullname);
            }
        }
        P2pEvent::MdnsPeerEligible { peer } => {
            if json {
                println!(
                    r#"{{"event":"mdns_peer_eligible","fullname":"{}","addresses":"{}"}}"#,
                    peer.fullname,
                    peer.addresses
                        .iter()
                        .map(SocketAddr::to_string)
                        .collect::<Vec<_>>()
                        .join(",")
                );
            } else {
                info!(
                    "mDNS peer eligible: {} -> {:?}",
                    peer.fullname, peer.addresses
                );
            }
        }
        P2pEvent::MdnsPeerIneligible { peer, reason } => {
            if json {
                println!(
                    r#"{{"event":"mdns_peer_ineligible","fullname":"{}","reason":"{}"}}"#,
                    peer.fullname, reason
                );
            } else {
                info!("mDNS peer ineligible: {} ({})", peer.fullname, reason);
            }
        }
        P2pEvent::MdnsPeerApprovalRequired { peer, reason } => {
            if json {
                println!(
                    r#"{{"event":"mdns_peer_approval_required","fullname":"{}","reason":"{}"}}"#,
                    peer.fullname, reason
                );
            } else {
                info!(
                    "mDNS peer approval required: {} ({})",
                    peer.fullname, reason
                );
            }
        }
        P2pEvent::MdnsAutoConnectAttempted { peer, addresses } => {
            if json {
                println!(
                    r#"{{"event":"mdns_auto_connect_attempted","fullname":"{}","addresses":"{}"}}"#,
                    peer.fullname,
                    addresses
                        .iter()
                        .map(SocketAddr::to_string)
                        .collect::<Vec<_>>()
                        .join(",")
                );
            } else {
                info!(
                    "mDNS auto-connect attempted: {} -> {:?}",
                    peer.fullname, addresses
                );
            }
        }
        P2pEvent::MdnsAutoConnectSucceeded {
            peer,
            authenticated_peer_id,
            remote_addr,
        } => {
            if json {
                println!(
                    r#"{{"event":"mdns_auto_connect_succeeded","fullname":"{}","peer_id":"{}","remote_addr":"{}"}}"#,
                    peer.fullname,
                    hex::encode(authenticated_peer_id.0),
                    remote_addr
                );
            } else {
                info!(
                    "mDNS auto-connect succeeded: {} authenticated as {} via {}",
                    peer.fullname,
                    hex::encode(authenticated_peer_id.0),
                    remote_addr
                );
            }
        }
        P2pEvent::MdnsAutoConnectFailed {
            peer,
            addresses,
            error,
        } => {
            if json {
                println!(
                    r#"{{"event":"mdns_auto_connect_failed","fullname":"{}","addresses":"{}","error":"{}"}}"#,
                    peer.fullname,
                    addresses
                        .iter()
                        .map(SocketAddr::to_string)
                        .collect::<Vec<_>>()
                        .join(","),
                    error
                );
            } else {
                warn!(
                    "mDNS auto-connect failed: {} via {:?} ({})",
                    peer.fullname, addresses, error
                );
            }
        }
        // Data received: update global and per-peer byte counters.
        P2pEvent::DataReceived { peer_id, bytes } => {
            stats
                .bytes_received
                .fetch_add(*bytes as u64, Ordering::SeqCst);
            if let Some(state) = peer_states.write().await.get_mut(peer_id) {
                state.bytes_received += *bytes as u64;
            }
            debug!("Received {} bytes from {}", bytes, format_peer_id(peer_id));
        }
        P2pEvent::DirectPathStatus { peer_id, status } => {
            if json {
                println!(
                    r#"{{"event":"direct_path_status","peer_id":"{}","status":"{:?}"}}"#,
                    format_peer_id(peer_id),
                    status
                );
            } else {
                info!(
                    "Direct path status: {} -> {:?}",
                    format_peer_id(peer_id),
                    status
                );
            }
        }
        // Any other event variant is only logged at debug level.
        _ => {
            debug!("Event: {:?}", event);
        }
    }
}
/// Emit a periodic statistics snapshot: one machine-readable JSON line on
/// stdout when `json` is true, otherwise a series of human-readable `info!`
/// lines. Endpoint-level figures come from the library; `runtime_stats`
/// holds this binary's own atomic counters.
async fn print_stats(endpoint: &P2pEndpoint, runtime_stats: &RuntimeStats, json: bool) {
    let stats = endpoint.stats().await;
    let port_mapping_active = endpoint.port_mapping_active();
    let port_mapping_addr = endpoint.port_mapping_addr();
    let mdns = endpoint.mdns_snapshot();
    let relay_service_enabled = endpoint.relay_service_enabled();
    let coordinator_service_enabled = endpoint.coordinator_service_enabled();
    let bootstrap_service_enabled = endpoint.bootstrap_service_enabled();
    if json {
        // port_mapping_addr is optional, so its placeholder is unquoted and
        // filled with either a quoted address or a JSON null.
        println!(
            r#"{{"type":"stats","active_connections":{},"successful_connections":{},"failed_connections":{},"nat_traversals":{},"bytes_sent":{},"bytes_received":{},"external_addresses":{},"port_mapping_active":{},"port_mapping_addr":{},"mdns_browsing":{},"mdns_advertising":{},"mdns_discovered_peers":{},"relay_service_enabled":{},"coordinator_service_enabled":{},"bootstrap_service_enabled":{}}}"#,
            stats.active_connections,
            stats.successful_connections,
            stats.failed_connections,
            runtime_stats
                .nat_traversals_completed
                .load(Ordering::SeqCst),
            runtime_stats.bytes_sent.load(Ordering::SeqCst),
            runtime_stats.bytes_received.load(Ordering::SeqCst),
            runtime_stats
                .external_addresses_discovered
                .load(Ordering::SeqCst),
            port_mapping_active,
            port_mapping_addr
                .map(|addr| format!("\"{}\"", addr))
                .unwrap_or_else(|| "null".to_string()),
            mdns.browsing,
            mdns.advertising,
            mdns.discovered_peers.len(),
            relay_service_enabled,
            coordinator_service_enabled,
            bootstrap_service_enabled,
        );
    } else {
        info!("=== Statistics ===");
        info!("  Active connections: {}", stats.active_connections);
        info!("  Successful connections: {}", stats.successful_connections);
        info!("  Failed connections: {}", stats.failed_connections);
        info!(
            "  NAT traversals completed: {}",
            runtime_stats
                .nat_traversals_completed
                .load(Ordering::SeqCst)
        );
        info!(
            "  External addresses discovered: {}",
            runtime_stats
                .external_addresses_discovered
                .load(Ordering::SeqCst)
        );
        info!(
            "  Bytes sent: {}",
            format_bytes(runtime_stats.bytes_sent.load(Ordering::SeqCst))
        );
        info!(
            "  Bytes received: {}",
            format_bytes(runtime_stats.bytes_received.load(Ordering::SeqCst))
        );
        info!("  Port mapping active: {}", port_mapping_active);
        if let Some(mapped_addr) = port_mapping_addr {
            info!("  Port mapping address: {}", mapped_addr);
        }
        info!("  mDNS browsing: {}", mdns.browsing);
        info!("  mDNS advertising: {}", mdns.advertising);
        info!("  mDNS discovered peers: {}", mdns.discovered_peers.len());
        info!("  Relay service enabled: {}", relay_service_enabled);
        info!(
            "  Coordinator service enabled: {}",
            coordinator_service_enabled
        );
        info!("  Bootstrap service enabled: {}", bootstrap_service_enabled);
    }
}
/// Emits the end-of-run statistics, either as a single JSON object on stdout
/// (`json == true`) or as a human-readable block through the `info!` log.
fn print_final_stats(stats: &RuntimeStats, duration: Duration, json: bool) {
    // Snapshot every counter once up front so both output formats report
    // the same values.
    let sent = stats.bytes_sent.load(Ordering::SeqCst);
    let received = stats.bytes_received.load(Ordering::SeqCst);
    let accepted = stats.connections_accepted.load(Ordering::SeqCst);
    let initiated = stats.connections_initiated.load(Ordering::SeqCst);
    let traversals = stats.nat_traversals_completed.load(Ordering::SeqCst);
    let discovered = stats.external_addresses_discovered.load(Ordering::SeqCst);
    let counters_out = stats.counters_sent.load(Ordering::SeqCst);
    let counters_in = stats.counters_received.load(Ordering::SeqCst);
    let echo_count = stats.echoes_sent.load(Ordering::SeqCst);
    let elapsed = duration.as_secs_f64();
    if json {
        println!(
            r#"{{"type":"final_stats","duration_secs":{:.2},"bytes_sent":{},"bytes_received":{},"connections_accepted":{},"connections_initiated":{},"nat_traversals":{},"external_addresses":{},"counters_sent":{},"counters_received":{},"echoes_sent":{}}}"#,
            elapsed,
            sent,
            received,
            accepted,
            initiated,
            traversals,
            discovered,
            counters_out,
            counters_in,
            echo_count,
        );
        return;
    }
    info!("═══════════════════════════════════════════════════════════════");
    info!(" FINAL STATISTICS");
    info!("═══════════════════════════════════════════════════════════════");
    info!(" Duration: {:.2}s", elapsed);
    info!(" Connections accepted: {}", accepted);
    info!(" Connections initiated: {}", initiated);
    info!(" NAT traversals: {}", traversals);
    info!(" External addresses: {}", discovered);
    info!(" Bytes sent: {}", format_bytes(sent));
    info!(" Bytes received: {}", format_bytes(received));
    // Counter/echo lines only appear when those test modes produced traffic.
    if counters_out > 0 || counters_in > 0 {
        info!(" Counters sent: {}", counters_out);
        info!(" Counters received: {}", counters_in);
    }
    if echo_count > 0 {
        info!(" Echoes sent: {}", echo_count);
    }
    // Guard against division by zero for a zero-length run.
    if elapsed > 0.0 {
        let rate = (sent + received) as f64 / elapsed;
        info!(" Throughput: {}/s", format_bytes(rate as u64));
    }
    info!("═══════════════════════════════════════════════════════════════");
}
/// Sends a single `data_size`-byte payload (all 0xAB) to `peer_id` and logs
/// the measured send throughput.
///
/// A failed send is logged, not returned: this always yields `Ok(())`.
async fn run_throughput_test(
    endpoint: &P2pEndpoint,
    peer_id: &PeerId,
    data_size: usize,
) -> anyhow::Result<()> {
    info!("Starting throughput test ({} bytes)...", data_size);
    let payload = vec![0xABu8; data_size];
    let started = Instant::now();
    if let Err(e) = endpoint.send(peer_id, &payload).await {
        error!("Throughput test failed: {}", e);
        return Ok(());
    }
    let elapsed = started.elapsed();
    // Bytes per second over the wall-clock send duration.
    let rate = data_size as f64 / elapsed.as_secs_f64();
    info!(
        "Throughput test complete: {} in {:.2}ms ({}/s)",
        format_bytes(data_size as u64),
        elapsed.as_secs_f64() * 1000.0,
        format_bytes(rate as u64)
    );
    Ok(())
}
fn format_peer_id(peer_id: &PeerId) -> String {
let bytes = &peer_id.0;
hex::encode(&bytes[..8])
}
/// Parses a 64-character hex string into a 32-byte `PeerId`.
///
/// Errors on wrong input length, non-hex characters, or a decoded length
/// other than 32 bytes (the last check is defensive: a valid 64-char hex
/// string always decodes to 32 bytes).
fn parse_peer_id_hex(value: &str) -> anyhow::Result<PeerId> {
    anyhow::ensure!(
        value.len() == 64,
        "expected 64 hex characters for a 32-byte peer ID, got {}",
        value.len()
    );
    let decoded = hex::decode(value).map_err(|e| anyhow::anyhow!("invalid hex peer ID: {}", e))?;
    anyhow::ensure!(
        decoded.len() == 32,
        "expected 32 decoded bytes for peer ID, got {}",
        decoded.len()
    );
    let mut id_bytes = [0u8; 32];
    id_bytes.copy_from_slice(&decoded);
    Ok(PeerId(id_bytes))
}
/// Formats a byte count as a human-readable string using binary units
/// (1 KB = 1024 bytes here), with two decimal places above the byte range.
fn format_bytes(bytes: u64) -> String {
    // Largest unit first; fall through to plain bytes below 1024.
    const UNITS: [(u64, &str); 3] = [(1 << 30, "GB"), (1 << 20, "MB"), (1 << 10, "KB")];
    for (scale, suffix) in UNITS {
        if bytes >= scale {
            return format!("{:.2} {}", bytes as f64 / scale as f64, suffix);
        }
    }
    format!("{} B", bytes)
}
/// Builds a `NodeMetricsReport` snapshot for periodic metrics reporting.
///
/// `prev_bytes` / `prev_time` carry state between calls so the function can
/// compute an instantaneous throughput over the interval since the previous
/// report; both are advanced before returning.
async fn build_metrics_report(
    node_id: &str,
    location: &str,
    start_time: Instant,
    endpoint: &P2pEndpoint,
    stats: &RuntimeStats,
    peer_states: &RwLock<HashMap<PeerId, PeerState>>,
    external_addrs: &RwLock<Vec<TransportAddr>>,
    prev_bytes: &mut u64,
    prev_time: &mut Instant,
) -> NodeMetricsReport {
    // Wall-clock timestamp for the report; 0 if the clock reads before the epoch.
    let now_secs = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|d| d.as_secs())
        .unwrap_or(0);
    let bytes_sent = stats.bytes_sent.load(Ordering::SeqCst);
    let bytes_received = stats.bytes_received.load(Ordering::SeqCst);
    let total_bytes = bytes_sent + bytes_received;
    // Throughput in Mbit/s over the window since the previous report;
    // 0.0 if no measurable time has passed (avoids division by zero).
    let elapsed = prev_time.elapsed().as_secs_f64();
    let throughput_mbps = if elapsed > 0.0 {
        let bytes_diff = total_bytes.saturating_sub(*prev_bytes);
        (bytes_diff as f64 * 8.0) / (elapsed * 1_000_000.0)
    } else {
        0.0
    };
    // Roll the measurement window forward for the next call.
    *prev_bytes = total_bytes;
    *prev_time = Instant::now();
    let endpoint_stats = endpoint.stats().await;
    let peers = endpoint.connected_peers().await;
    let peer_states_read = peer_states.read().await;
    // Join the endpoint's live peer list with locally tracked per-peer state;
    // peers with no tracked state fall back to zeroed counters and "direct".
    let connected_peers: Vec<PeerInfo> = peers
        .iter()
        .map(|p| {
            let state = peer_states_read.get(&p.peer_id);
            PeerInfo {
                // Short (first 8 bytes) hex form of the peer ID, matching the
                // display convention used elsewhere in this file.
                peer_id: hex::encode(&p.peer_id.0[..8]),
                remote_addr: p.remote_addr.to_string(),
                // Seconds since the connection was established.
                connected_at: state
                    .map(|s| s.connected_at.elapsed().as_secs())
                    .unwrap_or(0),
                bytes_sent: state.map(|s| s.bytes_sent).unwrap_or(0),
                bytes_received: state.map(|s| s.bytes_received).unwrap_or(0),
                connection_type: state
                    .map(|s| s.connection_type.clone())
                    .unwrap_or_else(|| "direct".to_string()),
            }
        })
        .collect();
    let external_addresses: Vec<String> = external_addrs
        .read()
        .await
        .iter()
        .map(|a| a.to_string())
        .collect();
    let local_addr = endpoint
        .local_addr()
        .map(|a| a.to_string())
        .unwrap_or_else(|| "unknown".to_string());
    NodeMetricsReport {
        node_id: node_id.to_string(),
        location: location.to_string(),
        timestamp: now_secs,
        uptime_secs: start_time.elapsed().as_secs(),
        active_connections: endpoint_stats.active_connections,
        bytes_sent_total: bytes_sent,
        bytes_received_total: bytes_received,
        current_throughput_mbps: throughput_mbps,
        nat_traversal_successes: stats.nat_traversals_completed.load(Ordering::SeqCst),
        nat_traversal_failures: stats.nat_traversals_failed.load(Ordering::SeqCst),
        direct_connections: stats.direct_connections.load(Ordering::SeqCst),
        relayed_connections: stats.relayed_connections.load(Ordering::SeqCst),
        data_chunks_sent: stats.data_chunks_sent.load(Ordering::SeqCst),
        data_chunks_verified: stats.data_chunks_verified.load(Ordering::SeqCst),
        data_verification_failures: stats.data_verification_failures.load(Ordering::SeqCst),
        external_addresses,
        connected_peers,
        local_addr,
    }
}
/// Dispatches a parsed top-level CLI subcommand to its handler.
///
/// Errors from the handlers propagate unchanged to the caller.
async fn handle_command(command: Command) -> anyhow::Result<()> {
    match command {
        // `identity`: show/wipe/fingerprint the persistent host identity.
        Command::Identity { action } => handle_identity_command(action).await,
        // `cache`: inspect or clear the encrypted bootstrap cache.
        Command::Cache { action } => handle_cache_command(action).await,
        // `doctor`: run local environment health checks.
        Command::Doctor => handle_doctor_command().await,
    }
}
fn expand_tilde(path: &std::path::Path) -> PathBuf {
let path_str = path.to_string_lossy();
if let Some(stripped) = path_str.strip_prefix("~/")
&& let Some(home) = dirs::home_dir()
{
return home.join(stripped);
}
path.to_path_buf()
}
/// Handles `identity` subcommands: `show` (print identity details), `wipe`
/// (delete the stored identity and on-disk keypair files, with confirmation),
/// and `fingerprint` (print only the fingerprint, exiting non-zero when no
/// identity exists).
async fn handle_identity_command(action: IdentityAction) -> anyhow::Result<()> {
    match action {
        IdentityAction::Show {
            all_networks,
            data_dir,
        } => {
            // Expand a leading "~" in the CLI-supplied data directory.
            let data_dir = expand_tilde(&data_dir);
            println!("═══════════════════════════════════════════════════════════════");
            println!(" HOST IDENTITY");
            println!("═══════════════════════════════════════════════════════════════");
            let storage_selection = auto_storage()?;
            match storage_selection.storage.load() {
                Ok(secret) => {
                    let host = HostIdentity::from_secret(secret);
                    println!("Fingerprint: {}", host.fingerprint());
                    println!("Policy: {:?}", host.policy());
                    println!("Storage: {}", storage_selection.storage.backend_name());
                    println!("Security: {:?}", storage_selection.security_level);
                    // Surface any caveat the storage backend reports about its
                    // security level.
                    if let Some(warning) = storage_selection.security_level.warning_message() {
                        println!();
                        println!("{}", warning);
                    }
                    println!("Data Directory: {}", data_dir.display());
                    // With --all-networks, also list per-network keypair files
                    // found in the data directory.
                    if all_networks {
                        println!();
                        println!("Stored Endpoint Keypairs:");
                        if data_dir.exists() {
                            let mut found = false;
                            if let Ok(entries) = std::fs::read_dir(&data_dir) {
                                for entry in entries.flatten() {
                                    let name = entry.file_name();
                                    let name_str = name.to_string_lossy();
                                    // Keypair files are named
                                    // "<network-id-hex>_keypair.enc".
                                    if name_str.ends_with("_keypair.enc") {
                                        let network_id_hex =
                                            name_str.trim_end_matches("_keypair.enc");
                                        println!(" - Network: {}", network_id_hex);
                                        found = true;
                                    }
                                }
                            }
                            if !found {
                                println!(" (none)");
                            }
                        } else {
                            println!(" (data directory not found)");
                        }
                    }
                }
                Err(e) => {
                    println!("No host identity found.");
                    println!("Error: {}", e);
                    println!();
                    println!("A new identity will be created when you first run the node.");
                }
            }
            println!("═══════════════════════════════════════════════════════════════");
        }
        IdentityAction::Wipe { force, data_dir } => {
            let data_dir = expand_tilde(&data_dir);
            // Destructive operation: require an explicit typed confirmation
            // unless --force was given.
            if !force {
                println!(
                    "WARNING: This will permanently delete your host identity and all derived keys!"
                );
                println!("All stored endpoint keypairs will be lost.");
                println!();
                print!("Type 'DELETE' to confirm: ");
                use std::io::Write;
                std::io::stdout().flush()?;
                let mut input = String::new();
                std::io::stdin().read_line(&mut input)?;
                if input.trim() != "DELETE" {
                    println!("Aborted.");
                    return Ok(());
                }
            }
            let storage_selection = auto_storage()?;
            if storage_selection.storage.exists() {
                storage_selection.storage.delete()?;
                println!("Host identity deleted from secure storage.");
            } else {
                println!("No host identity found in secure storage.");
            }
            // Also remove any per-network encrypted keypair files on disk;
            // individual deletion failures are silently skipped and simply
            // not counted.
            if data_dir.exists() {
                let mut deleted = 0;
                if let Ok(entries) = std::fs::read_dir(&data_dir) {
                    for entry in entries.flatten() {
                        let name = entry.file_name();
                        let name_str = name.to_string_lossy();
                        if name_str.ends_with("_keypair.enc")
                            && std::fs::remove_file(entry.path()).is_ok()
                        {
                            deleted += 1;
                        }
                    }
                }
                println!("Deleted {} encrypted keypair file(s).", deleted);
            }
            println!("Identity wiped. A new identity will be created on next run.");
        }
        IdentityAction::Fingerprint => {
            let storage_selection = auto_storage()?;
            match storage_selection.storage.load() {
                Ok(secret) => {
                    let host = HostIdentity::from_secret(secret);
                    println!("{}", host.fingerprint());
                }
                Err(_) => {
                    // Script-friendly failure: message on stderr, non-zero exit.
                    eprintln!("No host identity found.");
                    std::process::exit(1);
                }
            }
        }
    }
    Ok(())
}
/// Handles `cache` subcommands: report on-disk stats for the encrypted
/// bootstrap cache, or delete it (with interactive confirmation).
async fn handle_cache_command(action: CacheAction) -> anyhow::Result<()> {
    match action {
        CacheAction::Stats { data_dir } => {
            let cache_file = expand_tilde(&data_dir).join("bootstrap_cache.enc");
            println!("═══════════════════════════════════════════════════════════════");
            println!(" BOOTSTRAP CACHE STATS");
            println!("═══════════════════════════════════════════════════════════════");
            println!("Cache file: {}", cache_file.display());
            // Guard clause: nothing more to report without a cache file.
            if !cache_file.exists() {
                println!("Cache file not found.");
                println!();
                println!("A new cache will be created when you run the node.");
                println!("═══════════════════════════════════════════════════════════════");
                return Ok(());
            }
            let metadata = std::fs::metadata(&cache_file)?;
            println!("File size: {} bytes", metadata.len());
            // Report age at a human scale; skip silently if mtime is unavailable.
            if let Ok(modified) = metadata.modified()
                && let Ok(age) = modified.elapsed()
            {
                let secs = age.as_secs();
                if secs < 60 {
                    println!("Last modified: {}s ago", secs);
                } else if secs < 3600 {
                    println!("Last modified: {}m ago", secs / 60);
                } else if secs < 86400 {
                    println!("Last modified: {}h ago", secs / 3600);
                } else {
                    println!("Last modified: {}d ago", secs / 86400);
                }
            }
            println!();
            println!("Note: Cache is encrypted. Detailed stats require decryption");
            println!("which needs a running node with host identity.");
            println!("═══════════════════════════════════════════════════════════════");
        }
        CacheAction::Clear { force, data_dir } => {
            let cache_file = expand_tilde(&data_dir).join("bootstrap_cache.enc");
            if !cache_file.exists() {
                println!("No cache file found at {}", cache_file.display());
                return Ok(());
            }
            // Destructive: require a typed confirmation unless --force.
            if !force {
                println!("WARNING: This will delete your bootstrap cache.");
                println!("You will need to rediscover peers on next run.");
                println!();
                print!("Type 'CLEAR' to confirm: ");
                use std::io::Write;
                std::io::stdout().flush()?;
                let mut answer = String::new();
                std::io::stdin().read_line(&mut answer)?;
                if answer.trim() != "CLEAR" {
                    println!("Aborted.");
                    return Ok(());
                }
            }
            std::fs::remove_file(&cache_file)?;
            println!("Bootstrap cache cleared.");
        }
    }
    Ok(())
}
/// Horizontal rule used to frame doctor report sections.
const DOCTOR_RULE: &str = "═══════════════════════════════════════════════════════════════";

/// Total number of environment checks `handle_doctor_command` performs;
/// keep in sync with the checks below so the summary count stays accurate.
const DOCTOR_TOTAL_CHECKS: usize = 6;

/// Prints the doctor summary footer: pass count plus any collected issues.
fn print_doctor_summary(passed: usize, issues: &[String]) {
    println!();
    println!("{}", DOCTOR_RULE);
    println!(" SUMMARY");
    println!("{}", DOCTOR_RULE);
    println!("Checks passed: {}/{}", passed, DOCTOR_TOTAL_CHECKS);
    if issues.is_empty() {
        println!();
        println!("All checks passed! Your system is ready to run ant-quic.");
    } else {
        println!();
        println!("Issues found:");
        for issue in issues {
            println!(" ! {}", issue);
        }
    }
    println!("{}", DOCTOR_RULE);
}

/// Runs local environment health checks (identity storage, host identity,
/// data directory, bootstrap cache, UDP bind, bootstrap DNS) and prints a
/// report with a summary of any issues found.
///
/// Always returns `Ok(())`: problems are reported to the user, not raised.
async fn handle_doctor_command() -> anyhow::Result<()> {
    println!("{}", DOCTOR_RULE);
    println!(" ANT-QUIC DOCTOR");
    println!("{}", DOCTOR_RULE);
    println!();
    let mut issues: Vec<String> = Vec::new();
    let mut passed = 0;

    // Check 1: the secure storage backend is reachable. Without it the
    // identity check cannot run, so summarize and stop early.
    print!("Checking host identity storage... ");
    let storage_selection = match auto_storage() {
        Ok(s) => {
            println!("{} ({:?})", s.storage.backend_name(), s.security_level);
            if let Some(warning) = s.security_level.warning_message() {
                println!();
                println!("{}", warning);
                println!();
            }
            passed += 1;
            s
        }
        Err(e) => {
            println!("FAILED: {}", e);
            issues.push("Cannot access host identity storage.".to_string());
            // Fix: previously this returned without printing the summary,
            // so the issue recorded above was never shown to the user.
            print_doctor_summary(passed, &issues);
            return Ok(());
        }
    };

    // Check 2: a host identity exists in the selected storage.
    print!("Checking host identity... ");
    match storage_selection.storage.load() {
        Ok(secret) => {
            let host = HostIdentity::from_secret(secret);
            println!("OK (fingerprint: {})", host.fingerprint());
            passed += 1;
        }
        Err(_) => {
            println!("NOT FOUND");
            issues.push("No host identity found. One will be created on first run.".to_string());
        }
    }

    // Check 3: the data directory (~/.ant-quic) exists.
    print!("Checking data directory... ");
    let data_dir = dirs::home_dir()
        .map(|h| h.join(".ant-quic"))
        .unwrap_or_else(|| PathBuf::from(".ant-quic"));
    if data_dir.exists() {
        println!("OK ({})", data_dir.display());
        passed += 1;
    } else {
        println!("NOT FOUND");
        issues.push("Data directory not found. It will be created on first run.".to_string());
    }

    // Check 4: an encrypted bootstrap cache is present.
    print!("Checking bootstrap cache... ");
    let cache_file = data_dir.join("bootstrap_cache.enc");
    if cache_file.exists() {
        let size = std::fs::metadata(&cache_file).map(|m| m.len()).unwrap_or(0);
        println!("OK ({} bytes)", size);
        passed += 1;
    } else {
        println!("NOT FOUND");
        issues.push("No bootstrap cache. Peers will be discovered on first run.".to_string());
    }

    // Check 5: a UDP socket can be bound (basic network capability).
    print!("Checking network... ");
    match tokio::net::UdpSocket::bind("[::]:0").await {
        Ok(socket) => {
            let addr = socket
                .local_addr()
                .unwrap_or_else(|_| std::net::SocketAddr::from(([0, 0, 0, 0, 0, 0, 0, 0], 0)));
            println!("OK (can bind UDP on {})", addr);
            passed += 1;
        }
        Err(e) => {
            println!("FAILED");
            issues.push(format!("Cannot bind UDP socket: {}", e));
        }
    }

    // Check 6: the default bootstrap hostnames resolve. Partial resolution
    // still counts as a pass; only total failure is flagged as an issue.
    print!("Checking DNS resolution... ");
    let mut dns_ok = 0;
    for node in DEFAULT_BOOTSTRAP_NODES {
        if tokio::net::lookup_host(node).await.is_ok() {
            dns_ok += 1;
        }
    }
    if dns_ok == DEFAULT_BOOTSTRAP_NODES.len() {
        println!("OK ({} nodes resolved)", dns_ok);
        passed += 1;
    } else if dns_ok > 0 {
        println!(
            "PARTIAL ({}/{} nodes resolved)",
            dns_ok,
            DEFAULT_BOOTSTRAP_NODES.len()
        );
        passed += 1;
    } else {
        println!("FAILED");
        issues.push("Cannot resolve any bootstrap nodes. Check your DNS settings.".to_string());
    }

    print_doctor_summary(passed, &issues);
    Ok(())
}