use anyhow::{Context, Result};
use chrono::{DateTime, Duration, Utc};
use clap::Args;
use flate2::Compression;
use flate2::write::GzEncoder;
use freenet::config::ConfigPaths;
use freenet::tracing::tracer::get_log_dir;
use freenet_stdlib::client_api::{
ClientRequest, HostResponse, NodeDiagnosticsConfig, NodeQuery, QueryResponse, WebApi,
};
use serde::{Deserialize, Serialize};
use std::fs;
use std::io::{self, BufRead, BufReader, Write};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration as StdDuration;
use tokio_tungstenite::connect_async;
// Default endpoint the gzipped JSON report is POSTed to (overridable via `--server`).
const DEFAULT_REPORT_SERVER: &str = "https://nova.locut.us/api/reports";
// Only log lines stamped within this many minutes are included in a report.
const LOG_RETENTION_MINUTES: i64 = 30;
// Upper bound on a merged log payload; oldest bytes are trimmed to fit.
const MAX_TOTAL_LOG_SIZE: usize = 2 * 1024 * 1024;
// Individual log lines longer than this are truncated (with a marker appended).
const MAX_LINE_LENGTH: usize = 10 * 1024;
// Fallback WebSocket API port when none can be parsed from the node config.
const DEFAULT_WS_API_PORT: u16 = 7509;
// Per-attempt timeout when querying the local node's diagnostics endpoint.
const WS_TIMEOUT_SECS: u64 = 15;
// Extra attempts (beyond the first) per host after a diagnostics query times out.
const WS_RETRY_ATTEMPTS: u32 = 1;
// Loopback hosts tried in order (IPv4 first, then IPv6) when contacting the node.
const LOOPBACK_HOSTS: &[&str] = &["127.0.0.1", "[::1]"];
/// CLI arguments for `freenet report`: collects diagnostics and either
/// uploads them to a report server or writes them to a local file.
#[derive(Args, Debug, Clone)]
pub struct ReportCommand {
    /// Save the report as JSON to this path instead of uploading it.
    #[arg(long, value_name = "PATH")]
    pub local: Option<PathBuf>,
    /// Message describing the issue; if absent (and `--no-message` is not
    /// set), the user is prompted interactively.
    #[arg(long, short = 'm')]
    pub message: Option<String>,
    /// Skip the interactive message prompt entirely.
    #[arg(long)]
    pub no_message: bool,
    /// Report server URL to upload to.
    #[arg(long, default_value = DEFAULT_REPORT_SERVER)]
    pub server: String,
}
/// Top-level payload sent to the report server (gzipped JSON).
#[derive(Serialize, Deserialize, Debug)]
pub struct DiagnosticReport {
    /// RFC 3339 timestamp taken on the client when the report was assembled.
    pub client_timestamp: String,
    pub system_info: SystemInfo,
    pub version_info: VersionInfo,
    pub logs: LogContents,
    /// Raw `config.toml` contents, if a config file was found.
    pub config: Option<String>,
    /// Pretty-printed node diagnostics JSON, if the local node was reachable.
    pub network_status: Option<String>,
    /// Why the node diagnostics query failed, when `network_status` is `None`.
    /// Skipped during serialization when absent (keeps older payloads stable).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub network_status_error: Option<String>,
    /// Free-form issue description supplied by the user.
    pub user_message: Option<String>,
}
/// Basic host platform details included in every report.
#[derive(Serialize, Deserialize, Debug)]
pub struct SystemInfo {
    /// Compile-time OS name (`std::env::consts::OS`).
    pub os: String,
    /// Compile-time CPU architecture (`std::env::consts::ARCH`).
    pub arch: String,
    /// Machine hostname, or `"unknown"` if it cannot be read.
    pub hostname: String,
}
/// Build provenance of the running binary.
#[derive(Serialize, Deserialize, Debug)]
pub struct VersionInfo {
    pub version: String,
    pub git_commit: String,
    /// True when the binary was built from a dirty working tree.
    pub git_dirty: bool,
    pub build_timestamp: String,
}
/// Filtered log excerpts plus size accounting for the summary display.
#[derive(Serialize, Deserialize, Debug)]
pub struct LogContents {
    /// Merged, retention-filtered main log text (`None` if nothing matched).
    pub main_log: Option<String>,
    /// Merged, retention-filtered error log text (`None` if nothing matched).
    pub error_log: Option<String>,
    /// Size in bytes of the filtered `main_log` actually included.
    pub main_log_size_bytes: u64,
    /// Size in bytes of the filtered `error_log` actually included.
    pub error_log_size_bytes: u64,
    /// Total on-disk size of the main log files before filtering.
    /// `#[serde(default)]` keeps deserialization of older payloads working.
    #[serde(default)]
    pub main_log_original_size_bytes: u64,
    /// Total on-disk size of the error log files before filtering.
    #[serde(default)]
    pub error_log_original_size_bytes: u64,
}
/// Minimal shape of the report server's success response.
#[derive(Deserialize, Debug)]
struct UploadResponse {
    /// Short code the user shares with the team to reference this report.
    code: String,
}
impl ReportCommand {
    /// Entry point for `freenet report`: collects diagnostics, prints a
    /// summary, then either saves the report locally (`--local`) or uploads
    /// it to the configured report server.
    pub fn run(
        &self,
        version: &str,
        git_commit: &str,
        git_dirty: &str,
        build_timestamp: &str,
        config_dirs: Arc<ConfigPaths>,
    ) -> Result<()> {
        println!("Collecting diagnostic info...");
        let report = self.collect_report(
            version,
            git_commit,
            git_dirty,
            build_timestamp,
            &config_dirs,
        )?;
        self.print_summary(&report);
        if let Some(ref path) = self.local {
            self.save_local(&report, path)?;
        } else {
            // Upload is async; spin up a runtime just for that call.
            let rt = tokio::runtime::Runtime::new()?;
            rt.block_on(self.upload_report(&report))?;
        }
        Ok(())
    }

    /// Assembles all report sections: system/version info, recent logs,
    /// config file contents, live node diagnostics, and the user's message.
    fn collect_report(
        &self,
        version: &str,
        git_commit: &str,
        git_dirty: &str,
        build_timestamp: &str,
        config_dirs: &ConfigPaths,
    ) -> Result<DiagnosticReport> {
        let system_info = SystemInfo {
            os: std::env::consts::OS.to_string(),
            arch: std::env::consts::ARCH.to_string(),
            hostname: hostname::get()
                .map(|h| h.to_string_lossy().to_string())
                .unwrap_or_else(|_| "unknown".to_string()),
        };
        let version_info = VersionInfo {
            version: version.to_string(),
            git_commit: git_commit.to_string(),
            // The build pipeline passes dirtiness as the literal suffix
            // " (dirty)" — presumably appended by the build script; verify
            // against the caller if this ever changes.
            git_dirty: git_dirty == " (dirty)",
            build_timestamp: build_timestamp.to_string(),
        };
        let logs = self.collect_logs(config_dirs.log_dir().map(Path::to_path_buf))?;
        let config = self.collect_config(&config_dirs.config_dir());
        // Node diagnostics are best-effort: record the error text in the
        // report instead of failing the whole command.
        let (network_status, network_status_error) = match self.collect_network_status(&config) {
            Ok(diag) => (Some(diag), None),
            Err(e) => (None, Some(e)),
        };
        let user_message = self.get_user_message()?;
        let client_timestamp = chrono::Utc::now().to_rfc3339();
        Ok(DiagnosticReport {
            client_timestamp,
            system_info,
            version_info,
            logs,
            config,
            network_status,
            network_status_error,
            user_message,
        })
    }

    /// Finds the node's main and error log files, merges rotations, and
    /// applies the retention filter. Errors only if no log directory can be
    /// determined for this platform.
    fn collect_logs(&self, log_dir: Option<PathBuf>) -> Result<LogContents> {
        let log_dir = log_dir
            .or_else(get_log_dir)
            .context("Unsupported platform for log collection")?;
        let main_log_files = find_log_files(&log_dir, "freenet");
        let error_log_files = find_log_files(&log_dir, "freenet.error");
        let (main_log, main_log_original_size) = read_and_merge_log_files(&main_log_files);
        let (error_log, error_log_original_size) = read_and_merge_log_files(&error_log_files);
        let main_log_size = main_log.as_ref().map(|s| s.len() as u64).unwrap_or(0);
        let error_log_size = error_log.as_ref().map(|s| s.len() as u64).unwrap_or(0);
        Ok(LogContents {
            main_log,
            error_log,
            main_log_size_bytes: main_log_size,
            error_log_size_bytes: error_log_size,
            main_log_original_size_bytes: main_log_original_size,
            error_log_original_size_bytes: error_log_original_size,
        })
    }

    /// Returns the contents of the first readable `config.toml` found among
    /// the known candidate locations, or `None` if none exists.
    fn collect_config(&self, config_dir: &Path) -> Option<String> {
        // Candidate paths in priority order: explicit config dir, the
        // platform config dir, then the legacy ~/.config fallback.
        let config_paths = [
            Some(config_dir.join("config.toml")),
            dirs::config_dir().map(|p| p.join("freenet").join("config.toml")),
            dirs::home_dir().map(|p| p.join(".config").join("freenet").join("config.toml")),
        ];
        for path in config_paths.into_iter().flatten() {
            if path.exists() {
                if let Ok(content) = fs::read_to_string(&path) {
                    return Some(content);
                }
            }
        }
        None
    }

    /// Queries the local node's diagnostics over its WebSocket API.
    /// Returns the pretty-printed diagnostics JSON, or an error string
    /// describing why every connection attempt failed.
    fn collect_network_status(&self, config_content: &Option<String>) -> Result<String, String> {
        let ws_port = config_content
            .as_ref()
            .and_then(|c| parse_ws_port_from_config(c))
            .unwrap_or(DEFAULT_WS_API_PORT);
        // This method is called from sync code, so it owns its own runtime.
        let rt = tokio::runtime::Runtime::new()
            .map_err(|e| format!("failed to create tokio runtime: {e}"))?;
        // Worst case: every host exhausts all attempts at the full timeout.
        let worst_case_secs =
            LOOPBACK_HOSTS.len() as u64 * (WS_RETRY_ATTEMPTS as u64 + 1) * WS_TIMEOUT_SECS;
        print!("  Querying local node diagnostics (up to {worst_case_secs}s)... ");
        io::stdout().flush().ok();
        let result = rt.block_on(query_with_fallback(
            ws_port,
            WS_RETRY_ATTEMPTS,
            StdDuration::from_secs(WS_TIMEOUT_SECS),
        ));
        match &result {
            Ok(_) => println!("ok"),
            Err(e) => println!("unreachable ({e})"),
        }
        result
    }

    /// Resolves the user's issue description: `--message` wins, `--no-message`
    /// suppresses the prompt, otherwise read multi-line input from stdin
    /// (terminated by an empty line).
    fn get_user_message(&self) -> Result<Option<String>> {
        if let Some(ref msg) = self.message {
            return Ok(Some(msg.clone()));
        }
        if self.no_message {
            return Ok(None);
        }
        println!();
        println!(
            "What issue are you experiencing? (Enter on empty line to finish, or just Enter to skip)"
        );
        print!("> ");
        io::stdout().flush()?;
        let stdin = io::stdin();
        let mut lines = Vec::new();
        for line in stdin.lock().lines() {
            let line = line.context("Failed to read input")?;
            if line.is_empty() {
                break;
            }
            lines.push(line);
            print!("> ");
            io::stdout().flush()?;
        }
        if lines.is_empty() {
            Ok(None)
        } else {
            Ok(Some(lines.join("\n")))
        }
    }

    /// Prints a human-readable overview of what was collected before the
    /// report is saved or uploaded.
    fn print_summary(&self, report: &DiagnosticReport) {
        println!(
            "  - Version: {} ({}{})",
            report.version_info.version,
            report.version_info.git_commit,
            if report.version_info.git_dirty {
                " dirty"
            } else {
                ""
            }
        );
        println!(
            "  - OS: {} {}",
            report.system_info.os, report.system_info.arch
        );
        let filtered_size = report.logs.main_log_size_bytes + report.logs.error_log_size_bytes;
        let original_size =
            report.logs.main_log_original_size_bytes + report.logs.error_log_original_size_bytes;
        // Mention the retention window only when filtering actually shrank
        // the payload; otherwise just show the total size.
        if original_size > filtered_size && filtered_size > 0 {
            println!(
                "  - Logs: {} (last {} min, {} total on disk)",
                format_bytes(filtered_size),
                LOG_RETENTION_MINUTES,
                format_bytes(original_size)
            );
        } else {
            println!("  - Logs: {}", format_bytes(original_size));
        }
        println!(
            "  - Config: {}",
            if report.config.is_some() {
                "found"
            } else {
                "not found"
            }
        );
        match (&report.network_status, &report.network_status_error) {
            (Some(_), _) => println!("  - Node status: running"),
            (None, Some(err)) => println!("  - Node status: unreachable ({err})"),
            (None, None) => println!("  - Node status: not running or unreachable"),
        }
    }

    /// Writes the report as pretty-printed JSON to `path` (`--local` mode).
    fn save_local(&self, report: &DiagnosticReport, path: &PathBuf) -> Result<()> {
        let json = serde_json::to_string_pretty(report)?;
        fs::write(path, &json).context("Failed to write report to file")?;
        println!();
        println!("Report saved to: {}", path.display());
        Ok(())
    }

    /// Gzips the JSON report and POSTs it to the report server, then prints
    /// the report code returned by the server.
    async fn upload_report(&self, report: &DiagnosticReport) -> Result<()> {
        println!();
        print!("Uploading report...");
        io::stdout().flush()?;
        let json = serde_json::to_vec(report)?;
        // Compress client-side; the server is told via Content-Encoding.
        let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
        encoder.write_all(&json)?;
        let compressed = encoder.finish()?;
        let client = reqwest::Client::builder()
            .user_agent("freenet-report")
            .build()?;
        let response = client
            .post(&self.server)
            .header("Content-Type", "application/json")
            .header("Content-Encoding", "gzip")
            .body(compressed)
            .send()
            .await
            .context("Failed to upload report")?;
        if !response.status().is_success() {
            let status = response.status();
            let body = response.text().await.unwrap_or_default();
            anyhow::bail!("Upload failed: {} - {}", status, body);
        }
        let upload_response: UploadResponse = response
            .json()
            .await
            .context("Failed to parse upload response")?;
        println!(" done");
        println!();
        println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
        println!("  Report code: {}", upload_response.code);
        println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
        println!();
        println!("Share this code with the Freenet team on Matrix.");
        Ok(())
    }
}
/// Collects log files in `log_dir` belonging to `prefix`, newest first.
///
/// Matches the legacy non-rotated name (`<prefix>.log`) plus rotated names
/// of the form `<prefix>.YYYY-MM-DD.log` (daily) or `<prefix>.YYYY-MM-DD-HH.log`
/// (hourly). Files for other prefixes — including `freenet.error.*` when the
/// prefix is `freenet` — are excluded because their date segment would not be
/// exactly 11 or 14 characters.
///
/// Changed from `&PathBuf` to the more general `&Path`; `&PathBuf` arguments
/// at existing call sites deref-coerce, so callers are unaffected.
fn find_log_files(log_dir: &Path, prefix: &str) -> Vec<PathBuf> {
    let mut files = Vec::new();
    // Legacy (non-rotated) log file.
    let legacy_path = log_dir.join(format!("{prefix}.log"));
    if legacy_path.exists() {
        files.push(legacy_path);
    }
    // Rotated log files; unreadable directories simply yield no extra files.
    if let Ok(entries) = fs::read_dir(log_dir) {
        for entry in entries.flatten() {
            let path = entry.path();
            let Some(name) = path.file_name().and_then(|n| n.to_str()) else {
                continue;
            };
            // `middle` is what sits between the prefix and the ".log"
            // extension, e.g. ".2025-12-26" or ".2025-12-26-14". The legacy
            // file has an empty middle and was already handled above.
            let Some(middle) = name
                .strip_prefix(prefix)
                .and_then(|rest| rest.strip_suffix(".log"))
            else {
                continue;
            };
            if middle.starts_with('.') && (middle.len() == 11 || middle.len() == 14) {
                files.push(path);
            }
        }
    }
    // Sort newest-first by modification time; files whose metadata cannot be
    // read sort last.
    files.sort_by(|a, b| {
        let a_time = fs::metadata(a).and_then(|m| m.modified()).ok();
        let b_time = fs::metadata(b).and_then(|m| m.modified()).ok();
        b_time.cmp(&a_time)
    });
    files
}
/// Merges the given log files (newest-first input, chronological output) and
/// returns `(merged_filtered_content, total_original_bytes_on_disk)`.
///
/// If the merged text exceeds `MAX_TOTAL_LOG_SIZE`, the oldest bytes are
/// dropped — cutting on a UTF-8 char boundary and then on the next newline —
/// and a marker line records how many bytes were removed.
fn read_and_merge_log_files(files: &[PathBuf]) -> (Option<String>, u64) {
    if files.is_empty() {
        return (None, 0);
    }
    let mut original_bytes = 0u64;
    let mut pieces: Vec<String> = Vec::new();
    // Input is sorted newest-first; iterate in reverse so the merged text
    // reads oldest-to-newest.
    for path in files.iter().rev() {
        let (content, size) = read_log_file(path);
        original_bytes += size;
        pieces.extend(content);
    }
    if pieces.is_empty() {
        return (None, original_bytes);
    }
    let merged = pieces.join("\n");
    if merged.len() <= MAX_TOTAL_LOG_SIZE {
        return (Some(merged), original_bytes);
    }
    // Over budget: figure out how much of the head to discard.
    let skip_bytes = merged.len() - MAX_TOTAL_LOG_SIZE;
    // Back off to a char boundary so slicing cannot split a UTF-8 sequence.
    let mut safe_skip = skip_bytes;
    while safe_skip > 0 && !merged.is_char_boundary(safe_skip) {
        safe_skip -= 1;
    }
    // Advance to just past the next newline so we start on a whole line.
    let cut = merged[safe_skip..]
        .find('\n')
        .map(|pos| safe_skip + pos + 1)
        .unwrap_or(safe_skip);
    let truncated = format!(
        "[... {} bytes truncated to fit size limit ...]\n{}",
        cut,
        &merged[cut..]
    );
    (Some(truncated), original_bytes)
}
/// Reads one log file, keeping only lines stamped within the retention
/// window. Returns `(filtered_content, original_file_size_in_bytes)`.
///
/// Lines without a parseable timestamp inherit the include/exclude decision
/// of the most recent timestamped line, so multi-line entries (stack traces,
/// continuation lines) stay attached to their log record. If the file has no
/// timestamps at all (e.g. a raw panic dump), the whole file is kept.
fn read_log_file(path: &PathBuf) -> (Option<String>, u64) {
    let metadata = match fs::metadata(path) {
        Ok(m) => m,
        Err(_) => return (None, 0),
    };
    let original_size = metadata.len();
    let file = match fs::File::open(path) {
        Ok(f) => f,
        Err(_) => return (None, original_size),
    };
    // Retention cutoff, computed once per file.
    let cutoff = Utc::now() - Duration::minutes(LOG_RETENTION_MINUTES);
    let reader = BufReader::new(file);
    let mut filtered_lines = Vec::new();
    let mut all_lines = Vec::new();
    // Sticky flag: carries the last timestamped line's decision forward to
    // subsequent untimestamped lines.
    let mut include_line = false;
    let mut any_timestamp_found = false;
    for line in reader.lines() {
        let line = match line {
            Ok(l) => l,
            // Skip undecodable (e.g. non-UTF-8) lines.
            Err(_) => continue,
        };
        if let Some(ts) = extract_timestamp(&line) {
            // Try RFC 3339 first, then chrono's default DateTime<Utc> parser.
            if let Ok(parsed) = DateTime::parse_from_rfc3339(&ts)
                .map(|dt| dt.with_timezone(&Utc))
                .or_else(|_| ts.parse::<DateTime<Utc>>())
            {
                any_timestamp_found = true;
                include_line = parsed >= cutoff;
            }
        }
        // Truncate oversized lines on a char boundary, preserving the
        // original byte count in the marker.
        let line = if line.len() > MAX_LINE_LENGTH {
            let truncate_at = line
                .char_indices()
                .take_while(|(i, _)| *i < MAX_LINE_LENGTH)
                .last()
                .map(|(i, c)| i + c.len_utf8())
                .unwrap_or(0);
            format!(
                "{}... [truncated, {} total bytes]",
                &line[..truncate_at],
                line.len()
            )
        } else {
            line
        };
        all_lines.push(line.clone());
        if include_line {
            filtered_lines.push(line);
        }
    }
    // No timestamps anywhere: keep the whole file rather than dropping it.
    let result_lines = if !any_timestamp_found && !all_lines.is_empty() {
        all_lines
    } else {
        filtered_lines
    };
    if result_lines.is_empty() {
        (None, original_size)
    } else {
        (Some(result_lines.join("\n")), original_size)
    }
}
/// Extracts a leading ISO-8601-ish timestamp (`YYYY-MM-DDTHH:MM:SS…`) from a
/// log line, skipping any ANSI SGR color escape sequences that precede it.
/// Returns the timestamp with a trailing `Z` appended if it lacks one, or
/// `None` when the line does not start with a timestamp.
///
/// Bug fix: the previous version sliced `remaining` at a fixed byte offset
/// (`&remaining[..min(30, len)]`), which panics when byte 30 falls inside a
/// multi-byte UTF-8 character; we now back off to a char boundary first.
fn extract_timestamp(line: &str) -> Option<String> {
    // Strip leading ANSI escape sequences of the form ESC '[' ... 'm'
    // (e.g. "\x1b[2m") so colored log output can still be parsed.
    let mut chars = line.chars().peekable();
    while chars.peek() == Some(&'\x1b') {
        chars.next();
        if chars.next() != Some('[') {
            break;
        }
        for c in chars.by_ref() {
            if c == 'm' {
                break;
            }
        }
    }
    let remaining: String = chars.collect();
    if remaining.len() < 19 {
        // Shorter than "YYYY-MM-DDTHH:MM:SS" — cannot contain a timestamp.
        return None;
    }
    // Inspect at most the first 30 bytes, backing off to a char boundary so
    // the slice cannot panic mid-character.
    let mut limit = 30.min(remaining.len());
    while !remaining.is_char_boundary(limit) {
        limit -= 1;
    }
    let potential = &remaining[..limit];
    // Byte-wise structural check; all punctuation here is ASCII, and ASCII
    // bytes never match UTF-8 continuation bytes, so this is safe and cheap.
    let b = potential.as_bytes();
    if b.len() >= 19
        && b[4] == b'-'
        && b[7] == b'-'
        && b[10] == b'T'
        && b[13] == b':'
        && b[16] == b':'
    {
        // Timestamp ends at the first space or escape character.
        let end = potential.find([' ', '\x1b']).unwrap_or(potential.len());
        let ts = &potential[..end];
        if ts.ends_with('Z') {
            return Some(ts.to_string());
        } else {
            // Normalize to UTC-suffixed form for downstream parsing.
            return Some(format!("{}Z", ts));
        }
    }
    None
}
/// Scans config text for a `ws-api-port` / `ws_api_port` assignment and
/// returns the first successfully parsed port.
///
/// This is a deliberate lightweight line scan rather than a full TOML parse.
/// Improvements over the previous version: the key must match exactly
/// (`starts_with` also matched e.g. `ws-api-port-x`), commented-out lines are
/// skipped, and a trailing inline `# comment` after the value is stripped.
fn parse_ws_port_from_config(config: &str) -> Option<u16> {
    for line in config.lines() {
        let line = line.trim();
        // Skip commented-out settings.
        if line.starts_with('#') {
            continue;
        }
        let Some((key, value)) = line.split_once('=') else {
            continue;
        };
        let key = key.trim();
        if key != "ws-api-port" && key != "ws_api_port" {
            continue;
        }
        // Drop any inline comment, then parse the remainder as a port.
        let value = value.split('#').next().unwrap_or(value).trim();
        if let Ok(port) = value.parse::<u16>() {
            return Some(port);
        }
    }
    None
}
/// Tries each loopback host in order, with up to `retry_attempts` additional
/// attempts per host after a timeout. A concrete connection/protocol error
/// skips straight to the next host (no retry — e.g. a refused connection
/// won't succeed on retry); only timeouts are retried. Returns the first
/// successful diagnostics string, or a `;`-joined list of every failure.
async fn query_with_fallback(
    port: u16,
    retry_attempts: u32,
    per_attempt_timeout: StdDuration,
) -> Result<String, String> {
    let mut errors: Vec<String> = Vec::new();
    // Total attempts per host = initial attempt + retries (overflow-safe).
    let total_attempts = retry_attempts.saturating_add(1);
    for host in LOOPBACK_HOSTS {
        for attempt in 0..total_attempts {
            match tokio::time::timeout(per_attempt_timeout, query_node_diagnostics(host, port))
                .await
            {
                // First success wins across all hosts.
                Ok(Ok(diag)) => return Ok(diag),
                Ok(Err(e)) => {
                    // Hard failure: record it and move on to the next host.
                    errors.push(format!("{host}:{port}: {e:#}"));
                    break;
                }
                Err(_) => {
                    // Timeout: record it; the loop retries if attempts remain.
                    errors.push(format!(
                        "{host}:{port}: timed out after {:?} (attempt {}/{})",
                        per_attempt_timeout,
                        attempt + 1,
                        total_attempts
                    ));
                }
            }
        }
    }
    Err(errors.join("; "))
}
/// Connects to the node's WebSocket API at `host:port`, requests a node
/// diagnostics snapshot, and returns it as pretty-printed JSON.
async fn query_node_diagnostics(host: &str, port: u16) -> Result<String> {
    let url = format!("ws://{host}:{port}/v1/contract/command?encodingProtocol=native");
    let (stream, _) = connect_async(&url)
        .await
        .context("Failed to connect to node WebSocket API")?;
    let mut client = WebApi::start(stream);
    // Request everything except subscriber peer ids.
    let config = NodeDiagnosticsConfig {
        include_node_info: true,
        include_network_info: true,
        include_subscriptions: true,
        contract_keys: vec![],
        include_system_metrics: true,
        include_detailed_peer_info: true,
        include_subscriber_peer_ids: false,
    };
    client
        .send(ClientRequest::NodeQueries(NodeQuery::NodeDiagnostics {
            config,
        }))
        .await
        .context("Failed to send diagnostics query")?;
    let response = client
        .recv()
        .await
        .context("Failed to receive diagnostics response")?;
    // Best-effort clean shutdown; a failed disconnect is deliberately ignored.
    let _disconnect = client.send(ClientRequest::Disconnect { cause: None }).await;
    match response {
        HostResponse::QueryResponse(QueryResponse::NodeDiagnostics(diag)) => {
            serde_json::to_string_pretty(&diag).context("Failed to serialize diagnostics")
        }
        // The previous or-pattern listed several variants followed by a
        // trailing `_`, which made those alternatives unreachable/redundant;
        // a single wildcard arm expresses the same behavior.
        _ => anyhow::bail!("Unexpected response from node"),
    }
}
/// Renders a byte count in the largest fitting unit (MB, KB, or raw bytes),
/// with one decimal place for KB/MB.
fn format_bytes(bytes: u64) -> String {
    const KB: u64 = 1024;
    const MB: u64 = KB * 1024;
    match bytes {
        b if b >= MB => format!("{:.1} MB", b as f64 / MB as f64),
        b if b >= KB => format!("{:.1} KB", b as f64 / KB as f64),
        b => format!("{b} bytes"),
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Unit formatting: exact boundaries at 1 KB and 1 MB.
    #[test]
    fn test_format_bytes() {
        assert_eq!(format_bytes(0), "0 bytes");
        assert_eq!(format_bytes(500), "500 bytes");
        assert_eq!(format_bytes(1024), "1.0 KB");
        assert_eq!(format_bytes(1536), "1.5 KB");
        assert_eq!(format_bytes(1048576), "1.0 MB");
        assert_eq!(format_bytes(1572864), "1.5 MB");
    }

    // SystemInfo built from compile-time constants is never empty.
    #[test]
    fn test_system_info() {
        let info = SystemInfo {
            os: std::env::consts::OS.to_string(),
            arch: std::env::consts::ARCH.to_string(),
            hostname: "test".to_string(),
        };
        assert!(!info.os.is_empty());
        assert!(!info.arch.is_empty());
    }

    // A fully-populated report round-trips through serde_json.
    #[test]
    fn test_report_serialization() {
        let report = DiagnosticReport {
            client_timestamp: "2025-01-01T00:00:00Z".to_string(),
            system_info: SystemInfo {
                os: "linux".to_string(),
                arch: "x86_64".to_string(),
                hostname: "test".to_string(),
            },
            version_info: VersionInfo {
                version: "0.1.0".to_string(),
                git_commit: "abc123".to_string(),
                git_dirty: false,
                build_timestamp: "2025-01-01".to_string(),
            },
            logs: LogContents {
                main_log: Some("test log".to_string()),
                error_log: None,
                main_log_size_bytes: 8,
                error_log_size_bytes: 0,
                main_log_original_size_bytes: 100,
                error_log_original_size_bytes: 0,
            },
            config: None,
            network_status: None,
            network_status_error: None,
            user_message: Some("Test message".to_string()),
        };
        let json = serde_json::to_string(&report).unwrap();
        assert!(json.contains("linux"));
        assert!(json.contains("test log"));
    }

    // Plain (uncolored) tracing output parses directly.
    #[test]
    fn test_extract_timestamp_plain() {
        let line = "2025-12-26T17:28:28.636476Z  INFO freenet: Starting";
        assert_eq!(
            extract_timestamp(line),
            Some("2025-12-26T17:28:28.636476Z".to_string())
        );
    }

    // ANSI color escape sequences before the timestamp are skipped.
    #[test]
    fn test_extract_timestamp_with_ansi() {
        let line = "\x1b[2m2025-12-26T17:28:28.636476Z\x1b[0m \x1b[32m INFO\x1b[0m freenet";
        assert_eq!(
            extract_timestamp(line),
            Some("2025-12-26T17:28:28.636476Z".to_string())
        );
    }

    // Continuation lines (e.g. backtrace frames) have no timestamp.
    #[test]
    fn test_extract_timestamp_no_timestamp() {
        let line = "    at crates/core/src/bin/freenet.rs:136";
        assert_eq!(extract_timestamp(line), None);
    }

    // Timestamps without a trailing Z get one appended (normalized to UTC form).
    #[test]
    fn test_extract_timestamp_adds_z_if_missing() {
        let line = "2025-12-26T17:28:28.636476  INFO";
        let result = extract_timestamp(line);
        assert!(result.is_some());
        assert!(result.unwrap().ends_with('Z'));
    }

    // Both key spellings are accepted; unrelated config yields None.
    #[test]
    fn test_parse_ws_port_from_config() {
        let config = r#"
mode = "network"
[ws_api]
ws-api-port = 8080
"#;
        assert_eq!(parse_ws_port_from_config(config), Some(8080));
        let config = "ws_api_port = 9000";
        assert_eq!(parse_ws_port_from_config(config), Some(9000));
        let config = "mode = \"network\"";
        assert_eq!(parse_ws_port_from_config(config), None);
    }

    // Legacy, daily, and hourly rotation names are matched; other prefixes
    // and unrelated files are not.
    #[test]
    fn test_find_log_files_patterns() {
        use std::fs;
        use tempfile::TempDir;
        let temp_dir = TempDir::new().unwrap();
        let log_dir = temp_dir.path().to_path_buf();
        fs::write(log_dir.join("freenet.log"), "legacy").unwrap();
        fs::write(log_dir.join("freenet.2025-12-26.log"), "daily").unwrap();
        fs::write(log_dir.join("freenet.2025-12-26-14.log"), "hourly").unwrap();
        fs::write(log_dir.join("freenet.error.log"), "error legacy").unwrap();
        fs::write(
            log_dir.join("freenet.error.2025-12-26-14.log"),
            "error hourly",
        )
        .unwrap();
        fs::write(log_dir.join("other.log"), "unrelated").unwrap();
        let main_files = find_log_files(&log_dir, "freenet");
        let main_names: Vec<_> = main_files
            .iter()
            .filter_map(|p| p.file_name())
            .filter_map(|n| n.to_str())
            .collect();
        assert!(
            main_names.contains(&"freenet.log"),
            "Should find legacy format"
        );
        assert!(
            main_names.contains(&"freenet.2025-12-26.log"),
            "Should find daily format"
        );
        assert!(
            main_names.contains(&"freenet.2025-12-26-14.log"),
            "Should find hourly format"
        );
        assert!(
            !main_names.iter().any(|n| n.contains("error")),
            "Should not match error logs with 'freenet' prefix"
        );
        assert!(
            !main_names.contains(&"other.log"),
            "Should not match unrelated files"
        );
        let error_files = find_log_files(&log_dir, "freenet.error");
        let error_names: Vec<_> = error_files
            .iter()
            .filter_map(|p| p.file_name())
            .filter_map(|n| n.to_str())
            .collect();
        assert!(
            error_names.contains(&"freenet.error.log"),
            "Should find error legacy format"
        );
        assert!(
            error_names.contains(&"freenet.error.2025-12-26-14.log"),
            "Should find error hourly format"
        );
    }

    // A timestamp-less file (e.g. a raw panic dump) must be kept verbatim.
    #[test]
    fn test_read_log_file_no_timestamps_includes_all_content() {
        use tempfile::TempDir;
        let temp_dir = TempDir::new().unwrap();
        let log_path = temp_dir.path().join("freenet.error.log");
        let panic_content = "\
thread 'main' panicked at 'called `Result::unwrap()` on an `Err` value: AddrInUse', src/main.rs:42
stack backtrace:
0: std::panicking::begin_panic_handler
1: core::panicking::panic_fmt
2: core::result::unwrap_failed
3: freenet::main";
        fs::write(&log_path, panic_content).unwrap();
        let (content, original_size) = read_log_file(&log_path);
        assert!(
            content.is_some(),
            "Files with no timestamps should still be included"
        );
        let content = content.unwrap();
        assert!(
            content.contains("panicked"),
            "Panic message should be preserved"
        );
        assert!(
            content.contains("stack backtrace"),
            "Backtrace should be preserved"
        );
        assert_eq!(original_size, panic_content.len() as u64);
    }

    // Short timeout so the fallback tests run quickly.
    const TEST_TIMEOUT: StdDuration = StdDuration::from_millis(200);

    // With no listener, both loopback hosts must be reported in the error.
    #[tokio::test]
    async fn test_query_with_fallback_connection_refused_reports_error() {
        let port = 59111;
        let result = query_with_fallback(port, 0, TEST_TIMEOUT).await;
        let err = result.expect_err("no listener → must return Err, not silent None");
        assert!(
            err.contains("127.0.0.1") && err.contains("[::1]"),
            "error should mention BOTH loopback hosts, got: {err}"
        );
        assert!(
            err.contains(&port.to_string()),
            "error should include the port number, got: {err}"
        );
        assert!(!err.is_empty());
    }

    // A listener that accepts but never speaks the protocol must surface as
    // a timeout error (connection is leaked to keep it open).
    #[tokio::test]
    async fn test_query_with_fallback_timeout_reports_error() {
        use tokio::net::TcpListener;
        let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
        let port = listener.local_addr().unwrap().port();
        let _handle = tokio::spawn(async move {
            loop {
                let (_stream, _) = match listener.accept().await {
                    Ok(v) => v,
                    Err(_) => break,
                };
                // Leak the socket so the handshake never completes.
                std::mem::forget(_stream);
            }
        });
        let result = query_with_fallback(port, 0, TEST_TIMEOUT).await;
        let err = result.expect_err("handshake never completes → must return Err");
        assert!(
            err.contains("timed out") || err.contains("timeout"),
            "error should identify timeout, got: {err}"
        );
        assert!(
            err.contains(&port.to_string()),
            "error should include the port number, got: {err}"
        );
    }

    // retry_attempts=1 must produce exactly two IPv4 connection attempts and
    // label both attempts in the error message.
    #[tokio::test]
    async fn test_query_with_fallback_retries_on_timeout() {
        use std::sync::Arc;
        use std::sync::atomic::{AtomicUsize, Ordering};
        use tokio::net::TcpListener;
        let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
        let port = listener.local_addr().unwrap().port();
        let accepts = Arc::new(AtomicUsize::new(0));
        let accepts_clone = accepts.clone();
        let _handle = tokio::spawn(async move {
            loop {
                let (_stream, _) = match listener.accept().await {
                    Ok(v) => v,
                    Err(_) => break,
                };
                accepts_clone.fetch_add(1, Ordering::SeqCst);
                std::mem::forget(_stream);
            }
        });
        let result = query_with_fallback(port, 1, TEST_TIMEOUT).await;
        assert!(result.is_err());
        // Give the accept loop a moment to record the final connection.
        tokio::time::sleep(StdDuration::from_millis(50)).await;
        let total_accepts = accepts.load(Ordering::SeqCst);
        assert_eq!(
            total_accepts, 2,
            "retry_attempts=1 must produce 2 v4 accepts (initial + 1 retry); got {total_accepts}"
        );
        let err = result.unwrap_err();
        assert!(
            err.contains("attempt 1/2") && err.contains("attempt 2/2"),
            "error should identify both attempts, got: {err}"
        );
    }

    // network_status_error: None must be omitted from JSON, Some must
    // round-trip intact.
    #[test]
    fn test_network_status_error_serialization() {
        let base_report = || DiagnosticReport {
            client_timestamp: "2026-04-17T00:00:00Z".to_string(),
            system_info: SystemInfo {
                os: "linux".into(),
                arch: "x86_64".into(),
                hostname: "test".into(),
            },
            version_info: VersionInfo {
                version: "0.2.46".into(),
                git_commit: "abc".into(),
                git_dirty: false,
                build_timestamp: "".into(),
            },
            logs: LogContents {
                main_log: None,
                error_log: None,
                main_log_size_bytes: 0,
                error_log_size_bytes: 0,
                main_log_original_size_bytes: 0,
                error_log_original_size_bytes: 0,
            },
            config: None,
            network_status: None,
            network_status_error: None,
            user_message: None,
        };
        let report = base_report();
        let json = serde_json::to_string(&report).unwrap();
        assert!(
            !json.contains("network_status_error"),
            "None should be skipped; got {json}"
        );
        let mut report = base_report();
        report.network_status_error = Some("timed out after 15s".to_string());
        let json = serde_json::to_string(&report).unwrap();
        assert!(
            json.contains("network_status_error"),
            "Some should appear in JSON; got {json}"
        );
        assert!(json.contains("timed out after 15s"));
        let restored: DiagnosticReport = serde_json::from_str(&json).unwrap();
        assert_eq!(
            restored.network_status_error.as_deref(),
            Some("timed out after 15s")
        );
    }

    // Entries older than the retention window are filtered out entirely,
    // but the on-disk size is still reported.
    #[test]
    fn test_read_log_file_old_timestamps_excluded() {
        use tempfile::TempDir;
        let temp_dir = TempDir::new().unwrap();
        let log_path = temp_dir.path().join("freenet.log");
        let old_content = "\
2020-01-01T00:00:00.000000Z  INFO freenet: Old log entry 1
2020-01-01T00:00:01.000000Z  INFO freenet: Old log entry 2";
        fs::write(&log_path, old_content).unwrap();
        let (content, original_size) = read_log_file(&log_path);
        assert!(
            content.is_none(),
            "Old timestamped entries should be filtered out"
        );
        assert_eq!(original_size, old_content.len() as u64);
    }
}