1use anyhow::{Context, Result};
10use serde::{Deserialize, Serialize};
11use std::fs::{self, File};
12use std::io::{Read, Write};
13use std::path::{Path, PathBuf};
14use std::time::{SystemTime, UNIX_EPOCH};
15
/// Metadata describing a completed backup archive.
#[derive(Debug, Serialize, Deserialize)]
pub struct BackupInfo {
    /// Unix timestamp (seconds since epoch) when the backup was created.
    pub created_at: u64,
    /// Version of the node binary that produced the backup.
    pub node_version: String,
    /// Source data directory that was archived.
    pub data_dir: PathBuf,
    /// Location of the resulting `.tar.gz` archive.
    pub output_path: PathBuf,
    /// Size of the archive file in bytes.
    pub size_bytes: u64,
    /// Hex-encoded SHA-256 digest of the archive file.
    pub checksum: String,
}
26
/// Metadata describing a collected diagnostics bundle.
#[derive(Debug, Serialize, Deserialize)]
pub struct DiagnosticsInfo {
    /// Unix timestamp (seconds since epoch) when diagnostics were collected.
    pub collected_at: u64,
    /// Version of the node binary that collected the diagnostics.
    pub node_version: String,
    /// Location of the resulting `.tar.gz` bundle.
    pub output_path: PathBuf,
    /// Names of the components included in the bundle
    /// (e.g. "system-info", "config:config.yaml", "metrics").
    pub components: Vec<String>,
}
35
/// Generate a new ed25519 keypair, returning `(private_hex, public_hex)`.
///
/// NOTE(review): the key is derived from a single random `u64` seed, which
/// caps the effective keyspace at 2^64 rather than the full 256-bit seed
/// space — consider seeding the key directly from a CSPRNG if the upstream
/// API supports it. TODO confirm with commonware_cryptography docs.
pub fn keygen() -> Result<(String, String)> {
    use commonware_cryptography::{PrivateKeyExt, Signer};

    // Seed drawn from the thread-local RNG; see the security note above.
    let seed: u64 = rand::random();
    let private_key = commonware_cryptography::ed25519::PrivateKey::from_seed(seed);
    let public_key = private_key.public_key();

    // Hex-encode the raw key bytes for storage/transport.
    let private_hex = hex::encode(private_key.as_ref());
    let public_hex = hex::encode(public_key.as_ref());

    Ok((private_hex, public_hex))
}
52
53pub fn keygen_to_file(output_path: &Path) -> Result<String> {
55 let (private_key, public_key) = keygen()?;
56
57 let mut file = File::create(output_path).context("Failed to create key file")?;
58
59 writeln!(file, "{}", private_key)?;
61 writeln!(file, "{}", public_key)?;
62
63 Ok(public_key)
64}
65
66pub fn create_backup(data_dir: &Path, output_path: &Path) -> Result<BackupInfo> {
70 use sha2::{Digest, Sha256};
71
72 if !data_dir.exists() {
73 anyhow::bail!("Data directory does not exist: {}", data_dir.display());
74 }
75
76 if let Some(parent) = output_path.parent() {
78 fs::create_dir_all(parent)?;
79 }
80
81 let tar_file = File::create(output_path).context("Failed to create backup file")?;
83 let encoder = flate2::write::GzEncoder::new(tar_file, flate2::Compression::default());
84 let mut tar = tar::Builder::new(encoder);
85
86 tar.append_dir_all(".", data_dir)
88 .context("Failed to add data directory to backup")?;
89
90 tar.finish().context("Failed to finalize backup")?;
91 drop(tar);
92
93 let mut file = File::open(output_path)?;
95 let mut hasher = Sha256::new();
96 let mut buffer = [0u8; 8192];
97
98 loop {
99 let bytes_read = file.read(&mut buffer)?;
100 if bytes_read == 0 {
101 break;
102 }
103 hasher.update(&buffer[..bytes_read]);
104 }
105
106 let checksum = hex::encode(hasher.finalize());
107 let metadata = fs::metadata(output_path)?;
108
109 let timestamp = SystemTime::now()
110 .duration_since(UNIX_EPOCH)
111 .map(|d| d.as_secs())
112 .unwrap_or(0);
113
114 Ok(BackupInfo {
115 created_at: timestamp,
116 node_version: env!("CARGO_PKG_VERSION").to_string(),
117 data_dir: data_dir.to_path_buf(),
118 output_path: output_path.to_path_buf(),
119 size_bytes: metadata.len(),
120 checksum,
121 })
122}
123
124pub fn verify_backup(backup_path: &Path) -> Result<bool> {
126 if !backup_path.exists() {
127 anyhow::bail!("Backup file does not exist: {}", backup_path.display());
128 }
129
130 let file = File::open(backup_path)?;
132 let decoder = flate2::read::GzDecoder::new(file);
133 let mut archive = tar::Archive::new(decoder);
134
135 let mut entry_count = 0;
137 for entry in archive.entries()? {
138 let entry = entry.context("Failed to read archive entry")?;
139 let _path = entry.path()?;
140 entry_count += 1;
141 }
142
143 tracing::info!(entries = entry_count, "Backup verification complete");
144
145 Ok(true)
146}
147
148pub fn restore_backup(backup_path: &Path, target_dir: &Path, verify: bool) -> Result<()> {
150 if !backup_path.exists() {
151 anyhow::bail!("Backup file does not exist: {}", backup_path.display());
152 }
153
154 if verify {
156 verify_backup(backup_path)?;
157 }
158
159 fs::create_dir_all(target_dir)?;
161
162 let file = File::open(backup_path)?;
164 let decoder = flate2::read::GzDecoder::new(file);
165 let mut archive = tar::Archive::new(decoder);
166
167 archive
168 .unpack(target_dir)
169 .context("Failed to extract backup")?;
170
171 tracing::info!(
172 backup = %backup_path.display(),
173 target = %target_dir.display(),
174 "Backup restored successfully"
175 );
176
177 Ok(())
178}
179
180pub fn collect_diagnostics(
182 data_dir: &Path,
183 output_path: &Path,
184 include_logs: bool,
185 include_metrics: bool,
186) -> Result<DiagnosticsInfo> {
187 use std::process::Command;
188
189 let temp_dir = tempfile::tempdir()?;
191 let diag_dir = temp_dir.path();
192
193 let mut components = Vec::new();
194
195 let sys_info_path = diag_dir.join("system-info.txt");
197 let mut sys_file = File::create(&sys_info_path)?;
198
199 writeln!(sys_file, "=== Guts Node Diagnostics ===")?;
200 writeln!(sys_file, "Timestamp: {:?}", SystemTime::now())?;
201 writeln!(sys_file, "Node Version: {}", env!("CARGO_PKG_VERSION"))?;
202 writeln!(sys_file, "Data Directory: {}", data_dir.display())?;
203 writeln!(sys_file)?;
204
205 writeln!(sys_file, "=== System Information ===")?;
207 #[cfg(unix)]
208 {
209 if let Ok(output) = Command::new("uname").arg("-a").output() {
210 writeln!(sys_file, "OS: {}", String::from_utf8_lossy(&output.stdout))?;
211 }
212 if let Ok(output) = Command::new("free").arg("-h").output() {
213 writeln!(
214 sys_file,
215 "Memory:\n{}",
216 String::from_utf8_lossy(&output.stdout)
217 )?;
218 }
219 if let Ok(output) = Command::new("df").arg("-h").output() {
220 writeln!(
221 sys_file,
222 "Disk:\n{}",
223 String::from_utf8_lossy(&output.stdout)
224 )?;
225 }
226 }
227 components.push("system-info".to_string());
228
229 let config_paths = ["config.yaml", "config.yml", "config.json"];
231 for config_name in &config_paths {
232 let config_path = data_dir.parent().unwrap_or(data_dir).join(config_name);
233 if config_path.exists() {
234 let dest = diag_dir.join(config_name);
235 fs::copy(&config_path, &dest)?;
236 components.push(format!("config:{}", config_name));
237 }
238 }
239
240 let data_info_path = diag_dir.join("data-dir-info.txt");
242 let mut data_file = File::create(&data_info_path)?;
243 writeln!(data_file, "=== Data Directory Structure ===")?;
244
245 fn list_dir(dir: &Path, prefix: &str, file: &mut File, depth: usize) -> Result<()> {
246 if depth > 3 {
247 return Ok(());
248 }
249 if let Ok(entries) = fs::read_dir(dir) {
250 for entry in entries.flatten() {
251 let path = entry.path();
252 let name = path.file_name().unwrap_or_default().to_string_lossy();
253
254 if path.is_dir() {
255 writeln!(file, "{}{}/", prefix, name)?;
256 list_dir(&path, &format!("{} ", prefix), file, depth + 1)?;
257 } else {
258 let size = fs::metadata(&path).map(|m| m.len()).unwrap_or(0);
259 writeln!(file, "{}{} ({} bytes)", prefix, name, size)?;
260 }
261 }
262 }
263 Ok(())
264 }
265
266 if data_dir.exists() {
267 list_dir(data_dir, "", &mut data_file, 0)?;
268 }
269 components.push("data-dir-info".to_string());
270
271 if include_logs {
273 #[cfg(unix)]
275 {
276 if let Ok(output) = Command::new("journalctl")
277 .args(["-u", "guts-node", "--since", "1 hour ago", "-n", "1000"])
278 .output()
279 {
280 let log_path = diag_dir.join("journal.log");
281 fs::write(&log_path, &output.stdout)?;
282 components.push("journal-logs".to_string());
283 }
284 }
285 }
286
287 if include_metrics {
289 let metrics_content = "# Metrics collection placeholder\n# Connect to http://localhost:9090/metrics to fetch live metrics\n";
291 let metrics_path = diag_dir.join("metrics.txt");
292 fs::write(&metrics_path, metrics_content)?;
293 components.push("metrics".to_string());
294 }
295
296 if let Some(parent) = output_path.parent() {
298 fs::create_dir_all(parent)?;
299 }
300
301 let tar_file = File::create(output_path)?;
302 let encoder = flate2::write::GzEncoder::new(tar_file, flate2::Compression::default());
303 let mut tar = tar::Builder::new(encoder);
304
305 tar.append_dir_all("diagnostics", diag_dir)?;
306 tar.finish()?;
307
308 let timestamp = SystemTime::now()
309 .duration_since(UNIX_EPOCH)
310 .map(|d| d.as_secs())
311 .unwrap_or(0);
312
313 Ok(DiagnosticsInfo {
314 collected_at: timestamp,
315 node_version: env!("CARGO_PKG_VERSION").to_string(),
316 output_path: output_path.to_path_buf(),
317 components,
318 })
319}
320
/// Point-in-time status snapshot of the node.
#[derive(Debug, Serialize, Deserialize)]
pub struct NodeStatus {
    /// Version of the running node binary.
    pub version: String,
    /// Uptime in seconds. Currently always 0 in `get_status` — TODO confirm
    /// whether a caller elsewhere fills this in.
    pub uptime_secs: u64,
    /// Root data directory of the node.
    pub data_dir: PathBuf,
    /// Storage usage figures for the data directory.
    pub storage: StorageStatus,
    /// Consensus state; `None` when unavailable (not populated by `get_status`).
    pub consensus: Option<ConsensusStatus>,
    /// Peer-to-peer networking state; `None` when unavailable (not populated
    /// by `get_status`).
    pub p2p: Option<P2pStatus>,
}
331
/// Storage usage figures for the node data directory.
#[derive(Debug, Serialize, Deserialize)]
pub struct StorageStatus {
    /// Total bytes used by files under the data directory.
    pub total_bytes: u64,
    /// Free space in bytes. Currently always 0 in `get_status`
    /// (free-space probing is not implemented there).
    pub available_bytes: u64,
    /// Usage as a percentage. Currently always 0.0 in `get_status`.
    pub usage_percent: f64,
}
338
/// Consensus subsystem state (not populated by `get_status` in this module).
#[derive(Debug, Serialize, Deserialize)]
pub struct ConsensusStatus {
    /// Whether consensus participation is enabled.
    pub enabled: bool,
    /// Latest known block height.
    pub block_height: u64,
    /// Whether the node considers itself caught up with the network.
    pub synced: bool,
}
345
/// Peer-to-peer networking state (not populated by `get_status` in this module).
#[derive(Debug, Serialize, Deserialize)]
pub struct P2pStatus {
    /// Number of currently connected peers.
    pub peer_count: usize,
    /// Address the p2p stack is listening on.
    pub listening_addr: String,
}
351
352pub fn get_status(data_dir: &Path) -> Result<NodeStatus> {
354 let storage = if data_dir.exists() {
356 let dir_size = calculate_dir_size(data_dir)?;
357 StorageStatus {
358 total_bytes: dir_size,
359 available_bytes: 0, usage_percent: 0.0,
361 }
362 } else {
363 StorageStatus {
364 total_bytes: 0,
365 available_bytes: 0,
366 usage_percent: 0.0,
367 }
368 };
369
370 Ok(NodeStatus {
371 version: env!("CARGO_PKG_VERSION").to_string(),
372 uptime_secs: 0, data_dir: data_dir.to_path_buf(),
374 storage,
375 consensus: None,
376 p2p: None,
377 })
378}
379
/// Recursively compute the total size in bytes of all files under `dir`.
///
/// Symlinks are NOT followed: a symlink counts as its own link size rather
/// than the target's. The previous implementation used `Path::is_dir`, which
/// follows symlinks, so a symlink cycle inside the data directory would
/// recurse forever (and symlinked trees were double-counted).
fn calculate_dir_size(dir: &Path) -> Result<u64> {
    let mut total = 0u64;

    if dir.is_dir() {
        for entry in fs::read_dir(dir)? {
            let entry = entry?;
            // `DirEntry::file_type` does not traverse symlinks, unlike
            // `Path::is_dir`, which guards against symlink loops.
            let file_type = entry.file_type()?;

            if file_type.is_dir() {
                total += calculate_dir_size(&entry.path())?;
            } else {
                // `DirEntry::metadata` likewise does not follow symlinks.
                total += entry.metadata()?.len();
            }
        }
    }

    Ok(total)
}
399
400pub fn verify_data(data_dir: &Path, full: bool) -> Result<VerifyResult> {
402 if !data_dir.exists() {
403 anyhow::bail!("Data directory does not exist: {}", data_dir.display());
404 }
405
406 let errors: Vec<String> = Vec::new();
407 let mut warnings = Vec::new();
408 let mut objects_checked = 0u64;
409
410 let required_dirs = ["objects", "refs"];
412 for dir_name in &required_dirs {
413 let dir_path = data_dir.join(dir_name);
414 if !dir_path.exists() {
415 warnings.push(format!("Missing directory: {}", dir_name));
416 }
417 }
418
419 if full {
421 let objects_dir = data_dir.join("objects");
422 if objects_dir.exists() {
423 for entry in walkdir::WalkDir::new(&objects_dir)
424 .into_iter()
425 .filter_map(|e| e.ok())
426 {
427 if entry.file_type().is_file() {
428 objects_checked += 1;
429 }
431 }
432 }
433 }
434
435 Ok(VerifyResult {
436 valid: errors.is_empty(),
437 objects_checked,
438 errors,
439 warnings,
440 })
441}
442
/// Outcome of a data-directory verification pass (`verify_data`).
#[derive(Debug, Serialize, Deserialize)]
pub struct VerifyResult {
    /// True when no errors were recorded during the check.
    pub valid: bool,
    /// Number of object files examined (only counted during a full check).
    pub objects_checked: u64,
    /// Problems that invalidate the data directory.
    pub errors: Vec<String>,
    /// Non-fatal findings (e.g. missing optional structure).
    pub warnings: Vec<String>,
}
450
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::tempdir;

    /// Keygen must produce two distinct, non-empty hex strings.
    #[test]
    fn test_keygen() {
        let (private, public) = keygen().unwrap();

        assert!(!private.is_empty());
        assert!(!public.is_empty());
        assert_ne!(private, public);
    }

    /// Writing a keypair to disk yields a two-line file whose second line is
    /// the returned public key.
    #[test]
    fn test_keygen_to_file() {
        let temp = tempdir().unwrap();
        let key_path = temp.path().join("node.key");

        let public_key = keygen_to_file(&key_path).unwrap();

        assert!(key_path.exists());
        assert!(!public_key.is_empty());

        let contents = fs::read_to_string(&key_path).unwrap();
        let mut lines = contents.lines();

        let private_line = lines.next().unwrap();
        assert!(!private_line.is_empty());
        assert_eq!(lines.next().unwrap(), public_key);
        assert!(lines.next().is_none());
    }
}