guts_node/
operator.rs

1//! Operator commands for Guts node administration.
2//!
3//! This module provides commands for:
4//! - Key generation and management
5//! - Backup and restore operations
6//! - Diagnostics collection
7//! - Storage maintenance
8
9use anyhow::{Context, Result};
10use serde::{Deserialize, Serialize};
11use std::fs::{self, File};
12use std::io::{Read, Write};
13use std::path::{Path, PathBuf};
14use std::time::{SystemTime, UNIX_EPOCH};
15
/// Result of a backup operation
#[derive(Debug, Serialize, Deserialize)]
pub struct BackupInfo {
    /// Unix timestamp (seconds since epoch) when the backup was created.
    pub created_at: u64,
    /// Version of the node software that produced the backup (CARGO_PKG_VERSION).
    pub node_version: String,
    /// Data directory that was backed up.
    pub data_dir: PathBuf,
    /// Path of the generated backup archive.
    pub output_path: PathBuf,
    /// Size of the archive file in bytes.
    pub size_bytes: u64,
    /// Hex-encoded SHA-256 checksum of the archive file.
    pub checksum: String,
}
26
/// Result of diagnostics collection
#[derive(Debug, Serialize, Deserialize)]
pub struct DiagnosticsInfo {
    /// Unix timestamp (seconds since epoch) when diagnostics were collected.
    pub collected_at: u64,
    /// Version of the node software that collected the diagnostics.
    pub node_version: String,
    /// Path of the generated diagnostics tarball.
    pub output_path: PathBuf,
    /// Labels of the components included (e.g. "system-info", "metrics").
    pub components: Vec<String>,
}
35
36/// Generate a new Ed25519 keypair for node identity.
37///
38/// Returns the private and public keys as hex-encoded strings.
39pub fn keygen() -> Result<(String, String)> {
40    use commonware_cryptography::{PrivateKeyExt, Signer};
41
42    // Generate a random seed
43    let seed: u64 = rand::random();
44    let private_key = commonware_cryptography::ed25519::PrivateKey::from_seed(seed);
45    let public_key = private_key.public_key();
46
47    let private_hex = hex::encode(private_key.as_ref());
48    let public_hex = hex::encode(public_key.as_ref());
49
50    Ok((private_hex, public_hex))
51}
52
53/// Generate a keypair and write to file.
54pub fn keygen_to_file(output_path: &Path) -> Result<String> {
55    let (private_key, public_key) = keygen()?;
56
57    let mut file = File::create(output_path).context("Failed to create key file")?;
58
59    // Write private key on first line, public key on second
60    writeln!(file, "{}", private_key)?;
61    writeln!(file, "{}", public_key)?;
62
63    Ok(public_key)
64}
65
66/// Create a backup of the node's data directory.
67///
68/// This creates a compressed tarball of the data directory.
69pub fn create_backup(data_dir: &Path, output_path: &Path) -> Result<BackupInfo> {
70    use sha2::{Digest, Sha256};
71
72    if !data_dir.exists() {
73        anyhow::bail!("Data directory does not exist: {}", data_dir.display());
74    }
75
76    // Create parent directories if needed
77    if let Some(parent) = output_path.parent() {
78        fs::create_dir_all(parent)?;
79    }
80
81    // Create the tarball
82    let tar_file = File::create(output_path).context("Failed to create backup file")?;
83    let encoder = flate2::write::GzEncoder::new(tar_file, flate2::Compression::default());
84    let mut tar = tar::Builder::new(encoder);
85
86    // Add the data directory to the tarball
87    tar.append_dir_all(".", data_dir)
88        .context("Failed to add data directory to backup")?;
89
90    tar.finish().context("Failed to finalize backup")?;
91    drop(tar);
92
93    // Calculate checksum
94    let mut file = File::open(output_path)?;
95    let mut hasher = Sha256::new();
96    let mut buffer = [0u8; 8192];
97
98    loop {
99        let bytes_read = file.read(&mut buffer)?;
100        if bytes_read == 0 {
101            break;
102        }
103        hasher.update(&buffer[..bytes_read]);
104    }
105
106    let checksum = hex::encode(hasher.finalize());
107    let metadata = fs::metadata(output_path)?;
108
109    let timestamp = SystemTime::now()
110        .duration_since(UNIX_EPOCH)
111        .map(|d| d.as_secs())
112        .unwrap_or(0);
113
114    Ok(BackupInfo {
115        created_at: timestamp,
116        node_version: env!("CARGO_PKG_VERSION").to_string(),
117        data_dir: data_dir.to_path_buf(),
118        output_path: output_path.to_path_buf(),
119        size_bytes: metadata.len(),
120        checksum,
121    })
122}
123
124/// Verify backup integrity.
125pub fn verify_backup(backup_path: &Path) -> Result<bool> {
126    if !backup_path.exists() {
127        anyhow::bail!("Backup file does not exist: {}", backup_path.display());
128    }
129
130    // Try to open and read the archive
131    let file = File::open(backup_path)?;
132    let decoder = flate2::read::GzDecoder::new(file);
133    let mut archive = tar::Archive::new(decoder);
134
135    // Verify all entries can be read
136    let mut entry_count = 0;
137    for entry in archive.entries()? {
138        let entry = entry.context("Failed to read archive entry")?;
139        let _path = entry.path()?;
140        entry_count += 1;
141    }
142
143    tracing::info!(entries = entry_count, "Backup verification complete");
144
145    Ok(true)
146}
147
148/// Restore data from a backup.
149pub fn restore_backup(backup_path: &Path, target_dir: &Path, verify: bool) -> Result<()> {
150    if !backup_path.exists() {
151        anyhow::bail!("Backup file does not exist: {}", backup_path.display());
152    }
153
154    // Verify backup first if requested
155    if verify {
156        verify_backup(backup_path)?;
157    }
158
159    // Create target directory
160    fs::create_dir_all(target_dir)?;
161
162    // Extract the archive
163    let file = File::open(backup_path)?;
164    let decoder = flate2::read::GzDecoder::new(file);
165    let mut archive = tar::Archive::new(decoder);
166
167    archive
168        .unpack(target_dir)
169        .context("Failed to extract backup")?;
170
171    tracing::info!(
172        backup = %backup_path.display(),
173        target = %target_dir.display(),
174        "Backup restored successfully"
175    );
176
177    Ok(())
178}
179
180/// Collect node diagnostics for troubleshooting.
181pub fn collect_diagnostics(
182    data_dir: &Path,
183    output_path: &Path,
184    include_logs: bool,
185    include_metrics: bool,
186) -> Result<DiagnosticsInfo> {
187    use std::process::Command;
188
189    // Create temp directory for diagnostics
190    let temp_dir = tempfile::tempdir()?;
191    let diag_dir = temp_dir.path();
192
193    let mut components = Vec::new();
194
195    // Collect system information
196    let sys_info_path = diag_dir.join("system-info.txt");
197    let mut sys_file = File::create(&sys_info_path)?;
198
199    writeln!(sys_file, "=== Guts Node Diagnostics ===")?;
200    writeln!(sys_file, "Timestamp: {:?}", SystemTime::now())?;
201    writeln!(sys_file, "Node Version: {}", env!("CARGO_PKG_VERSION"))?;
202    writeln!(sys_file, "Data Directory: {}", data_dir.display())?;
203    writeln!(sys_file)?;
204
205    // System info
206    writeln!(sys_file, "=== System Information ===")?;
207    #[cfg(unix)]
208    {
209        if let Ok(output) = Command::new("uname").arg("-a").output() {
210            writeln!(sys_file, "OS: {}", String::from_utf8_lossy(&output.stdout))?;
211        }
212        if let Ok(output) = Command::new("free").arg("-h").output() {
213            writeln!(
214                sys_file,
215                "Memory:\n{}",
216                String::from_utf8_lossy(&output.stdout)
217            )?;
218        }
219        if let Ok(output) = Command::new("df").arg("-h").output() {
220            writeln!(
221                sys_file,
222                "Disk:\n{}",
223                String::from_utf8_lossy(&output.stdout)
224            )?;
225        }
226    }
227    components.push("system-info".to_string());
228
229    // Collect configuration (if exists)
230    let config_paths = ["config.yaml", "config.yml", "config.json"];
231    for config_name in &config_paths {
232        let config_path = data_dir.parent().unwrap_or(data_dir).join(config_name);
233        if config_path.exists() {
234            let dest = diag_dir.join(config_name);
235            fs::copy(&config_path, &dest)?;
236            components.push(format!("config:{}", config_name));
237        }
238    }
239
240    // Collect data directory info
241    let data_info_path = diag_dir.join("data-dir-info.txt");
242    let mut data_file = File::create(&data_info_path)?;
243    writeln!(data_file, "=== Data Directory Structure ===")?;
244
245    fn list_dir(dir: &Path, prefix: &str, file: &mut File, depth: usize) -> Result<()> {
246        if depth > 3 {
247            return Ok(());
248        }
249        if let Ok(entries) = fs::read_dir(dir) {
250            for entry in entries.flatten() {
251                let path = entry.path();
252                let name = path.file_name().unwrap_or_default().to_string_lossy();
253
254                if path.is_dir() {
255                    writeln!(file, "{}{}/", prefix, name)?;
256                    list_dir(&path, &format!("{}  ", prefix), file, depth + 1)?;
257                } else {
258                    let size = fs::metadata(&path).map(|m| m.len()).unwrap_or(0);
259                    writeln!(file, "{}{} ({} bytes)", prefix, name, size)?;
260                }
261            }
262        }
263        Ok(())
264    }
265
266    if data_dir.exists() {
267        list_dir(data_dir, "", &mut data_file, 0)?;
268    }
269    components.push("data-dir-info".to_string());
270
271    // Collect logs if requested
272    if include_logs {
273        // Try to get recent journal logs
274        #[cfg(unix)]
275        {
276            if let Ok(output) = Command::new("journalctl")
277                .args(["-u", "guts-node", "--since", "1 hour ago", "-n", "1000"])
278                .output()
279            {
280                let log_path = diag_dir.join("journal.log");
281                fs::write(&log_path, &output.stdout)?;
282                components.push("journal-logs".to_string());
283            }
284        }
285    }
286
287    // Collect metrics if requested
288    if include_metrics {
289        // Try to fetch metrics from the local endpoint
290        let metrics_content = "# Metrics collection placeholder\n# Connect to http://localhost:9090/metrics to fetch live metrics\n";
291        let metrics_path = diag_dir.join("metrics.txt");
292        fs::write(&metrics_path, metrics_content)?;
293        components.push("metrics".to_string());
294    }
295
296    // Create the output tarball
297    if let Some(parent) = output_path.parent() {
298        fs::create_dir_all(parent)?;
299    }
300
301    let tar_file = File::create(output_path)?;
302    let encoder = flate2::write::GzEncoder::new(tar_file, flate2::Compression::default());
303    let mut tar = tar::Builder::new(encoder);
304
305    tar.append_dir_all("diagnostics", diag_dir)?;
306    tar.finish()?;
307
308    let timestamp = SystemTime::now()
309        .duration_since(UNIX_EPOCH)
310        .map(|d| d.as_secs())
311        .unwrap_or(0);
312
313    Ok(DiagnosticsInfo {
314        collected_at: timestamp,
315        node_version: env!("CARGO_PKG_VERSION").to_string(),
316        output_path: output_path.to_path_buf(),
317        components,
318    })
319}
320
/// Node status information
#[derive(Debug, Serialize, Deserialize)]
pub struct NodeStatus {
    /// Version of the node software (CARGO_PKG_VERSION).
    pub version: String,
    /// Seconds the node has been running; 0 when queried offline.
    pub uptime_secs: u64,
    /// Path to the node's data directory.
    pub data_dir: PathBuf,
    /// Storage usage summary for the data directory.
    pub storage: StorageStatus,
    /// Consensus state; `None` when unavailable (e.g. offline query).
    pub consensus: Option<ConsensusStatus>,
    /// Peer-to-peer networking state; `None` when unavailable.
    pub p2p: Option<P2pStatus>,
}
331
#[derive(Debug, Serialize, Deserialize)]
pub struct StorageStatus {
    /// Bytes used by the data directory.
    pub total_bytes: u64,
    /// Bytes available on the underlying filesystem; 0 when not measured.
    pub available_bytes: u64,
    /// Filesystem usage as a percentage; 0.0 when not measured.
    pub usage_percent: f64,
}
338
#[derive(Debug, Serialize, Deserialize)]
pub struct ConsensusStatus {
    /// Whether consensus participation is enabled on this node.
    pub enabled: bool,
    /// Latest block height known to this node.
    pub block_height: u64,
    /// Whether the node considers itself caught up with the network.
    pub synced: bool,
}
345
#[derive(Debug, Serialize, Deserialize)]
pub struct P2pStatus {
    /// Number of currently connected peers.
    pub peer_count: usize,
    /// Address the node is listening on for peer connections.
    pub listening_addr: String,
}
351
352/// Get current node status (offline mode - reads from disk)
353pub fn get_status(data_dir: &Path) -> Result<NodeStatus> {
354    // Check storage usage
355    let storage = if data_dir.exists() {
356        let dir_size = calculate_dir_size(data_dir)?;
357        StorageStatus {
358            total_bytes: dir_size,
359            available_bytes: 0, // Would need statvfs for accurate filesystem info
360            usage_percent: 0.0,
361        }
362    } else {
363        StorageStatus {
364            total_bytes: 0,
365            available_bytes: 0,
366            usage_percent: 0.0,
367        }
368    };
369
370    Ok(NodeStatus {
371        version: env!("CARGO_PKG_VERSION").to_string(),
372        uptime_secs: 0, // Can't determine in offline mode
373        data_dir: data_dir.to_path_buf(),
374        storage,
375        consensus: None,
376        p2p: None,
377    })
378}
379
380/// Calculate total size of a directory recursively
381fn calculate_dir_size(dir: &Path) -> Result<u64> {
382    let mut total = 0u64;
383
384    if dir.is_dir() {
385        for entry in fs::read_dir(dir)? {
386            let entry = entry?;
387            let path = entry.path();
388
389            if path.is_dir() {
390                total += calculate_dir_size(&path)?;
391            } else {
392                total += fs::metadata(&path)?.len();
393            }
394        }
395    }
396
397    Ok(total)
398}
399
400/// Verify data integrity
401pub fn verify_data(data_dir: &Path, full: bool) -> Result<VerifyResult> {
402    if !data_dir.exists() {
403        anyhow::bail!("Data directory does not exist: {}", data_dir.display());
404    }
405
406    let errors: Vec<String> = Vec::new();
407    let mut warnings = Vec::new();
408    let mut objects_checked = 0u64;
409
410    // Check for required subdirectories
411    let required_dirs = ["objects", "refs"];
412    for dir_name in &required_dirs {
413        let dir_path = data_dir.join(dir_name);
414        if !dir_path.exists() {
415            warnings.push(format!("Missing directory: {}", dir_name));
416        }
417    }
418
419    // If full verification, check all objects
420    if full {
421        let objects_dir = data_dir.join("objects");
422        if objects_dir.exists() {
423            for entry in walkdir::WalkDir::new(&objects_dir)
424                .into_iter()
425                .filter_map(|e| e.ok())
426            {
427                if entry.file_type().is_file() {
428                    objects_checked += 1;
429                    // In a real implementation, would verify object checksums
430                }
431            }
432        }
433    }
434
435    Ok(VerifyResult {
436        valid: errors.is_empty(),
437        objects_checked,
438        errors,
439        warnings,
440    })
441}
442
#[derive(Debug, Serialize, Deserialize)]
pub struct VerifyResult {
    /// True when no errors were found during verification.
    pub valid: bool,
    /// Number of object files examined (only populated for full checks).
    pub objects_checked: u64,
    /// Fatal integrity problems found.
    pub errors: Vec<String>,
    /// Non-fatal issues found (e.g. missing expected directories).
    pub warnings: Vec<String>,
}
450
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::tempdir;

    #[test]
    fn test_keygen() {
        let (private_hex, public_hex) = keygen().unwrap();

        // Both halves must be non-empty hex strings...
        assert!(!private_hex.is_empty());
        assert!(!public_hex.is_empty());
        // ...and the private key must never equal the public key.
        assert_ne!(private_hex, public_hex);
    }

    #[test]
    fn test_keygen_to_file() {
        let temp = tempdir().unwrap();
        let key_path = temp.path().join("node.key");

        let public_key = keygen_to_file(&key_path).unwrap();
        assert!(key_path.exists());
        assert!(!public_key.is_empty());

        // File layout contract: private key line, then public key line.
        let contents = fs::read_to_string(&key_path).unwrap();
        let lines: Vec<&str> = contents.lines().collect();
        assert_eq!(lines.len(), 2);
        assert!(!lines[0].is_empty()); // private key present
        assert_eq!(lines[1], public_key); // public key matches return value
    }
}
485}