//! void-cli 0.0.3
//!
//! CLI for void — anonymous encrypted source control
//! Daemon integration for the CLI.
//!
//! Two modes:
//! 1. **Persistent**: `void daemon start` — runs the node as a foreground process
//!    with logging. Other CLI commands connect to it via the control protocol.
//! 2. **Ephemeral**: When no persistent daemon is running, push/pull start a
//!    temporary in-process node for the duration of the command.

use std::collections::HashMap;
use std::sync::{Arc, Mutex};

use void_core::store::{NetworkRemoteStore, RemoteStore};
use void_daemon::bitswap::behaviour::BitswapStore;
use void_daemon::control::DaemonInfo;
use void_daemon::{
    DaemonClient, DaemonClientTransport, VoidNode, VoidNodeConfig, VoidNodeTransport,
};

// ---------------------------------------------------------------------------
// CliBlockStore — bridges FsStore to BitswapStore for the daemon
// ---------------------------------------------------------------------------

/// Block store backed by the repository's `objects` directory, with an
/// in-memory write-through cache. `Clone` is cheap: clones share the same
/// cache via the `Arc`.
#[derive(Clone)]
pub struct CliBlockStore {
    // `<void_dir>/objects` — blocks are persisted here as individual files.
    objects_dir: std::path::PathBuf,
    // Cache keyed by CID bytes; shared across clones of this store.
    cache: Arc<Mutex<HashMap<Vec<u8>, Vec<u8>>>>,
}

impl CliBlockStore {
    pub fn new(void_dir: &std::path::Path) -> Self {
        Self {
            objects_dir: void_dir.join("objects"),
            cache: Arc::new(Mutex::new(HashMap::new())),
        }
    }

    fn object_path(&self, cid: &::cid::Cid) -> std::path::PathBuf {
        void_core::store::FsStore::object_path(&self.objects_dir, &cid.to_string())
    }
}

impl BitswapStore for CliBlockStore {
    /// A block is present if it is in the memory cache or on disk.
    fn contains(&self, cid: &::cid::Cid) -> bool {
        let key = cid.to_bytes();
        // Check the cache first; the lock is released before touching disk.
        let cached = self.cache.lock().unwrap().contains_key(&key);
        cached || self.object_path(cid).exists()
    }

    /// Fetch a block, preferring the memory cache over the filesystem.
    fn get(&self, cid: &::cid::Cid) -> Option<Vec<u8>> {
        let key = cid.to_bytes();
        // Clone out of the cache so the lock is not held during disk I/O.
        let hit = self.cache.lock().unwrap().get(&key).cloned();
        hit.or_else(|| std::fs::read(self.object_path(cid)).ok())
    }

    /// Persist a block to disk (best-effort) and remember it in the cache.
    fn insert(&self, cid: ::cid::Cid, data: Vec<u8>) {
        let path = self.object_path(&cid);
        if let Some(dir) = path.parent() {
            // Best-effort: failures surface later as a missing block, not a panic.
            let _ = std::fs::create_dir_all(dir);
        }
        let _ = std::fs::write(&path, &data);
        self.cache.lock().unwrap().insert(cid.to_bytes(), data);
    }
}

// ---------------------------------------------------------------------------
// Start ephemeral daemon (for push/pull when no persistent daemon running)
// ---------------------------------------------------------------------------

/// Start an ephemeral in-process daemon. Returns a RemoteStore + Runtime.
/// The runtime must stay alive for the duration of the operation.
/// Start an ephemeral in-process daemon. Returns a RemoteStore + Runtime.
/// The runtime must stay alive for the duration of the operation; the
/// returned store holds a handle into it.
pub fn start_ephemeral(
    void_dir: &std::path::Path,
) -> Result<(Arc<dyn RemoteStore>, tokio::runtime::Runtime), String> {
    let runtime = tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .build()
        .map_err(|e| format!("failed to create tokio runtime: {e}"))?;

    // Listen on an OS-assigned TCP port; everything else uses defaults.
    let config = VoidNodeConfig {
        listen: vec!["/ip4/0.0.0.0/tcp/0".parse().unwrap()],
        ..VoidNodeConfig::default()
    };
    let block_store = CliBlockStore::new(void_dir);

    let node = runtime
        .block_on(VoidNode::start(block_store, config))
        .map_err(|e| format!("failed to start daemon: {e}"))?;

    let remote: Arc<dyn RemoteStore> = Arc::new(NetworkRemoteStore::new(
        Arc::new(VoidNodeTransport(node)),
        runtime.handle().clone(),
    ));
    Ok((remote, runtime))
}

// ---------------------------------------------------------------------------
// Connect to running persistent daemon
// ---------------------------------------------------------------------------

/// Try to connect to a running persistent daemon.
/// Returns None if no daemon is running.
/// Try to connect to a running persistent daemon.
/// Returns None if no daemon is running or the connection fails.
pub fn connect_to_daemon(
    rt: &tokio::runtime::Runtime,
) -> Option<Arc<dyn RemoteStore>> {
    let info_path = DaemonInfo::default_path()?;

    // A missing/corrupt daemon.json means "no daemon"; report and bail.
    let info = DaemonInfo::load(&info_path)
        .map_err(|e| eprintln!("daemon: failed to read daemon.json: {e}"))
        .ok()?;

    // The recorded pid may be stale if the daemon crashed; clean up the lock.
    if !info.is_alive() {
        eprintln!("daemon: pid {} is not running, removing stale lock", info.pid);
        DaemonInfo::remove(&info_path);
        return None;
    }

    eprintln!("daemon: found running daemon pid={} peer={}", info.pid, info.peer_id);
    eprintln!("daemon: connecting to {}...", info.listen_addrs.first().unwrap_or(&"?".into()));

    let client = match rt.block_on(DaemonClient::connect_from_info(&info)) {
        Ok(c) => c,
        Err(e) => {
            eprintln!("daemon: connection failed: {e}");
            return None;
        }
    };

    eprintln!("daemon: connected");
    Some(Arc::new(NetworkRemoteStore::new(
        Arc::new(DaemonClientTransport(client)),
        rt.handle().clone(),
    )))
}

/// Auto-detect: connect to running daemon, or start ephemeral.
pub fn start_daemon(
    void_dir: &std::path::Path,
) -> Result<(Arc<dyn RemoteStore>, tokio::runtime::Runtime), String> {
    let rt = tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .build()
        .map_err(|e| format!("failed to create tokio runtime: {e}"))?;

    // Try connecting to a running daemon first.
    if let Some(remote) = connect_to_daemon(&rt) {
        eprintln!("Connected to running daemon");
        return Ok((remote, rt));
    }

    // No daemon running — start ephemeral.
    eprintln!("No running daemon found, starting ephemeral node...");
    let store = CliBlockStore::new(void_dir);
    let config = VoidNodeConfig {
        listen: vec!["/ip4/0.0.0.0/tcp/0".parse().unwrap()],
        ..VoidNodeConfig::default()
    };

    let node = rt
        .block_on(VoidNode::start(store, config))
        .map_err(|e| format!("failed to start daemon: {e}"))?;

    let transport = Arc::new(VoidNodeTransport(node));
    let remote = NetworkRemoteStore::new(transport, rt.handle().clone());

    Ok((Arc::new(remote), rt))
}

// BlockTransport adapters (VoidNodeTransport, DaemonClientTransport)
// live in void_daemon::transport — imported above.