//! render-session-mcp 0.2.0
//!
//! rmcp server (stdio) for render-session: viewer.lease/list/release, report.write, draft.write tools.
use std::{path::PathBuf, sync::Arc};

use anyhow::Result;
use rmcp::{
    handler::server::wrapper::Parameters,
    model::{CallToolResult, Content},
    schemars, tool, tool_handler, tool_router,
    transport::stdio,
    ServerHandler, ServiceExt,
};
use tokio::sync::Mutex;
use tracing_subscriber::EnvFilter;

use render_session_core::{lease::LeaseStore, writer};

// ---------------------------------------------------------------------------
// Tool parameter structs
// ---------------------------------------------------------------------------

/// Parameters for viewer.lease tool.
// NOTE: the `///` field docs below feed the generated JSON schema via
// `schemars::JsonSchema`, so they are client-visible — keep them accurate.
#[derive(serde::Deserialize, schemars::JsonSchema)]
struct LeaseParams {
    /// TCP port to lease (default: 8000, retries +1 up to 5 times)
    // Omitting "port" in the request deserializes to None (serde treats
    // Option fields as implicitly defaulted; see lease_params_optional_port).
    port: Option<u16>,
    /// Project directory containing render-site/
    dir: String,
    /// Phase 4 (c): viewer-internal watcher tick (seconds). When set, the
    /// spawned viewer runs `auto_capture_once` every <tick> seconds in the
    /// background. Omit (None) to disable the watcher.
    // `#[serde(default)]` is redundant for Option but harmless; kept as-is.
    #[serde(default)]
    tick: Option<u64>,
}

/// Parameters for viewer.release tool.
// Identifies the lease to drop by its port number alone; see
// `RenderSessionServer::viewer_release`.
#[derive(serde::Deserialize, schemars::JsonSchema)]
struct ReleaseParams {
    /// TCP port to release
    port: u16,
}

/// Parameters for report.write tool.
// Passed through verbatim to `render_session_core::writer::write_report`,
// which derives the output filename from `title`.
#[derive(serde::Deserialize, schemars::JsonSchema)]
struct WriteReportParams {
    /// Project directory containing render-site/
    dir: String,
    /// Report title (used for slug + filename)
    title: String,
    /// Report body in Markdown
    body: String,
}

/// Parameters for draft.write tool.
// Passed through verbatim to `render_session_core::writer::write_draft`.
// The id-character restriction is presumably enforced by the writer, not
// here — TODO confirm against render_session_core.
#[derive(serde::Deserialize, schemars::JsonSchema)]
struct WriteDraftParams {
    /// Project directory containing render-site/
    dir: String,
    /// Issue ID used as the filename base (ASCII alphanumeric, dash, underscore only)
    issue_id: String,
    /// Draft body in Markdown
    body: String,
}

// ---------------------------------------------------------------------------
// Server
// ---------------------------------------------------------------------------

/// MCP server for render-session.
// Clone is derived so handles to the server can be duplicated cheaply; the
// lease store is shared via Arc, so every clone sees the same state.
#[derive(Clone)]
struct RenderSessionServer {
    // tokio::sync::Mutex (not std) so tool handlers may hold the guard
    // across `.await` points (e.g. `store.acquire(..).await` in viewer_lease).
    lease_store: Arc<Mutex<LeaseStore>>,
}

#[tool_router]
impl RenderSessionServer {
    /// Acquire a port lease and spawn the HTTP viewer.
    #[tool(
        name = "viewer.lease",
        description = "Acquire a port lease and spawn the HTTP viewer"
    )]
    async fn viewer_lease(
        &self,
        Parameters(p): Parameters<LeaseParams>,
    ) -> Result<CallToolResult, rmcp::ErrorData> {
        let dir = PathBuf::from(&p.dir);
        let mut store = self.lease_store.lock().await;
        let entry = store.acquire(p.port, &dir, p.tick).await.map_err(|e| {
            tracing::error!(error = %e, dir = %p.dir, "viewer.lease failed");
            rmcp::ErrorData::internal_error(e.to_string(), None)
        })?;
        let payload = serde_json::to_string(&entry).map_err(|e| {
            tracing::error!(error = %e, "viewer.lease serialize failed");
            rmcp::ErrorData::internal_error(e.to_string(), None)
        })?;
        tracing::info!(
            port = entry.port,
            pid = entry.pid,
            dir = %p.dir,
            tick_secs = ?p.tick,
            "viewer.lease acquired"
        );
        Ok(CallToolResult::success(vec![Content::text(payload)]))
    }

    /// List active viewer leases.
    #[tool(
        name = "viewer.list",
        description = "List active viewer leases",
        annotations(read_only_hint = true, idempotent_hint = true)
    )]
    async fn viewer_list(&self) -> Result<CallToolResult, rmcp::ErrorData> {
        let store = self.lease_store.lock().await;
        let entries = store.list();
        let payload = serde_json::to_string(entries).map_err(|e| {
            tracing::error!(error = %e, "viewer.list serialize failed");
            rmcp::ErrorData::internal_error(e.to_string(), None)
        })?;
        Ok(CallToolResult::success(vec![Content::text(payload)]))
    }

    /// Release a viewer lease and stop the child process.
    #[tool(
        name = "viewer.release",
        description = "Release a viewer lease and stop the child process",
        annotations(destructive_hint = true)
    )]
    async fn viewer_release(
        &self,
        Parameters(p): Parameters<ReleaseParams>,
    ) -> Result<CallToolResult, rmcp::ErrorData> {
        let mut store = self.lease_store.lock().await;
        store.release(p.port).await.map_err(|e| {
            tracing::error!(error = %e, port = p.port, "viewer.release failed");
            rmcp::ErrorData::internal_error(e.to_string(), None)
        })?;
        tracing::info!(port = p.port, "viewer.release completed");
        Ok(CallToolResult::success(vec![Content::text(format!(
            "released port {}",
            p.port
        ))]))
    }

    /// Write a report Markdown file to <dir>/render-site/reports/.
    #[tool(
        name = "report.write",
        description = "Write a report Markdown file to <dir>/render-site/reports/"
    )]
    async fn report_write(
        &self,
        Parameters(p): Parameters<WriteReportParams>,
    ) -> Result<CallToolResult, rmcp::ErrorData> {
        let path = writer::write_report(PathBuf::from(&p.dir).as_path(), &p.title, &p.body)
            .await
            .map_err(|e| {
                tracing::error!(error = %e, dir = %p.dir, title = %p.title, "report.write failed");
                rmcp::ErrorData::internal_error(e.to_string(), None)
            })?;
        tracing::info!(file = %path.display(), "report.write completed");
        Ok(CallToolResult::success(vec![Content::text(
            path.display().to_string(),
        )]))
    }

    /// Write a draft Markdown file to <dir>/render-site/drafts/ with auto-incremented version.
    #[tool(
        name = "draft.write",
        description = "Write a draft Markdown file to <dir>/render-site/drafts/ with auto-incremented version",
        annotations(
            idempotent_hint = false,
            destructive_hint = false,
            read_only_hint = false
        )
    )]
    async fn draft_write(
        &self,
        Parameters(p): Parameters<WriteDraftParams>,
    ) -> Result<CallToolResult, rmcp::ErrorData> {
        let path = tokio::task::spawn_blocking(move || {
            writer::write_draft(PathBuf::from(&p.dir).as_path(), &p.issue_id, &p.body)
        })
        .await
        .map_err(|e| {
            tracing::error!(error = %e, "draft.write spawn_blocking failed");
            rmcp::ErrorData::internal_error(e.to_string(), None)
        })?
        .map_err(|e| {
            tracing::error!(error = %e, "draft.write failed");
            rmcp::ErrorData::internal_error(e.to_string(), None)
        })?;
        tracing::info!(file = %path.display(), "draft.write completed");
        Ok(CallToolResult::success(vec![Content::text(
            path.display().to_string(),
        )]))
    }
}

// `#[tool_handler]` fills in the ServerHandler tool methods by dispatching to
// the router generated by `#[tool_router]` on the impl block above — no
// hand-written handler code is needed here.
#[tool_handler]
impl ServerHandler for RenderSessionServer {}

// ---------------------------------------------------------------------------
// Entry point
// ---------------------------------------------------------------------------

#[tokio::main]
async fn main() -> Result<()> {
    // Log to stderr: stdout is reserved for the MCP stdio transport, and ANSI
    // codes are disabled since the consumer is typically a log file or pipe.
    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::from_default_env())
        .with_writer(std::io::stderr)
        .with_ansi(false)
        .init();

    tracing::info!("render-session-mcp starting");

    // Resolve the persisted lease-store location, then load it before
    // accepting any requests so tool calls start from known state.
    let store_path = match render_session_core::lease::store_path() {
        Ok(path) => path,
        Err(e) => {
            tracing::error!(error = %e, "failed to resolve lease store path");
            return Err(anyhow::anyhow!(e.to_string()));
        }
    };
    let lease_store = match LeaseStore::load(store_path).await {
        Ok(store) => store,
        Err(e) => {
            tracing::error!(error = %e, "failed to load lease store");
            return Err(anyhow::anyhow!(e.to_string()));
        }
    };

    let server = RenderSessionServer {
        lease_store: Arc::new(Mutex::new(lease_store)),
    };

    // Serve over stdio and block until the peer disconnects.
    let service = server
        .serve(stdio())
        .await
        .inspect_err(|e| tracing::error!("serve error: {:?}", e))?;

    service.waiting().await?;
    Ok(())
}

// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use tempfile::tempdir;
    use tokio::sync::Mutex;

    use render_session_core::lease::LeaseStore;

    /// Build an empty `LeaseStore` rooted at `path`.
    ///
    /// `LeaseStore::load` is async, so we bridge to sync with
    /// `block_in_place` + `block_on`. NOTE: `block_in_place` panics on a
    /// current-thread runtime, so any test calling this must use
    /// `flavor = "multi_thread"`. Loading a path that does not exist yet
    /// yields an empty store.
    fn make_store(path: std::path::PathBuf) -> LeaseStore {
        tokio::task::block_in_place(|| {
            tokio::runtime::Handle::current()
                .block_on(LeaseStore::load(path))
                .expect("load empty store")
        })
    }

    /// Verify that LeaseParams deserializes correctly with a numeric port.
    #[test]
    fn lease_params_deserialize() {
        let json = r#"{"port": 8080, "dir": "/tmp/test"}"#;
        let p: super::LeaseParams = serde_json::from_str(json).expect("deserialize");
        assert_eq!(p.port, Some(8080));
        assert_eq!(p.dir, "/tmp/test");
        assert_eq!(p.tick, None);
    }

    /// Verify that LeaseParams deserializes correctly with an absent port.
    #[test]
    fn lease_params_optional_port() {
        let json = r#"{"dir": "/tmp/test"}"#;
        let p: super::LeaseParams = serde_json::from_str(json).expect("deserialize");
        assert_eq!(p.port, None);
        assert_eq!(p.dir, "/tmp/test");
        assert_eq!(p.tick, None);
    }

    /// Verify that LeaseParams deserializes tick field correctly (Phase 4 (c)).
    #[test]
    fn lease_params_with_tick() {
        let json = r#"{"port": 8080, "dir": "/tmp/test", "tick": 5}"#;
        let p: super::LeaseParams = serde_json::from_str(json).expect("deserialize");
        assert_eq!(p.port, Some(8080));
        assert_eq!(p.dir, "/tmp/test");
        assert_eq!(p.tick, Some(5));
    }

    /// Verify that holding a MutexGuard across an .await point serialises two tasks.
    ///
    /// Task A acquires the lock and holds it for a short sleep (simulating async I/O).
    /// Task B attempts to acquire the lock concurrently.
    /// After task A releases the lock, task B must acquire it.
    /// Both tasks complete without deadlock within the timeout.
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_mutex_guard_held_across_await() {
        let td = tempdir().expect("tempdir");
        let path = td.path().join("leases.json");
        let store = Arc::new(Mutex::new(make_store(path)));

        let store_a = Arc::clone(&store);
        let store_b = Arc::clone(&store);

        // Counter to track lock acquisition order: each task records the
        // sequence number it observed while holding the lock.
        let counter = Arc::new(std::sync::atomic::AtomicU32::new(0));
        let counter_a = Arc::clone(&counter);
        let counter_b = Arc::clone(&counter);

        let task_a = tokio::spawn(async move {
            let _guard = store_a.lock().await;
            // Simulate async work while holding the lock.
            tokio::time::sleep(tokio::time::Duration::from_millis(50)).await;
            // Task A should acquire the lock first (seq == 0) or at least hold it exclusively.
            counter_a.fetch_add(1, std::sync::atomic::Ordering::SeqCst)
        });

        // Give task A a head-start so it almost certainly acquires the lock first.
        tokio::time::sleep(tokio::time::Duration::from_millis(5)).await;

        let task_b = tokio::spawn(async move {
            let _guard = store_b.lock().await;
            counter_b.fetch_add(1, std::sync::atomic::Ordering::SeqCst)
        });

        let result = tokio::time::timeout(tokio::time::Duration::from_secs(10), async {
            let seq_a = task_a.await.expect("task_a");
            let seq_b = task_b.await.expect("task_b");
            (seq_a, seq_b)
        })
        .await
        .expect("tasks must complete within timeout (deadlock check)");

        // The two sequence numbers must be 0 and 1 (exclusive access, no concurrent entry).
        let (s_a, s_b) = result;
        assert!(
            (s_a == 0 && s_b == 1) || (s_a == 1 && s_b == 0),
            "tasks must have taken the lock in strict serial order: s_a={s_a}, s_b={s_b}"
        );
    }
}