//! Integration coverage for `cortex models list`.
//!
//! These tests shell out to the compiled `cortex` binary. Each test
//! controls the environment precisely so the command's config-resolution
//! logic produces a deterministic outcome regardless of what is installed on
//! the host.
//!
//! The real-Ollama integration test is `#[ignore]`; it is run manually or in
//! a CI job that spins up an Ollama instance.

use std::path::PathBuf;
use std::process::Command;

fn cortex_bin() -> PathBuf {
    PathBuf::from(env!("CARGO_BIN_EXE_cortex"))
}

/// Run `cortex` with the given args and a fully-isolated environment that has
/// no `[llm]` configuration: no `CORTEX_CONFIG`, no `CORTEX_LLM_*` vars, and
/// an XDG config home pointed at an empty temp dir so the default config file
/// path resolves to a missing file (which the loader treats as absent config).
fn run_no_config(args: &[&str]) -> std::process::Output {
    // Use a temp directory that does not contain a cortex config file.
    let tmp = tempfile::tempdir().expect("tempdir");

    Command::new(cortex_bin())
        .args(args)
        // Point XDG_CONFIG_HOME to an empty temp dir so the default
        // `$XDG_CONFIG_HOME/cortex/config.toml` does not exist.
        .env("XDG_CONFIG_HOME", tmp.path())
        // Unset env-layer LLM overrides so they cannot bleed in from the
        // developer's shell.
        .env_remove("CORTEX_LLM_BACKEND")
        .env_remove("CORTEX_LLM_MODEL")
        .env_remove("CORTEX_LLM_ENDPOINT")
        .env_remove("CORTEX_CONFIG")
        // Keep HOME predictable.
        .env("HOME", tmp.path())
        .output()
        .expect("spawn cortex")
}
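
/// Config-file counterpart to `run_no_config`: a sketch that writes a caller-
/// supplied `[llm]` stanza to `$XDG_CONFIG_HOME/cortex/config.toml` (the
/// default path noted above) and runs the binary against it. The TOML schema
/// callers pass in is an assumption based on the keys the guidance tests
/// below assert on; adjust it if the real config format differs.
fn run_with_config(args: &[&str], llm_toml: &str) -> std::process::Output {
    let tmp = tempfile::tempdir().expect("tempdir");
    let cfg_dir = tmp.path().join("cortex");
    std::fs::create_dir_all(&cfg_dir).expect("create config dir");
    std::fs::write(cfg_dir.join("config.toml"), llm_toml).expect("write config");

    Command::new(cortex_bin())
        .args(args)
        .env("XDG_CONFIG_HOME", tmp.path())
        .env("HOME", tmp.path())
        // Same env isolation as `run_no_config`, so only the file layer applies.
        .env_remove("CORTEX_LLM_BACKEND")
        .env_remove("CORTEX_LLM_MODEL")
        .env_remove("CORTEX_LLM_ENDPOINT")
        .env_remove("CORTEX_CONFIG")
        .output()
        .expect("spawn cortex")
}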

fn assert_exit(out: &std::process::Output, expected: i32) {
    let code = out.status.code().expect("process exited via signal");
    assert_eq!(
        code,
        expected,
        "expected exit {expected}, got {code}\nstdout: {}\nstderr: {}",
        String::from_utf8_lossy(&out.stdout),
        String::from_utf8_lossy(&out.stderr),
    );
}

/// When no `[llm]` config exists the command exits 0 and prints guidance text
/// explaining how to configure a backend — it does not attempt a network
/// connection or return a non-zero exit code.
#[test]
fn models_list_without_config_prints_guidance() {
    let out = run_no_config(&["models", "list"]);
    assert_exit(&out, 0);

    let stdout = String::from_utf8_lossy(&out.stdout);
    // The guidance must mention the TOML stanza operators need to add.
    assert!(
        stdout.contains("[llm]"),
        "guidance must mention [llm] TOML section; got:\n{stdout}"
    );
    assert!(
        stdout.contains("backend"),
        "guidance must mention the backend key; got:\n{stdout}"
    );
    assert!(
        stdout.contains("ollama"),
        "guidance must mention ollama as a backend option; got:\n{stdout}"
    );
}

/// When no backend is configured and `--backend ollama` is given explicitly,
/// the command attempts to contact the default loopback endpoint. On a CI host
/// without Ollama running this results in a transport error (exit != 0), which
/// proves the flag is honoured and the network path is taken.
///
/// We do NOT assert the exact exit code here because on a machine where Ollama
/// happens to be running the command may succeed. We only check that the
/// guidance path (exit 0 with printed config snippet) is NOT taken.
#[test]
fn models_list_explicit_ollama_backend_skips_guidance() {
    // clap accepts the global flag between `models` and `list`; the next test
    // exercises the subcommand-first ordering, so both orderings are covered.
    let out = run_no_config(&["models", "--backend", "ollama", "list"]);
    let stdout = String::from_utf8_lossy(&out.stdout);

    // The offline no-config path is identified by its guidance text on
    // stdout (the exit code is deliberately not checked; see the doc above).
    // That path must not be taken when --backend is explicit.
    let is_guidance = stdout.contains("No LLM backend configured");
    assert!(
        !is_guidance,
        "explicit --backend ollama must NOT fall through to the no-config guidance path"
    );
}

/// Same guard for the subcommand-first ordering used in the spec:
/// `cortex models list --backend ollama`.
#[test]
fn models_list_subcommand_first_explicit_ollama_skips_guidance() {
    let out = run_no_config(&["models", "list", "--backend", "ollama"]);
    let stdout = String::from_utf8_lossy(&out.stdout);
    let is_guidance = stdout.contains("No LLM backend configured");
    assert!(
        !is_guidance,
        "explicit --backend ollama must NOT fall through to the no-config guidance path"
    );
}
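
/// Sketch of the same guard for the env layer. This assumes
/// `CORTEX_LLM_BACKEND` participates in config resolution the same way the
/// `--backend` flag does; it is `#[ignore]`d until that precedence is
/// confirmed. The command is built inline because `run_no_config`
/// deliberately strips the variable.
#[test]
#[ignore = "assumes CORTEX_LLM_BACKEND mirrors --backend"]
fn models_list_env_backend_skips_guidance() {
    let tmp = tempfile::tempdir().expect("tempdir");
    let out = Command::new(cortex_bin())
        .args(["models", "list"])
        .env("XDG_CONFIG_HOME", tmp.path())
        .env("HOME", tmp.path())
        .env("CORTEX_LLM_BACKEND", "ollama")
        .env_remove("CORTEX_LLM_MODEL")
        .env_remove("CORTEX_LLM_ENDPOINT")
        .env_remove("CORTEX_CONFIG")
        .output()
        .expect("spawn cortex");
    let stdout = String::from_utf8_lossy(&out.stdout);
    assert!(
        !stdout.contains("No LLM backend configured"),
        "env-layer backend must NOT fall through to the no-config guidance path"
    );
}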

/// `cortex models` with no subcommand exits with a usage error (clap's
/// default behaviour for missing required subcommands).
#[test]
fn models_without_subcommand_exits_usage() {
    let out = run_no_config(&["models"]);
    // clap exits 2 for missing subcommands.
    assert_exit(&out, 2);
}
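
/// Hedged companion to the exit-code check above: clap prints its
/// missing-subcommand error to stderr and conventionally includes a `Usage`
/// line. Ignored by default since the exact wording depends on the clap
/// version in use.
#[test]
#[ignore = "assumes clap's default usage wording"]
fn models_without_subcommand_prints_usage_text() {
    let out = run_no_config(&["models"]);
    let stderr = String::from_utf8_lossy(&out.stderr);
    assert!(
        stderr.contains("Usage"),
        "missing-subcommand error should include usage text; got:\n{stderr}"
    );
}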

/// Live Ollama integration test — requires a running Ollama instance.
///
/// Run with:
/// ```sh
/// cargo test -p cortex-mem --test cli_models -- --ignored
/// ```
#[test]
#[ignore = "requires a running local Ollama instance"]
fn models_list_ollama_live_returns_model_list() {
    let out = run_no_config(&["models", "list", "--backend", "ollama"]);
    assert_exit(&out, 0);
    let stdout = String::from_utf8_lossy(&out.stdout);
    assert!(
        stdout.contains("Available models (ollama @"),
        "live output must contain the header; got:\n{stdout}"
    );
}
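
/// Live companion that exercises the config-file layer end to end using the
/// `run_with_config` sketch above. The `[llm]` TOML body mirrors the keys the
/// guidance tests assert on; the schema is an assumption, so adjust it if the
/// real config format differs.
#[test]
#[ignore = "requires a running local Ollama instance; config schema is assumed"]
fn models_list_ollama_live_via_config_file() {
    let out = run_with_config(&["models", "list"], "[llm]\nbackend = \"ollama\"\n");
    assert_exit(&out, 0);
    let stdout = String::from_utf8_lossy(&out.stdout);
    assert!(
        stdout.contains("Available models (ollama @"),
        "live output must contain the header; got:\n{stdout}"
    );
}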