use std::path::PathBuf;
use std::process::Command;
/// Path to the compiled `cortex` binary under test.
///
/// Cargo exports `CARGO_BIN_EXE_<name>` at compile time for integration
/// tests, so this never fails at runtime.
fn cortex_bin() -> PathBuf {
    let exe = env!("CARGO_BIN_EXE_cortex");
    PathBuf::from(exe)
}
/// Run `cortex` with the given args in a configuration-free sandbox.
///
/// Both `XDG_CONFIG_HOME` and `HOME` are pointed at a fresh temp dir so no
/// real user config is picked up, and every `CORTEX_*` override that could
/// select an LLM backend is stripped from the environment. Waits for the
/// process and returns its captured output. The temp dir is deleted when
/// this function returns, which is safe because `output()` has already
/// waited for the child to exit.
fn run_no_config(args: &[&str]) -> std::process::Output {
    let sandbox = tempfile::tempdir().expect("tempdir");
    let mut cmd = Command::new(cortex_bin());
    cmd.args(args)
        .env("XDG_CONFIG_HOME", sandbox.path())
        .env("HOME", sandbox.path());
    // Make sure ambient backend configuration cannot leak into the test.
    for var in [
        "CORTEX_LLM_BACKEND",
        "CORTEX_LLM_MODEL",
        "CORTEX_LLM_ENDPOINT",
        "CORTEX_CONFIG",
    ] {
        cmd.env_remove(var);
    }
    cmd.output().expect("spawn cortex")
}
/// Assert that the process exited with `expected`, dumping captured
/// stdout/stderr in the failure message for easy diagnosis.
///
/// Panics if the process was terminated by a signal (no exit code).
fn assert_exit(out: &std::process::Output, expected: i32) {
    let stdout = String::from_utf8_lossy(&out.stdout);
    let stderr = String::from_utf8_lossy(&out.stderr);
    let code = out.status.code().expect("process exited via signal");
    assert_eq!(
        code, expected,
        "expected exit {expected}, got {code}\nstdout: {stdout}\nstderr: {stderr}",
    );
}
#[test]
fn models_list_without_config_prints_guidance() {
    // With no config present, `models list` should succeed and print
    // guidance telling the user how to configure an LLM backend.
    let out = run_no_config(&["models", "list"]);
    assert_exit(&out, 0);
    let stdout = String::from_utf8_lossy(&out.stdout);
    let required = [
        ("[llm]", "guidance must mention [llm] TOML section"),
        ("backend", "guidance must mention the backend key"),
        ("ollama", "guidance must mention ollama as a backend option"),
    ];
    for (needle, why) in required {
        assert!(stdout.contains(needle), "{why}; got:\n{stdout}");
    }
}
#[test]
fn models_list_explicit_ollama_backend_skips_guidance() {
    // An explicit `--backend ollama` (before the subcommand) must bypass
    // the no-config guidance path, even if no config file exists.
    let out = run_no_config(&["models", "--backend", "ollama", "list"]);
    let stdout = String::from_utf8_lossy(&out.stdout);
    assert!(
        !stdout.contains("No LLM backend configured"),
        "explicit --backend ollama must NOT fall through to the no-config guidance path"
    );
}
#[test]
fn models_list_subcommand_first_explicit_ollama_skips_guidance() {
    // Same as above but with `--backend` placed after the subcommand;
    // flag position must not change backend resolution.
    let out = run_no_config(&["models", "list", "--backend", "ollama"]);
    let stdout = String::from_utf8_lossy(&out.stdout);
    assert!(
        !stdout.contains("No LLM backend configured"),
        "explicit --backend ollama must NOT fall through to the no-config guidance path"
    );
}
#[test]
fn models_without_subcommand_exits_usage() {
    // Bare `models` with no subcommand is a usage error (exit code 2,
    // the conventional clap usage-error status).
    assert_exit(&run_no_config(&["models"]), 2);
}
#[test]
#[ignore = "requires a running local Ollama instance"]
fn models_list_ollama_live_returns_model_list() {
    // Live smoke test: with a local Ollama running, listing models must
    // succeed and print the backend/endpoint header.
    let out = run_no_config(&["models", "list", "--backend", "ollama"]);
    assert_exit(&out, 0);
    let stdout = String::from_utf8_lossy(&out.stdout);
    let header_present = stdout.contains("Available models (ollama @");
    assert!(
        header_present,
        "live output must contain the header; got:\n{stdout}"
    );
}