// harn_cli/cli/eval.rs
//! Clap definitions for `harn eval` and its subcommands.
//!
//! The bare form `harn eval <path>` evaluates a run record, run directory,
//! eval manifest, or `.harn` pipeline (legacy entrypoint, dispatched through
//! `eval_run_record`). The `harn eval prompt <file> --fleet <models>`
//! subcommand renders (and optionally runs / judges) a single
//! `.harn.prompt` template against a fleet of models so authors can compare
//! the wire envelope each capability profile materializes.

use std::path::PathBuf;

use clap::{Args, Subcommand, ValueEnum};
14#[derive(Debug, Args)]
15#[command(args_conflicts_with_subcommands = true)]
16pub struct EvalArgs {
17    /// Run record path, run directory, eval manifest path, or `.harn` pipeline.
18    /// Required unless a subcommand (e.g. `prompt`) is used.
19    pub path: Option<String>,
20    /// Optional baseline run record for diffing.
21    #[arg(long)]
22    pub compare: Option<String>,
23    /// Run a pipeline twice and compare the baseline against this structural experiment.
24    #[arg(long = "structural-experiment")]
25    pub structural_experiment: Option<String>,
26    /// Replay LLM responses from a JSONL fixture file when `path` is a `.harn` pipeline.
27    #[arg(
28        long = "llm-mock",
29        value_name = "PATH",
30        conflicts_with = "llm_mock_record"
31    )]
32    pub llm_mock: Option<String>,
33    /// Record executed LLM responses into a JSONL fixture file when `path` is a `.harn` pipeline.
34    #[arg(
35        long = "llm-mock-record",
36        value_name = "PATH",
37        conflicts_with = "llm_mock"
38    )]
39    pub llm_mock_record: Option<String>,
40    /// Positional arguments forwarded to `harn run <pipeline.harn> -- ...` when
41    /// `path` is a pipeline file and `--structural-experiment` is set.
42    #[arg(last = true)]
43    pub argv: Vec<String>,
44    #[command(subcommand)]
45    pub command: Option<EvalCommand>,
46}
47
// Subcommands reachable as `harn eval <subcommand>`. Because `EvalArgs` sets
// `args_conflicts_with_subcommands = true`, using a subcommand excludes the
// bare `harn eval <path>` argument form.
#[derive(Debug, Subcommand)]
pub enum EvalCommand {
    /// Render and optionally run a `.harn.prompt` across a fleet of models.
    Prompt(EvalPromptArgs),
}
53
54#[derive(Debug, Args)]
55pub struct EvalPromptArgs {
56    /// Path to a `.harn.prompt` (or `.prompt`) template.
57    pub file: PathBuf,
58    /// Fleet of model selectors (comma-separated, repeatable).
59    /// Each entry is either a model alias (`claude-opus-4-7`) or a
60    /// `provider:model` selector (`ollama:qwen3.5`). Mutually exclusive
61    /// with `--fleet-name`.
62    #[arg(
63        long,
64        value_delimiter = ',',
65        required_unless_present = "fleet_name",
66        conflicts_with = "fleet_name"
67    )]
68    pub fleet: Vec<String>,
69    /// Named fleet from `harn.toml` `[eval.fleets.<name>]`.
70    #[arg(long = "fleet-name")]
71    pub fleet_name: Option<String>,
72    /// JSON file with bindings injected into the template scope.
73    #[arg(long)]
74    pub bindings: Option<PathBuf>,
75    /// Prompt context-quality fixture(s) that score artifact selection,
76    /// stale/noisy rejection, budget adherence, and logical-section shape.
77    #[arg(long = "context-fixture")]
78    pub context_fixture: Vec<PathBuf>,
79    /// Evaluation mode.
80    #[arg(long, value_enum, default_value_t = EvalPromptMode::Render)]
81    pub mode: EvalPromptMode,
82    /// Output format.
83    #[arg(long, value_enum, default_value_t = EvalPromptOutput::Terminal)]
84    pub output: EvalPromptOutput,
85    /// Output destination for HTML / JSON (defaults to stdout).
86    #[arg(long = "out-file", short = 'o')]
87    pub out_file: Option<PathBuf>,
88    /// Maximum concurrent model invocations in run/judge modes.
89    #[arg(long, default_value_t = 4)]
90    pub max_concurrent: usize,
91    /// Optional judge prompt template. When unset, a built-in equivalence
92    /// judge is used.
93    #[arg(long = "judge-template")]
94    pub judge_template: Option<PathBuf>,
95    /// Model used for `--mode judge` evaluation.
96    #[arg(long = "judge-model", default_value = "claude-opus-4-7")]
97    pub judge_model: String,
98    /// Maximum tokens for `--mode run` / `--mode judge` calls.
99    #[arg(long = "max-tokens", default_value_t = 1024)]
100    pub max_tokens: i64,
101    /// Treat unauthenticated providers as errors rather than skipping them.
102    #[arg(long = "fail-on-unauthorized")]
103    pub fail_on_unauthorized: bool,
104}
105
// Values accepted by `--mode`. Per the variant docs, each later mode performs
// everything the earlier ones do and adds a step (render -> run -> judge).
#[derive(Debug, Clone, Copy, ValueEnum)]
pub enum EvalPromptMode {
    /// Render the template against each model's capability profile.
    Render,
    /// Render + execute against each model and collect outputs.
    Run,
    /// Render + run + LLM-as-judge equivalence scoring.
    Judge,
}
115
// Values accepted by `--output`. `Terminal` is the default
// (`default_value_t` on `EvalPromptArgs::output`); `Json` and `Html` go to
// `--out-file` when set, otherwise stdout.
#[derive(Debug, Clone, Copy, ValueEnum)]
pub enum EvalPromptOutput {
    /// Human-oriented output printed to the terminal (the default).
    Terminal,
    /// JSON report, written to `--out-file` or stdout.
    Json,
    /// HTML report, written to `--out-file` or stdout.
    Html,
}