pub struct Args { /* 47 fields */
pub prompt: String,
pub transform: String,
pub model: String,
pub provider: Provider,
pub visual: bool,
pub heatmap: bool,
pub orchestrator: bool,
pub web: bool,
pub port: u16,
pub research: bool,
pub runs: u32,
pub output: String,
pub system_a: Option<String>,
pub top_logprobs: u8,
pub system_b: Option<String>,
pub db: Option<String>,
pub significance: bool,
pub heatmap_export: Option<String>,
pub heatmap_min_confidence: f32,
pub heatmap_sort_by: String,
pub record: Option<String>,
pub replay: Option<String>,
pub rate: Option<f64>,
pub seed: Option<u64>,
pub log_db: Option<String>,
pub baseline: bool,
pub prompt_file: Option<String>,
pub diff_terminal: bool,
pub json_stream: bool,
pub completions: Option<Shell>,
pub rate_range: Option<String>,
pub dry_run: bool,
pub template: Option<String>,
pub min_confidence: Option<f64>,
pub format: String,
pub collapse_window: usize,
pub orchestrator_url: String,
pub max_retries: u32,
pub anthropic_max_tokens: u32,
pub synonym_file: Option<String>,
pub api_key: Option<String>,
pub replay_speed: f64,
pub timeout: u64,
pub export_timeseries: Option<String>,
pub json_schema: bool,
pub list_models: Option<String>,
pub validate_config: bool,
}

Fields
prompt: String — Input prompt to send to the LLM (optional when using --web)
transform: String — Transformation type (reverse, uppercase, mock, noise)
model: String — Model name (e.g. gpt-4, claude-sonnet-4-20250514)
provider: Provider — LLM provider: openai or anthropic
visual: bool — Enable visual mode with color-coded tokens
heatmap: bool — Enable token importance heatmap
orchestrator: bool — Route through tokio-prompt-orchestrator MCP pipeline at localhost:3000
web: bool — Launch web UI on localhost instead of terminal output
port: u16 — Port for the web UI server
research: bool — Enable headless research mode: runs N times and outputs JSON stats
runs: u32 — Number of runs in research mode
output: String — Output file path for research JSON (defaults to stdout)
system_a: Option<String> — System prompt A for A/B experiment mode
top_logprobs: u8 — Number of top alternative tokens to return per position (OpenAI only, 0-20)
system_b: Option<String> — System prompt B for A/B experiment mode
db: Option<String> — Path to SQLite database for persisting experiment results (optional)
significance: bool — Compute statistical significance (two-sample t-test) when >= 2 A/B runs available
heatmap_export: Option<String> — Export per-position token confidence heatmap to CSV at this path
heatmap_min_confidence: f32 — Minimum average confidence to include a position in heatmap CSV export (0.0-1.0)
heatmap_sort_by: String — Sort heatmap CSV rows by "position" (default) or "confidence"
record: Option<String> — Record token events to a JSON replay file at this path
replay: Option<String> — Replay token events from a JSON file (bypasses live LLM call)
rate: Option<f64> — Fraction of tokens to intercept and transform (0.0-1.0, default 0.5). At 0.5 every other token is transformed; at 0.3 roughly one in three. Uses a deterministic Bresenham spread so results are reproducible when combined with --seed.
Stored as Option<f64> so the config-file loader can distinguish "the user explicitly passed --rate" from "the user left it at the default". The effective value is rate.unwrap_or(0.5).
seed: Option<u64> — Fixed RNG seed for reproducible Noise/Chaos transforms. Omit to use entropy-seeded randomness (default behaviour).
log_db: Option<String> — Path to SQLite experiment log database (requires the sqlite-log feature)
baseline: bool — Enable per-position confidence baseline comparison (research mode)
prompt_file: Option<String> — Path to a file with one prompt per line for batch research
diff_terminal: bool — Run two parallel streams (OpenAI + Anthropic) and print a side-by-side diff in the terminal
json_stream: bool — Print one JSON line per token to stdout instead of colored text
completions: Option<Shell> — Generate shell completions for the given shell and exit
rate_range: Option<String> — Rate range for stochastic experiments, e.g. "0.3-0.7". When set, the interceptor randomly picks a rate in [min, max] for each run. Overrides --rate when provided. Format: "MIN-MAX" (e.g. "0.2-0.8").
dry_run: bool — Dry-run mode: show what transforms would be applied without calling any API. Applies the configured transform to a sample token list and prints results.
template: Option<String> — Prompt template with an {input} placeholder. When set, the positional prompt is substituted into the template. Example: "Answer this: {input}"
min_confidence: Option<f64> — Only transform tokens whose API confidence is below this threshold. Tokens with confidence >= threshold are passed through unchanged. When no confidence data is available (Anthropic), falls back to rate-based selection. Range: 0.0-1.0. Example: --min-confidence 0.8
format: String — Output format for research mode: "json" (default) or "jsonl" (one JSON object per line).
collapse_window: usize — Number of consecutive low-confidence tokens to consider a "collapse" in research mode. Default: 5.
orchestrator_url: String — Base URL for the MCP orchestrator pipeline (default: http://localhost:3000).
max_retries: u32 — Maximum API retry attempts on 429/5xx errors (default: 3).
anthropic_max_tokens: u32 — Maximum tokens in the Anthropic response (default: 4096). Ignored when using the OpenAI provider.
synonym_file: Option<String> — Path to a TSV or key=value file of additional synonym pairs to merge with the built-in map. Format: one word\treplacement or word = replacement pair per line.
api_key: Option<String> — Optional API key required for /api/ endpoints in web UI mode. When set, requests to /api/* must include Authorization: Bearer <key>.
replay_speed: f64 — Replay speed multiplier for --replay mode. 1.0 = real-time, 2.0 = double speed, 0.0 = instant.
timeout: u64 — Stream hang timeout in seconds. The stream is forcibly dropped if no token arrives within this duration. Default: 120 (2 minutes). Set to 0 to disable.
export_timeseries: Option<String> — Export per-run timeseries data to a CSV file at this path. Columns: run,token_index,confidence,perplexity
json_schema: bool — Print the embedded research JSON schema and exit.
list_models: Option<String> — List known models for a provider: "openai", "anthropic", or "all".
validate_config: bool — Validate configuration (print resolved values and exit).
Trait Implementations

impl Args for Args

    fn augment_args<'b>(__clap_app: Command) -> Command

    fn augment_args_for_update<'b>(__clap_app: Command) -> Command
        Augments the given Command so it can instantiate self via
        FromArgMatches::update_from_arg_matches_mut.

impl CommandFactory for Args

impl FromArgMatches for Args

    fn from_arg_matches(__clap_arg_matches: &ArgMatches) -> Result<Self, Error>

    fn from_arg_matches_mut(__clap_arg_matches: &mut ArgMatches) -> Result<Self, Error>

    fn update_from_arg_matches(&mut self, __clap_arg_matches: &ArgMatches) -> Result<(), Error>
        Assigns values from ArgMatches to self.

    fn update_from_arg_matches_mut(&mut self, __clap_arg_matches: &mut ArgMatches) -> Result<(), Error>
        Assigns values from ArgMatches to self.