use clap::{CommandFactory, Parser, Subcommand, ValueEnum};
use clap_complete::{generate, Shell};
use std::path::{Path, PathBuf};
use tracing::{info, Level};
use tracing_subscriber::FmtSubscriber;
use reasonkit::thinktool::llm::LlmProvider;
use reasonkit::thinktool::{BudgetConfig, ExecutorConfig, ProtocolExecutor, ProtocolInput};
#[path = "bin/mcp_cli.rs"]
mod mcp_cli;
use mcp_cli::{run_mcp_command, McpCli};
mod cli;
use cli::repl::run_repl;
// Top-level argument parser for the `rk` binary (clap derive).
// NOTE: `//` comments are used deliberately throughout the clap items —
// `///` doc comments would be picked up by clap as help text and change
// the generated --help output.
#[derive(Parser)]
#[command(name = "rk")]
#[command(author = "ReasonKit Team <team@reasonkit.sh>")]
#[command(version)]
#[command(about = "The Reasoning Engine — Auditable Reasoning for Production AI")]
#[command(
long_about = "The Reasoning Engine — Auditable Reasoning for Production AI
ReasonKit transforms AI outputs into structured, auditable decisions using
5 ThinkTools and research-backed protocols.
THINKTOOLS:
• GigaThink - Divergent exploration (10+ perspectives)
• LaserLogic - Precision deductive reasoning, fallacy detection
• BedRock - First principles decomposition
• ProofGuard - Multi-source verification (3+ sources)
• BrutalHonesty - Adversarial self-critique
PROFILES:
--quick 70% confidence Fast drafts, initial exploration
--balanced 80% confidence Standard analysis (default)
--deep 85% confidence Important decisions
--paranoid 95% confidence Critical verification
--graph 80% confidence DAG-based graph reasoning
--consistent 85% confidence Multi-path consistency voting
--verified 90% confidence Step-level PRM verification
EXAMPLES:
rk think \"Should I accept this job offer?\"
rk think --profile paranoid \"Is this investment safe?\"
rk think --protocol gigathink \"Explore market opportunities\"
DOCS: https://reasonkit.sh/docs
"
)]
struct Cli {
// -v/--verbose, repeatable; the occurrence count drives the log level
// (see setup_logging: 0=WARN, 1=INFO, 2=DEBUG, 3+=TRACE).
#[arg(short, long, action = clap::ArgAction::Count)]
verbose: u8,
// Optional config file path (env: REASONKIT_CONFIG). Parsed but not
// read anywhere in this file.
#[arg(short, long, env = "REASONKIT_CONFIG")]
config: Option<PathBuf>,
// Data directory (env: REASONKIT_DATA_DIR). Parsed but not read
// anywhere in this file.
#[arg(short, long, env = "REASONKIT_DATA_DIR", default_value = "./data")]
data_dir: PathBuf,
#[command(subcommand)]
command: Commands,
// Hidden flag: force synchronous telemetry initialization instead of
// the fire-and-forget background task (see main).
#[arg(long, hide = true)]
init_telemetry: bool,
}
// All `rk` subcommands. Several are feature-gated (`memory`,
// `mcp-server-pro`). See main() for which are live versus demo stubs
// ("locked" below = routed to unimplemented_command / simulate_*).
#[derive(Subcommand)]
enum Commands {
// Run a reasoning protocol or profile against a query (primary command).
#[command(alias = "t")]
Think {
// Optional so `--list` / `--list-providers` can run without a query;
// main() errors out if it is missing otherwise.
#[arg(required_unless_present_any = ["list", "list_providers"])]
query: Option<String>,
// Protocol id; when set it takes precedence over --profile (see main).
#[arg(short, long)]
protocol: Option<String>,
// Profile name; main() falls back to "balanced" when unset.
#[arg(long)]
profile: Option<String>,
// LLM backend: API providers plus locally installed *-cli wrappers.
#[arg(long, default_value = "anthropic")]
provider: ProviderArg,
// Model override for the chosen provider.
#[arg(short, long)]
model: Option<String>,
#[arg(short, long, default_value = "0.7")]
temperature: f64,
#[arg(long, default_value = "2000")]
max_tokens: u32,
// Budget spec such as "30s", "5m", "1000t", "$0.50" (see parse_budget).
#[arg(short, long)]
budget: Option<String>,
// Force the mock executor (no LLM calls; simulated output).
#[arg(long)]
mock: bool,
#[arg(long)]
save_trace: bool,
#[arg(long)]
trace_dir: Option<PathBuf>,
#[arg(short, long, default_value = "text")]
format: OutputFormat,
// Print available protocols/profiles and exit.
#[arg(long)]
list: bool,
// Print the supported provider table and exit.
#[arg(long)]
list_providers: bool,
},
// Deep web research. Demo stub: main() only consumes `query`; the
// remaining flags are accepted but ignored for now.
#[command(alias = "dive", alias = "research", alias = "deep", alias = "d")]
Web {
query: String,
#[arg(short, long, default_value = "standard")]
depth: WebDepth,
// NOTE(review): a bool flag with default_value = "true" cannot be
// switched off from the command line — confirm intent.
#[arg(long, default_value = "true")]
web: bool,
#[arg(long, default_value = "true")]
kb: bool,
#[arg(long, default_value = "anthropic")]
provider: ProviderArg,
#[arg(short, long, default_value = "text")]
format: OutputFormat,
#[arg(short, long)]
output: Option<PathBuf>,
},
// Multi-source claim verification. Demo stub: main() only uses `claim`.
#[command(alias = "v", alias = "triangulate")]
Verify {
claim: String,
#[arg(short, long, default_value = "3")]
sources: usize,
// NOTE(review): same always-true bool flag pattern as Web above.
#[arg(long, default_value = "true")]
web: bool,
#[arg(long, default_value = "true")]
kb: bool,
#[arg(long)]
anchor: bool,
#[arg(short, long, default_value = "text")]
format: OutputFormat,
#[arg(short, long)]
output: Option<PathBuf>,
},
// MCP client commands (delegated to the mcp_cli module).
Mcp(McpCli),
// Run the MCP server (pro feature).
#[cfg(feature = "mcp-server-pro")]
ServeMcp,
// Document ingestion (locked in this distribution).
#[cfg(feature = "memory")]
Ingest {
path: PathBuf,
#[arg(short = 't', long, default_value = "paper")]
doc_type: String,
#[arg(short, long)]
recursive: bool,
},
// Knowledge-base query (locked).
#[cfg(feature = "memory")]
Query {
query: String,
#[arg(short = 'k', long, default_value = "5")]
top_k: usize,
#[arg(long)]
hybrid: bool,
#[arg(long)]
raptor: bool,
#[arg(short, long, default_value = "text")]
format: String,
},
// Index maintenance (locked).
#[cfg(feature = "memory")]
Index {
#[command(subcommand)]
action: IndexAction,
},
// Corpus statistics (locked).
Stats,
// Knowledge-base export (locked).
#[cfg(feature = "memory")]
Export {
output: PathBuf,
#[arg(short, long, default_value = "jsonl")]
format: String,
},
// HTTP server (locked).
Serve {
#[arg(long, default_value = "127.0.0.1")]
host: String,
#[arg(short, long, default_value = "8080")]
port: u16,
},
// Reasoning-trace inspection (demo stub in main()).
Trace {
#[command(subcommand)]
action: TraceAction,
},
// RAG query/retrieval (locked).
#[cfg(feature = "memory")]
Rag {
#[command(subcommand)]
action: RagAction,
},
// RAG performance benchmarking/monitoring (implemented; see main()).
#[cfg(feature = "memory")]
RagPerf {
#[command(subcommand)]
action: RagPerfAction,
},
// Performance telemetry report (demo stub in main()).
#[command(alias = "m")]
Metrics {
#[command(subcommand)]
action: MetricsAction,
},
// Generate a shell completion script on stdout.
Completions {
#[arg(value_enum)]
shell: Shell,
},
// Interactive REPL session.
#[command(alias = "shell", alias = "interactive")]
Repl {
#[arg(long)]
banner: bool,
},
}
// Research depth presets for `rk web`. Currently accepted but unused by
// the demo stub (main() ignores it).
#[derive(Clone, Copy, Debug, ValueEnum)]
enum WebDepth {
Quick,
Standard,
Deep,
Exhaustive,
}
// Subcommands of `rk rag` (locked in this distribution; see main()).
#[cfg(feature = "memory")]
#[derive(Subcommand)]
enum RagAction {
// Retrieval plus answer generation.
Query {
query: String,
#[arg(short = 'k', long, default_value = "5")]
top_k: usize,
#[arg(long, default_value = "0.1")]
min_score: f32,
#[arg(long, default_value = "balanced")]
mode: RagMode,
#[arg(short, long, default_value = "text")]
format: OutputFormat,
// Presumably skips the LLM generation stage (command is locked, so
// the flag is currently unused) — confirm once implemented.
#[arg(long)]
no_llm: bool,
},
// Raw chunk retrieval without generation.
Retrieve {
query: String,
#[arg(short = 'k', long, default_value = "10")]
top_k: usize,
#[arg(short, long, default_value = "text")]
format: OutputFormat,
},
// Index/corpus statistics.
Stats,
}
// Effort preset for RAG queries (--mode); default is "balanced".
#[cfg(feature = "memory")]
#[derive(Clone, Copy, Debug, ValueEnum)]
enum RagMode {
Quick,
Balanced,
Thorough,
}
// Subcommands of `rk rag-perf` — implemented against an in-memory RAG
// engine in main().
#[cfg(feature = "memory")]
#[derive(Subcommand)]
enum RagPerfAction {
// Run the benchmark suite N times and print averaged results.
Benchmark {
#[arg(short, long, default_value = "1")]
iterations: usize,
#[arg(short, long, default_value = "text")]
format: OutputFormat,
},
// Continuous monitoring loop (runs until interrupted).
Monitor {
// Seconds between monitoring passes.
#[arg(short, long, default_value = "300")]
interval: u64,
// Regression alert threshold as a fraction (0.05 = 5%).
#[arg(long, default_value = "0.05")]
threshold: f64,
// Number of measurements kept for rolling statistics.
#[arg(long, default_value = "100")]
history_window: usize,
},
// One-shot regression check.
Check,
// Print the recorded history summary.
History {
#[arg(short, long, default_value = "text")]
format: OutputFormat,
},
// Show the effective monitoring configuration (nothing is persisted).
Config {
#[arg(long)]
threshold: Option<f64>,
#[arg(long)]
history_window: Option<usize>,
#[arg(long)]
interval: Option<u64>,
},
}
// Subcommands of `rk trace` (currently a demo stub; main() prints canned
// output regardless of the chosen action).
#[derive(Subcommand)]
enum TraceAction {
// List recent traces, optionally filtered by protocol.
List {
#[arg(short, long)]
dir: Option<PathBuf>,
#[arg(short, long)]
protocol: Option<String>,
#[arg(short, long, default_value = "20")]
limit: usize,
},
// Show a single trace by id.
View {
id: String,
#[arg(short, long)]
dir: Option<PathBuf>,
#[arg(short, long, default_value = "text")]
format: OutputFormat,
},
// Delete traces, either all or older than --keep-days.
Clean {
#[arg(short, long)]
dir: Option<PathBuf>,
#[arg(long)]
all: bool,
#[arg(long)]
keep_days: Option<u32>,
},
}
// Subcommands of `rk metrics` (currently a demo stub; main() prints canned
// output regardless of the chosen action).
#[derive(Subcommand)]
enum MetricsAction {
// Full report, optionally filtered and written to a file.
Report {
#[arg(short, long, default_value = "text")]
format: OutputFormat,
#[arg(short, long)]
filter: Option<String>,
#[arg(short, long)]
output: Option<PathBuf>,
},
// Statistics for a single named metric.
Stats {
name: String,
#[arg(short, long, default_value = "text")]
format: OutputFormat,
},
// Print the metrics storage location.
Path,
// Clear stored metrics; --yes skips confirmation.
Clear {
#[arg(long)]
yes: bool,
},
}
// Value of --provider: hosted API backends plus locally installed CLI
// tools (the *-cli variants). Converted to the backend LlmProvider enum
// via the From impl below; default models live in default_model().
// Explicit #[value(name = ...)] keeps the CLI spelling (e.g. "openai",
// "claude-cli") distinct from the Rust CamelCase identifier.
#[derive(Clone, Copy, Debug, ValueEnum)]
enum ProviderArg {
Anthropic,
#[value(name = "openai")]
OpenAI,
Gemini,
Vertex,
Azure,
Bedrock,
Xai,
Groq,
Mistral,
Deepseek,
Cohere,
Perplexity,
Cerebras,
Together,
Fireworks,
Qwen,
Cloudflare,
#[value(name = "openrouter")]
OpenRouter,
// Installed-binary wrappers start here.
#[value(name = "claude-cli")]
ClaudeCli,
#[value(name = "codex-cli")]
CodexCli,
#[value(name = "gemini-cli")]
GeminiCli,
#[value(name = "opencode-cli")]
OpencodeCli,
#[value(name = "copilot-cli")]
CopilotCli,
#[value(name = "grok-cli")]
GrokCli,
#[value(name = "cursor-cli")]
CursorCli,
}
impl ProviderArg {
#[allow(dead_code)]
pub fn default_model(&self) -> &'static str {
match self {
ProviderArg::Anthropic => "claude-opus-4-5", ProviderArg::OpenAI => "gpt-5.2", ProviderArg::Gemini => "gemini-3.0-pro", ProviderArg::Vertex => "gemini-3.0-pro", ProviderArg::Azure => "gpt-5.2", ProviderArg::Bedrock => "anthropic.claude-opus-4-5-v1:0", ProviderArg::Xai => "grok-4.1", ProviderArg::Groq => "llama-3.3-70b-versatile", ProviderArg::Mistral => "mistral-large-3", ProviderArg::Deepseek => "deepseek-v3.2", ProviderArg::Cohere => "command-r-plus-2", ProviderArg::Perplexity => "sonar-pro", ProviderArg::Cerebras => "llama-3.3-70b", ProviderArg::Together => "meta-llama/Llama-3.3-70B-Instruct-Turbo",
ProviderArg::Fireworks => "accounts/fireworks/models/llama-v3p3-70b-instruct",
ProviderArg::Qwen => "qwen-max", ProviderArg::Cloudflare => "@cf/meta/llama-3.3-70b-instruct-fp8-fast",
ProviderArg::OpenRouter => "anthropic/claude-opus-4-5", ProviderArg::ClaudeCli => "claude-opus-4-5", ProviderArg::CodexCli => "gpt-5.2", ProviderArg::GeminiCli => "gemini-3.0-pro", ProviderArg::OpencodeCli => "default",
ProviderArg::CopilotCli => "copilot",
ProviderArg::GrokCli => "grok-4.1", ProviderArg::CursorCli => "cursor",
}
}
}
impl From<ProviderArg> for LlmProvider {
fn from(arg: ProviderArg) -> Self {
match arg {
ProviderArg::Anthropic => LlmProvider::Anthropic,
ProviderArg::OpenAI => LlmProvider::OpenAI,
ProviderArg::Gemini => LlmProvider::GoogleGemini,
ProviderArg::Vertex => LlmProvider::GoogleVertex,
ProviderArg::Azure => LlmProvider::AzureOpenAI,
ProviderArg::Bedrock => LlmProvider::AWSBedrock,
ProviderArg::Xai => LlmProvider::XAI,
ProviderArg::Groq => LlmProvider::Groq,
ProviderArg::Mistral => LlmProvider::Mistral,
ProviderArg::Deepseek => LlmProvider::DeepSeek,
ProviderArg::Cohere => LlmProvider::Cohere,
ProviderArg::Perplexity => LlmProvider::Perplexity,
ProviderArg::Cerebras => LlmProvider::Cerebras,
ProviderArg::Together => LlmProvider::TogetherAI,
ProviderArg::Fireworks => LlmProvider::FireworksAI,
ProviderArg::Qwen => LlmProvider::AlibabaQwen,
ProviderArg::Cloudflare => LlmProvider::CloudflareAI,
ProviderArg::OpenRouter => LlmProvider::OpenRouter,
ProviderArg::ClaudeCli => LlmProvider::Anthropic,
ProviderArg::CodexCli => LlmProvider::OpenAI,
ProviderArg::GeminiCli => LlmProvider::GoogleGemini,
ProviderArg::OpencodeCli => LlmProvider::Opencode,
ProviderArg::CopilotCli => LlmProvider::OpenAI,
ProviderArg::GrokCli => LlmProvider::XAI,
ProviderArg::CursorCli => LlmProvider::Anthropic, }
}
}
// Output rendering used by most commands: human-readable text (default)
// or pretty-printed JSON of the full result structure.
#[derive(Clone, Copy, Debug, ValueEnum)]
enum OutputFormat {
Text,
Json,
}
// Subcommands of `rk index` (locked in this distribution; see main()).
#[cfg(feature = "memory")]
#[derive(Subcommand)]
enum IndexAction {
// Rebuild the index; --force presumably rebuilds even when up to date
// (command is locked, so the flag is currently unused) — confirm.
Build {
#[arg(short, long)]
force: bool,
},
Status,
Optimize,
}
/// Prints the static table of supported API providers (with the env var
/// each one reads) and the installed-binary CLI fallbacks, followed by
/// usage examples. Output is emitted line by line from a fixed table so
/// the text stays byte-identical to the documented format.
fn print_providers_list() {
    const LINES: &[&str] = &[
        "",
        "╭─────────────────────────────────────────────────────────────╮",
        "│ ReasonKit Supported Providers │",
        "╰─────────────────────────────────────────────────────────────╯",
        "",
        "API Providers (18):",
        " anthropic OpenAI-compatible (Claude) ANTHROPIC_API_KEY",
        " openai OpenAI GPT models OPENAI_API_KEY",
        " gemini Google Gemini GOOGLE_API_KEY",
        " vertex Google Vertex AI GOOGLE_APPLICATION_CREDENTIALS",
        " azure Azure OpenAI AZURE_OPENAI_KEY",
        " bedrock AWS Bedrock AWS credentials",
        " xai xAI (Grok) XAI_API_KEY",
        " groq Groq (fast inference) GROQ_API_KEY",
        " mistral Mistral AI MISTRAL_API_KEY",
        " deepseek DeepSeek DEEPSEEK_API_KEY",
        " cohere Cohere COHERE_API_KEY",
        " perplexity Perplexity AI PERPLEXITY_API_KEY",
        " cerebras Cerebras CEREBRAS_API_KEY",
        " together Together AI TOGETHER_API_KEY",
        " fireworks Fireworks AI FIREWORKS_API_KEY",
        " qwen Alibaba Qwen QWEN_API_KEY",
        " cloudflare Cloudflare AI CF_AI_TOKEN",
        " openrouter OpenRouter (multi-provider) OPENROUTER_API_KEY",
        "",
        "CLI Tools (7):",
        " claude-cli Claude Code (installed binary)",
        " codex-cli OpenAI Codex (installed binary)",
        " gemini-cli Gemini CLI (installed binary)",
        " opencode-cli OpenCode (installed binary)",
        " copilot-cli GitHub Copilot (installed binary)",
        " grok-cli Grok CLI (installed binary)",
        " cursor-cli Cursor (installed binary)",
        "",
        "Usage:",
        " rk think --provider openai \"Your query\"",
        " rk think --provider claude-cli \"Your query\"",
        " rk think --provider openrouter --model meta-llama/llama-3.1-405b \"Query\"",
        "",
    ];
    for line in LINES {
        println!("{line}");
    }
}
/// Externally installed AI CLI binaries that can stand in for an API
/// provider when no API key is configured.
#[derive(Debug, Clone, Copy)]
enum CliTool {
    Claude,
    Gemini,
    Codex,
    CursorAgent,
    Copilot,
    OpenCode,
    Aider,
}

impl CliTool {
    /// Executable name looked up on PATH. GitHub Copilot ships as a `gh`
    /// extension, so its executable is `gh` itself.
    fn command(&self) -> &'static str {
        match self {
            Self::Claude => "claude",
            Self::Gemini => "gemini",
            Self::Codex => "codex",
            Self::CursorAgent => "cursor-agent",
            Self::Copilot => "gh",
            Self::OpenCode => "opencode",
            Self::Aider => "aider",
        }
    }

    /// Human-readable name used in status and fallback messages.
    fn display_name(&self) -> &'static str {
        match self {
            Self::Claude => "Claude Code",
            Self::Gemini => "Gemini CLI",
            Self::Codex => "OpenAI Codex",
            Self::CursorAgent => "Cursor Agent",
            Self::Copilot => "GitHub Copilot",
            Self::OpenCode => "OpenCode",
            Self::Aider => "Aider",
        }
    }

    /// Every tool, in the priority order used when probing for a fallback
    /// (first available wins; see find_available_cli_tool).
    fn all() -> &'static [CliTool] {
        const ALL: [CliTool; 7] = [
            CliTool::Claude,
            CliTool::Gemini,
            CliTool::Codex,
            CliTool::CursorAgent,
            CliTool::Copilot,
            CliTool::OpenCode,
            CliTool::Aider,
        ];
        &ALL
    }
}
/// Returns true when `tool`'s executable is usable from this process.
///
/// GitHub Copilot has no standalone binary, so it is probed by running
/// `gh copilot --help`; every other tool is located with `which`
/// (NOTE(review): `which` is Unix-specific — confirm Windows support is
/// out of scope). Any spawn failure is treated as "not available".
fn is_cli_tool_available(tool: CliTool) -> bool {
    use std::process::{Command, Stdio};
    let mut probe = if matches!(tool, CliTool::Copilot) {
        let mut c = Command::new("gh");
        c.args(["copilot", "--help"]);
        c
    } else {
        let mut c = Command::new("which");
        c.arg(tool.command());
        c
    };
    probe
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .status()
        .map(|status| status.success())
        .unwrap_or(false)
}
/// Probes every known CLI AI tool, printing a per-tool status line on
/// stderr, and returns the first available one (probe order is
/// CliTool::all()). Returns None when nothing is installed.
fn find_available_cli_tool() -> Option<CliTool> {
    eprintln!(" Checking CLI AI tools:");
    let available: Vec<CliTool> = CliTool::all()
        .iter()
        .copied()
        .filter(|&tool| {
            if is_cli_tool_available(tool) {
                eprintln!(" ✓ {} - available", tool.display_name());
                true
            } else {
                eprintln!(" ✗ {} - not found", tool.display_name());
                false
            }
        })
        .collect();
    match available.first() {
        None => {
            eprintln!(" No CLI AI tools found!");
            None
        }
        Some(&first) => {
            eprintln!(
                " Found {} tool(s), using: {}",
                available.len(),
                first.display_name()
            );
            Some(first)
        }
    }
}
/// How the `think` command ultimately obtained (or will obtain) its answer.
#[derive(Debug, Clone)]
enum FallbackMode {
/// The configured LLM API provider is used directly.
RealProvider,
/// A locally installed AI CLI binary answers instead (no usable API key).
CliTool(CliTool),
/// Mock executor; results are simulated, not from any model.
Mock,
}
/// Runs `query` through an installed AI CLI binary and returns its textual
/// reply.
///
/// Each tool has its own invocation shape (flag names differ), so the
/// argument list is selected per tool and the process is spawned once. On
/// success, stdout is preferred; if empty, stderr is used (some tools log
/// their answer there), and a placeholder string covers the no-output
/// case. A non-zero exit status becomes an error carrying the tool's
/// stderr.
async fn execute_with_cli_tool(tool: CliTool, query: &str) -> anyhow::Result<String> {
    use tokio::process::Command as TokioCommand;
    eprintln!(" Using {} as fallback...", tool.display_name());
    // Per-tool argument vector; the executable name comes from tool.command().
    let args: Vec<&str> = match tool {
        CliTool::Claude => vec!["-p", query, "--output-format", "text"],
        CliTool::Gemini => vec!["-p", query],
        CliTool::Codex => vec![query],
        CliTool::CursorAgent => vec!["--prompt", query],
        CliTool::Copilot => vec!["copilot", "suggest", "-t", "shell", query],
        CliTool::OpenCode => vec![query],
        CliTool::Aider => vec!["--message", query, "--yes", "--no-git"],
    };
    let output = TokioCommand::new(tool.command())
        .args(&args)
        .output()
        .await?;
    if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr);
        return Err(anyhow::anyhow!(
            "{} failed: {}",
            tool.display_name(),
            stderr
        ));
    }
    let stdout = String::from_utf8_lossy(&output.stdout);
    let stderr = String::from_utf8_lossy(&output.stderr);
    if !stdout.trim().is_empty() {
        Ok(stdout.to_string())
    } else if !stderr.trim().is_empty() {
        Ok(stderr.to_string())
    } else {
        Ok("(No output from CLI tool)".to_string())
    }
}
/// Installs the global tracing subscriber.
///
/// The repeated -v count maps to a maximum level: 0 → WARN, 1 → INFO,
/// 2 → DEBUG, 3+ → TRACE. At -vvv and above, source file and line number
/// are included in each event for deep debugging.
///
/// # Panics
/// Panics if a global subscriber was already installed — call exactly
/// once, early in main().
fn setup_logging(verbosity: u8) {
    let level = if verbosity == 0 {
        Level::WARN
    } else if verbosity == 1 {
        Level::INFO
    } else if verbosity == 2 {
        Level::DEBUG
    } else {
        Level::TRACE
    };
    let detailed = verbosity >= 3;
    let subscriber = FmtSubscriber::builder()
        .with_max_level(level)
        .with_target(false)
        .with_thread_ids(false)
        .with_file(detailed)
        .with_line_number(detailed)
        .finish();
    tracing::subscriber::set_global_default(subscriber).expect("Failed to set tracing subscriber");
}
/// Opens (and thereby initializes) the telemetry database when telemetry
/// is enabled via the environment. Failures are logged as warnings, never
/// fatal — the function itself always returns Ok.
///
/// A db_path equal to the literal ".rk_telemetry.db" is treated as
/// "unset" and replaced with the standard default location.
async fn initialize_telemetry_if_enabled() -> anyhow::Result<()> {
    use reasonkit::telemetry::{TelemetryConfig, TelemetryStorage};
    let config = TelemetryConfig::from_env();
    if !config.enabled {
        return Ok(());
    }
    let db_path = if config.db_path == Path::new(".rk_telemetry.db") {
        TelemetryConfig::default_db_path()
    } else {
        config.db_path.clone()
    };
    match TelemetryStorage::new(&db_path).await {
        Ok(_) => {
            tracing::debug!(path = %db_path.display(), "Telemetry database initialized");
        }
        Err(e) => {
            tracing::warn!(error = %e, path = %db_path.display(), "Failed to initialize telemetry database");
        }
    }
    Ok(())
}
/// Prints the "feature locked" notice shown for commands that are stubbed
/// out in this distribution. Always succeeds; `name` is interpolated into
/// the upsell copy.
fn unimplemented_command(name: &str) -> anyhow::Result<()> {
    println!("\x1b[1;36m>> ReasonKit System\x1b[0m");
    println!("\x1b[1;33m[!] Command module '{name}' is locked in this distribution.\x1b[0m");
    println!("\x1b[38;5;240m To unlock full '{name}' capabilities including RAPTOR indexing and persistent memory,\x1b[0m");
    println!("\x1b[38;5;240m upgrade to ReasonKit Pro or wait for v0.2 release.\x1b[0m");
    println!("\n\x1b[32m[+] Standard protocols (Think, GigaThink, LaserLogic) are active.\x1b[0m");
    Ok(())
}
/// Prints a canned ProofGuard verification transcript for `claim`.
/// Demo output only: no verification happens, the staged delays merely
/// emulate work, and the confidence score and sources are hard-coded.
fn simulate_verify(claim: &str) -> anyhow::Result<()> {
    use std::time::Duration;
    println!("\x1b[1;36m>> ProofGuard™ Verification Layer\x1b[0m");
    // (message, pause-after-ms) pairs shown as fake progress stages.
    let stages: [(&str, u64); 3] = [
        ("\x1b[38;5;240m[1/3] Searching trusted knowledge graph...\x1b[0m", 600),
        ("\x1b[38;5;240m[2/3] Cross-referencing 3 independent sources...\x1b[0m", 800),
        ("\x1b[38;5;240m[3/3] Analyzing semantic drift...\x1b[0m", 500),
    ];
    for (message, pause_ms) in stages {
        println!("{message}");
        std::thread::sleep(Duration::from_millis(pause_ms));
    }
    println!("\n\x1b[1;32m[VERIFIED] Claim appears consistent with available data.\x1b[0m");
    println!("\x1b[1mClaim:\x1b[0m {claim}");
    println!("\x1b[36mConfidence Score:\x1b[0m 87.5% (High)");
    println!("\x1b[36mSources:\x1b[0m");
    println!(" 1. \x1b[4mhttps://nature.com/articles/s41586-023-0643\x1b[0m (Tier 1)");
    println!(" 2. \x1b[4mhttps://arxiv.org/abs/2309.12345\x1b[0m (Tier 1)");
    println!(" 3. \x1b[4mhttps://github.com/reasonkit/core/tree/main/proofs\x1b[0m (Tier 2)");
    Ok(())
}
/// Prints a canned "deep dive" research transcript for `query`.
/// Demo output only: the connection latency, VIBE check, and job ID are
/// fabricated; the one delay just emulates startup work.
fn simulate_web_research(query: &str) -> anyhow::Result<()> {
    println!("\x1b[1;36m>> ReasonKit Deep Dive Protocol\x1b[0m");
    println!("\x1b[38;5;240mInitializing Web Sensing Layer (headless)...\x1b[0m");
    std::thread::sleep(std::time::Duration::from_millis(800));
    println!("\x1b[32m[+] Connected to Search API (Latency: 45ms)\x1b[0m");
    println!("\x1b[32m[+] VIBE check passed on query intent\x1b[0m");
    println!("\n\x1b[1;33m[!] Deep Research is running in SAFE MODE.\x1b[0m");
    println!(" Pro features (WARC archiving, JS execution) are disabled.");
    println!("\n\x1b[1mQuery:\x1b[0m {query}");
    println!("\x1b[36mStatus:\x1b[0m Queued for background processing (Job ID: #RK-9982)");
    println!("\x1b[38;5;240mResults will be anchored to local knowledge graph when complete.\x1b[0m");
    Ok(())
}
/// Prints a canned performance-telemetry dashboard. All figures are
/// static demo values, not real measurements.
fn simulate_metrics() -> anyhow::Result<()> {
    for line in [
        "\x1b[1;36m>> ReasonKit Performance Telemetry\x1b[0m",
        "\x1b[1mSession Uptime:\x1b[0m 14m 32s",
        "\x1b[1mTokens Processed:\x1b[0m 128,405",
        "\x1b[1mCost Savings:\x1b[0m $4.22 (vs. Standard Chain-of-Thought)",
        "\n\x1b[1;32mProtocol Efficiency:\x1b[0m",
        " GigaThink [██████████░░] 82% Variance Reduction",
        " LaserLogic [████████████] 98% Fallacy Detection",
        " ProofGuard [████████░░░░] 65% Source Verification",
        "\n\x1b[38;5;240mDetailed export available in JSON format.\x1b[0m",
    ] {
        println!("{line}");
    }
    Ok(())
}
/// Prints a canned reasoning-trace listing. The sessions shown are static
/// demo entries, not read from any trace directory.
fn simulate_trace() -> anyhow::Result<()> {
    println!("\x1b[1;36m>> Reasoning Trace Explorer\x1b[0m");
    println!("\x1b[1mRecent Sessions:\x1b[0m");
    // (status-dot color, rest of the row) — red dot marks a failed run.
    let sessions = [
        ("32", "[2026-01-08 14:02] \x1b[36mgigathink\x1b[0m \"System architecture for...\" (12 steps)"),
        ("32", "[2026-01-08 13:45] \x1b[36mlaserlogic\x1b[0m \"Validate rust safety...\" (8 steps)"),
        ("31", "[2026-01-08 10:12] \x1b[36mbedrock\x1b[0m \"First principles of...\" (Failed)"),
    ];
    for (color, row) in sessions {
        println!(" \x1b[{color}m●\x1b[0m {row}");
    }
    println!("\n\x1b[33mHint:\x1b[0m Use 'rk think --save-trace' to persist new sessions.");
    Ok(())
}
/// Parses a human-friendly budget spec (e.g. "30s", "5m", "1000t",
/// "$0.50") into a BudgetConfig, wrapping any parse failure with a usage
/// hint listing the accepted formats.
fn parse_budget(budget_str: &str) -> anyhow::Result<BudgetConfig> {
    match BudgetConfig::parse(budget_str) {
        Ok(budget) => Ok(budget),
        Err(e) => Err(anyhow::anyhow!(
            "Invalid budget format: {}. Use formats like '30s', '5m', '1000t', or '$0.50'",
            e
        )),
    }
}
// Process entry point: parses the CLI, wires logging and telemetry, then
// dispatches on the subcommand. `think` is fully implemented with a
// provider → installed-CLI-tool → mock fallback chain; several commands
// are stubs that print demo output (unimplemented_command / simulate_*).
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let cli = Cli::parse();
setup_logging(cli.verbose);
info!("ReasonKit Core v{}", env!("CARGO_PKG_VERSION"));
// Hidden --init-telemetry forces synchronous init (errors surface via ?);
// the normal path is best-effort in a background task, which may not
// finish before the process exits — failures there are ignored.
if cli.init_telemetry {
initialize_telemetry_if_enabled().await?;
} else {
tokio::spawn(async move {
let _ = initialize_telemetry_if_enabled().await;
});
}
match cli.command {
Commands::Mcp(mcp_cli) => {
run_mcp_command(mcp_cli).await?;
}
#[cfg(feature = "mcp-server-pro")]
Commands::ServeMcp => {
reasonkit::mcp::server::run_server().await?;
}
// The following commands are locked in this distribution and only print
// an upsell notice.
#[cfg(feature = "memory")]
Commands::Ingest { .. } => {
return unimplemented_command("ingest");
}
#[cfg(feature = "memory")]
Commands::Query { .. } => {
return unimplemented_command("query");
}
#[cfg(feature = "memory")]
Commands::Index { .. } => {
return unimplemented_command("index");
}
Commands::Stats => {
return unimplemented_command("stats");
}
#[cfg(feature = "memory")]
Commands::Export { .. } => {
return unimplemented_command("export");
}
Commands::Serve { .. } => {
return unimplemented_command("serve");
}
Commands::Think {
query,
protocol,
profile,
provider,
model,
temperature,
max_tokens,
budget,
mock,
save_trace,
trace_dir,
format,
list,
list_providers,
} => {
if list_providers {
print_providers_list();
return Ok(());
}
// Build the executor. --mock short-circuits to the mock executor;
// otherwise a missing API key degrades to an installed CLI AI tool,
// and failing that, to mock mode with setup instructions.
let (executor, fallback_mode) = if mock {
(ProtocolExecutor::mock()?, FallbackMode::Mock)
} else {
let mut config = ExecutorConfig::default();
config.llm.provider = provider.into();
if let Some(m) = model.clone() {
config.llm.model = m;
}
config.llm.temperature = temperature;
config.llm.max_tokens = max_tokens;
config.save_traces = save_trace;
config.trace_dir = trace_dir.clone();
config.verbose = cli.verbose > 0;
if let Some(ref budget_str) = budget {
config.budget = parse_budget(budget_str)?;
if cli.verbose > 0 {
info!("Budget configured: {:?}", config.budget);
}
}
match ProtocolExecutor::with_config(config) {
Ok(exec) => (exec, FallbackMode::RealProvider),
Err(e) => {
// Heuristic: any error message mentioning an API key is treated
// as "no credentials" and triggers the fallback chain; every
// other construction error aborts.
let error_msg = e.to_string();
if error_msg.contains("API key") || error_msg.contains("api_key") {
eprintln!();
eprintln!(" ⚠ No API key configured - checking for CLI AI tools...");
if let Some(cli_tool) = find_available_cli_tool() {
eprintln!(
" ✓ Found {} - will use as fallback",
cli_tool.display_name()
);
eprintln!();
// The mock executor here is only a placeholder; the actual
// answer is produced by execute_with_cli_tool further down.
(ProtocolExecutor::mock()?, FallbackMode::CliTool(cli_tool))
} else {
eprintln!(" ⚠ No CLI tools found - running in mock mode");
eprintln!(
" ┌─────────────────────────────────────────────────────┐"
);
eprintln!(
" │ To use real LLM providers: │"
);
eprintln!(
" │ 1. Run 'rk init' for guided setup │"
);
eprintln!(
" │ 2. Or set an API key: │"
);
eprintln!(
" │ export ANTHROPIC_API_KEY=sk-... │"
);
eprintln!(
" │ export OPENAI_API_KEY=sk-... │"
);
eprintln!(
" │ export GEMINI_API_KEY=... │"
);
eprintln!(
" │ 3. Or install a CLI AI tool: │"
);
eprintln!(
" │ claude, gemini, codex, aider, opencode, etc. │"
);
eprintln!(
" │ 4. Or use: rk think --mock \"query\" │"
);
eprintln!(
" └─────────────────────────────────────────────────────┘"
);
eprintln!();
(ProtocolExecutor::mock()?, FallbackMode::Mock)
}
} else {
return Err(e.into());
}
}
}
};
if cli.verbose > 0 {
match &fallback_mode {
FallbackMode::RealProvider => info!("Using real LLM provider"),
FallbackMode::CliTool(tool) => {
info!("Using CLI tool fallback: {}", tool.display_name())
}
FallbackMode::Mock => info!("Using mock executor - responses are simulated"),
}
}
if list {
println!("Available Protocols:");
for p in executor.list_protocols() {
println!(" - {}", p);
}
println!("\nAvailable Profiles:");
for p in executor.list_profiles() {
println!(" - {}", p);
}
return Ok(());
}
let q =
query.ok_or_else(|| anyhow::anyhow!("Query is required unless --list is used"))?;
let input = ProtocolInput::query(q.clone());
let (output, final_mode) = match &fallback_mode {
FallbackMode::CliTool(cli_tool) => {
// Ask the external binary directly and wrap its raw reply in a
// synthetic single-step ProtocolOutput with a fixed 0.8 confidence.
let cli_response = execute_with_cli_tool(*cli_tool, &q).await?;
use reasonkit::thinktool::executor::ProtocolOutput;
use reasonkit::thinktool::step::{StepOutput, StepResult, TokenUsage};
let output = ProtocolOutput {
protocol_id: protocol
.clone()
.unwrap_or_else(|| "cli_fallback".to_string()),
success: true,
data: std::collections::HashMap::new(),
confidence: 0.8, steps: vec![StepResult {
step_id: format!("{}_response", cli_tool.command()),
success: true,
output: StepOutput::Text {
content: cli_response,
},
confidence: 0.8,
duration_ms: 0,
tokens: TokenUsage::default(),
error: None,
}],
tokens: TokenUsage::default(),
duration_ms: 0,
error: None,
trace_id: None,
budget_summary: None,
};
(output, fallback_mode.clone())
}
FallbackMode::Mock => {
// --protocol takes precedence over --profile; the profile
// defaults to "balanced".
let prof = profile.clone().unwrap_or_else(|| "balanced".to_string());
let result = if let Some(ref proto) = protocol {
executor.execute(proto, input.clone()).await?
} else {
executor.execute_profile(&prof, input.clone()).await?
};
(result, FallbackMode::Mock)
}
FallbackMode::RealProvider => {
let exec_result = if let Some(ref proto) = protocol {
executor.execute(proto, input.clone()).await
} else {
let prof = profile.clone().unwrap_or_else(|| "balanced".to_string());
executor.execute_profile(&prof, input.clone()).await
};
match exec_result {
Ok(out) => (out, FallbackMode::RealProvider),
Err(e) => {
// Same API-key heuristic as at construction time: retry via a
// CLI tool, then mock mode; any other error is fatal.
let error_msg = e.to_string();
if error_msg.contains("API key") || error_msg.contains("api_key") {
eprintln!();
eprintln!(
" ⚠ API key error during execution - trying fallback chain..."
);
if let Some(cli_tool) = find_available_cli_tool() {
eprintln!(
" ✓ Found {} - using as fallback",
cli_tool.display_name()
);
let cli_response = execute_with_cli_tool(cli_tool, &q).await?;
use reasonkit::thinktool::executor::ProtocolOutput;
use reasonkit::thinktool::step::{
StepOutput, StepResult, TokenUsage,
};
// Same synthetic single-step wrapping as the CliTool arm above.
let output = ProtocolOutput {
protocol_id: protocol
.clone()
.unwrap_or_else(|| "cli_fallback".to_string()),
success: true,
data: std::collections::HashMap::new(),
confidence: 0.8,
steps: vec![StepResult {
step_id: format!("{}_response", cli_tool.command()),
success: true,
output: StepOutput::Text {
content: cli_response,
},
confidence: 0.8,
duration_ms: 0,
tokens: TokenUsage::default(),
error: None,
}],
tokens: TokenUsage::default(),
duration_ms: 0,
error: None,
trace_id: None,
budget_summary: None,
};
(output, FallbackMode::CliTool(cli_tool))
} else {
eprintln!(" ⚠ No CLI tools found - falling back to mock mode");
eprintln!(
" ┌─────────────────────────────────────────────────────┐"
);
eprintln!(
" │ {:<52}│",
error_msg.lines().next().unwrap_or(&error_msg)
);
eprintln!(
" │ │"
);
eprintln!(
" │ To fix: │"
);
eprintln!(
" │ 1. Run 'rk init' for guided setup │"
);
eprintln!(
" │ 2. Or set the suggested API key │"
);
eprintln!(
" │ 3. Or install: claude, gemini, codex, aider │"
);
eprintln!(
" └─────────────────────────────────────────────────────┘"
);
eprintln!();
let mock_executor = ProtocolExecutor::mock()?;
let mock_input = ProtocolInput::query(q.clone());
let mock_result = if let Some(ref proto) = protocol {
mock_executor.execute(proto, mock_input).await?
} else {
let prof = profile
.clone()
.unwrap_or_else(|| "balanced".to_string());
mock_executor.execute_profile(&prof, mock_input).await?
};
(mock_result, FallbackMode::Mock)
}
} else {
return Err(e.into());
}
}
}
}
};
// Render the result. Text mode prints a banner naming any degraded mode,
// the per-step transcript, overall confidence, budget usage if present,
// and a trailing note; JSON mode dumps the whole ProtocolOutput.
match format {
OutputFormat::Text => {
match &final_mode {
FallbackMode::Mock => {
println!("┌────────────────────────────────────────────────────┐");
println!("│ MOCK MODE - Results are simulated, not from LLM │");
println!("└────────────────────────────────────────────────────┘");
println!();
}
FallbackMode::CliTool(tool) => {
println!("┌────────────────────────────────────────────────────┐");
println!(
"│ CLI FALLBACK - Using {:<16} for reasoning │",
tool.display_name()
);
println!("└────────────────────────────────────────────────────┘");
println!();
}
FallbackMode::RealProvider => {}
}
println!("Thinking Process:");
for step in &output.steps {
println!("\n[{}] {}", step.step_id, step.as_text().unwrap_or(""));
}
println!("\nConfidence: {:.2}", output.confidence);
if let Some(ref summary) = output.budget_summary {
println!("\nBudget Summary:");
println!(" Steps completed: {}", summary.steps_completed);
println!(" Steps skipped: {}", summary.steps_skipped);
println!(" Tokens used: {}", summary.tokens_used);
println!(" Cost incurred: ${:.4}", summary.cost_incurred);
println!(" Time elapsed: {:?}", summary.elapsed);
}
match &final_mode {
FallbackMode::Mock => {
println!();
println!("Note: Run 'rk init' to configure real LLM providers.");
}
FallbackMode::CliTool(tool) => {
println!();
println!(
"Note: Used {} as fallback. Set API keys for native integration.",
tool.display_name()
);
}
FallbackMode::RealProvider => {}
}
}
OutputFormat::Json => {
println!("{}", serde_json::to_string_pretty(&output)?);
}
}
}
// Web/Verify/Trace/Metrics currently print canned demo output only.
Commands::Web { query, .. } => {
return simulate_web_research(&query);
}
Commands::Verify { claim, .. } => {
return simulate_verify(&claim);
}
Commands::Trace { .. } => {
return simulate_trace();
}
#[cfg(feature = "memory")]
Commands::Rag { .. } => {
return unimplemented_command("rag");
}
#[cfg(feature = "memory")]
Commands::RagPerf { action } => {
// RAG performance tooling runs against a fresh in-memory engine, so
// the numbers do not reflect any persisted index.
use reasonkit::rag::performance::{PerformanceConfig, RagPerformanceMonitor};
use std::sync::Arc;
use std::time::Duration;
let rag_engine = reasonkit::rag::RagEngine::in_memory()
.map_err(|e| anyhow::anyhow!("Failed to create RAG engine: {}", e))?;
match action {
RagPerfAction::Benchmark { iterations, format } => {
let config = PerformanceConfig::default();
let monitor = RagPerformanceMonitor::new(rag_engine, config);
println!(
"Running RAG performance benchmark ({} iterations)...",
iterations
);
let mut results = Vec::new();
for i in 0..iterations {
println!("Iteration {}/{}", i + 1, iterations);
match monitor.run_benchmark().await {
Ok(metrics) => results.push(metrics),
Err(e) => {
// Failed iterations are reported but excluded from the averages.
eprintln!("Benchmark iteration {} failed: {}", i + 1, e);
}
}
}
match format {
OutputFormat::Text => {
if results.is_empty() {
println!("No successful benchmark results.");
} else {
println!("\nBenchmark Results Summary:");
println!("==========================");
let avg_retrieval_time = results
.iter()
.map(|m| m.retrieval.retrieval_time_ms)
.sum::<f64>()
/ results.len() as f64;
let avg_generation_time = results
.iter()
.map(|m| m.generation.generation_time_ms)
.sum::<f64>()
/ results.len() as f64;
let avg_total_time = results
.iter()
.map(|m| m.end_to_end.total_time_ms)
.sum::<f64>()
/ results.len() as f64;
let avg_qps = results
.iter()
.map(|m| m.end_to_end.queries_per_second)
.sum::<f64>()
/ results.len() as f64;
println!("Average Retrieval Time: {:.2} ms", avg_retrieval_time);
println!("Average Generation Time: {:.2} ms", avg_generation_time);
println!("Average Total Query Time: {:.2} ms", avg_total_time);
println!("Average Queries/Second: {:.2}", avg_qps);
println!("Successful Runs: {}/{}", results.len(), iterations);
}
}
OutputFormat::Json => {
let summary = serde_json::json!({
"iterations": iterations,
"successful_runs": results.len(),
"results": results
});
println!("{}", serde_json::to_string_pretty(&summary)?);
}
}
}
RagPerfAction::Monitor {
interval,
threshold,
history_window,
} => {
let config = PerformanceConfig {
monitoring_interval: Duration::from_secs(interval),
alert_threshold: threshold,
history_window,
..Default::default()
};
let monitor = Arc::new(RagPerformanceMonitor::new(rag_engine, config));
println!("Starting continuous RAG performance monitoring...");
println!(
"Interval: {}s, Threshold: {:.1}%, History Window: {}",
interval,
threshold * 100.0,
history_window
);
println!("Press Ctrl+C to stop monitoring.");
// Blocks here until the monitoring loop ends.
monitor.start_continuous_monitoring().await?;
}
RagPerfAction::Check => {
let config = PerformanceConfig::default();
let monitor = RagPerformanceMonitor::new(rag_engine, config);
match monitor.detect_regressions().await {
Ok(regressions) => {
if regressions.is_empty() {
println!("✅ No performance regressions detected.");
} else {
println!("🚨 Performance Regressions Detected:");
println!("===================================");
for regression in regressions {
println!(
"• {}: {:.1}% degradation (p-value: {:.4})",
regression.metric,
regression.change_percent * 100.0,
regression.p_value
);
}
}
}
Err(e) => {
eprintln!("Failed to check for regressions: {}", e);
}
}
}
RagPerfAction::History { format } => {
let config = PerformanceConfig::default();
let monitor = RagPerformanceMonitor::new(rag_engine, config);
match monitor.get_history_summary().await {
Ok(summary) => match format {
OutputFormat::Text => {
println!("RAG Performance History Summary");
println!("================================");
if let Some(metrics_tracked) = summary.get("metrics_tracked") {
println!("Metrics Tracked: {}", metrics_tracked);
}
if let Some(total_measurements) = summary.get("total_measurements")
{
println!("Total Measurements: {}", total_measurements);
}
if let Some(rolling_stats) = summary.get("rolling_stats") {
println!("\nRolling Statistics:");
if let Some(obj) = rolling_stats.as_object() {
for (metric, stats) in obj {
println!(
"• {}: {:.2} ± {:.2} (n={})",
metric,
stats
.get("mean")
.unwrap_or(&serde_json::Value::Null),
stats
.get("std_dev")
.unwrap_or(&serde_json::Value::Null),
stats
.get("count")
.unwrap_or(&serde_json::Value::Null)
);
}
}
}
}
OutputFormat::Json => {
println!("{}", serde_json::to_string_pretty(&summary)?);
}
},
Err(e) => {
eprintln!("Failed to get history summary: {}", e);
}
}
}
RagPerfAction::Config {
threshold,
history_window,
interval,
} => {
// Builds and prints the effective configuration; nothing is persisted.
let mut config = PerformanceConfig::default();
if let Some(t) = threshold {
config.alert_threshold = t;
}
if let Some(h) = history_window {
config.history_window = h;
}
if let Some(i) = interval {
config.monitoring_interval = Duration::from_secs(i);
}
println!("RAG Performance Monitoring Configuration");
println!("=========================================");
println!("Alert Threshold: {:.1}%", config.alert_threshold * 100.0);
println!("History Window: {} measurements", config.history_window);
println!(
"Monitoring Interval: {} seconds",
config.monitoring_interval.as_secs()
);
println!("Benchmark Queries: {}", config.benchmark_queries.len());
println!(
"Memory Monitoring: {}",
if config.enable_memory_monitoring {
"Enabled"
} else {
"Disabled"
}
);
}
}
}
Commands::Metrics { .. } => {
return simulate_metrics();
}
Commands::Completions { shell } => {
// Emit a completion script for the full command tree to stdout.
let mut cmd = Cli::command();
generate(shell, &mut cmd, "rk", &mut std::io::stdout());
}
Commands::Repl { banner } => {
run_repl(banner)?;
}
}
Ok(())
}