use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, Instant};
use thiserror::Error;
use tokio::sync::{broadcast, RwLock};
use uuid::Uuid;
/// Error type for every fallible operation in the reasoning module.
#[derive(Error, Debug)]
pub enum ReasoningError {
/// A ThinkTool step reported a failure.
#[error("ThinkTool '{tool}' failed: {message}")]
ThinkToolFailed { tool: String, message: String },
/// The memory provider returned an error during retrieval.
#[error("Memory query failed: {0}")]
MemoryQueryFailed(String),
/// A profile name could not be resolved.
#[error("Profile '{0}' not found")]
ProfileNotFound(String),
/// An event channel send/receive failed.
#[error("Channel error: {0}")]
ChannelError(String),
/// The run exceeded the configured wall-clock timeout.
#[error("Execution timed out after {0:?}")]
Timeout(Duration),
/// Invalid or missing configuration (also used for "session not found").
#[error("Configuration error: {0}")]
Config(String),
/// Underlying LLM call failed.
#[error("LLM error: {0}")]
LlmError(String),
/// Execution was cancelled.
/// NOTE(review): not constructed anywhere in this file — presumably
/// reserved for callers or future cancellation support; confirm.
#[error("Execution cancelled")]
Cancelled,
/// Result confidence fell short of the profile's threshold.
#[error("Confidence {actual:.2} below threshold {required:.2}")]
ConfidenceBelowThreshold { actual: f64, required: f64 },
}
/// Module-wide result alias.
pub type Result<T> = std::result::Result<T, ReasoningError>;
/// Reasoning effort profile. Controls the ThinkTool chain, the minimum
/// confidence bar and the token budget. Serialized lowercase (e.g. "deep").
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, Default)]
#[serde(rename_all = "lowercase")]
pub enum Profile {
/// Fastest: two tools, lowest confidence requirement.
Quick,
/// Default trade-off between speed and rigor.
#[default]
Balanced,
/// Adds a critical-review pass on top of Balanced.
Deep,
/// Most thorough: re-runs verification at the end.
Paranoid,
}
impl Profile {
    /// Ordered list of ThinkTool ids executed for this profile.
    ///
    /// Paranoid deliberately lists "proofguard" twice: a final
    /// re-verification pass after the critique step.
    pub fn thinktool_chain(&self) -> Vec<&'static str> {
        let chain: &[&'static str] = match self {
            Profile::Quick => &["gigathink", "laserlogic"],
            Profile::Balanced => &["gigathink", "laserlogic", "bedrock", "proofguard"],
            Profile::Deep => &[
                "gigathink",
                "laserlogic",
                "bedrock",
                "proofguard",
                "brutalhonesty",
            ],
            Profile::Paranoid => &[
                "gigathink",
                "laserlogic",
                "bedrock",
                "proofguard",
                "brutalhonesty",
                "proofguard",
            ],
        };
        chain.to_vec()
    }

    /// Minimum average confidence a decision must reach for this profile.
    pub fn min_confidence(&self) -> f64 {
        match self {
            Profile::Quick => 0.70,
            Profile::Balanced => 0.80,
            Profile::Deep => 0.85,
            Profile::Paranoid => 0.95,
        }
    }

    /// Overall token budget associated with this profile.
    pub fn token_budget(&self) -> u32 {
        match self {
            Profile::Quick => 3_000,
            Profile::Balanced => 8_000,
            Profile::Deep => 12_000,
            Profile::Paranoid => 25_000,
        }
    }

    /// Case-insensitive parse; accepts full names and one-letter shorthands.
    pub fn parse_profile(s: &str) -> Option<Self> {
        let lowered = s.to_lowercase();
        match lowered.as_str() {
            "quick" | "q" => Some(Profile::Quick),
            "balanced" | "b" => Some(Profile::Balanced),
            "deep" | "d" => Some(Profile::Deep),
            "paranoid" | "p" => Some(Profile::Paranoid),
            _ => None,
        }
    }
}
impl std::fmt::Display for Profile {
    /// Lowercase profile name, matching the serde representation.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            Profile::Quick => "quick",
            Profile::Balanced => "balanced",
            Profile::Deep => "deep",
            Profile::Paranoid => "paranoid",
        };
        f.write_str(name)
    }
}
/// Tunables for a `ReasoningLoop`. Every field carries a serde default so a
/// partial config document deserializes cleanly.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReasoningConfig {
/// Profile used when the caller does not specify one.
#[serde(default)]
pub default_profile: Profile,
/// Wall-clock budget for a whole reasoning run.
#[serde(default = "default_timeout")]
pub timeout: Duration,
/// NOTE(review): set by the builder but not consulted by `execute_loop`
/// in this file — confirm where parallel execution is actually applied.
#[serde(default = "default_true")]
pub enable_parallel: bool,
/// Upper bound on concurrent tool executions.
#[serde(default = "default_max_concurrent")]
pub max_concurrent: usize,
/// Whether to query the memory provider before the tool chain runs.
#[serde(default)]
pub enable_memory: bool,
/// Maximum number of memory chunks requested per query.
#[serde(default = "default_memory_top_k")]
pub memory_top_k: usize,
/// Minimum relevance score a memory chunk must have to be used.
#[serde(default = "default_memory_min_score")]
pub memory_min_score: f32,
/// Whether streaming events are enabled.
#[serde(default = "default_true")]
pub enable_streaming: bool,
/// Capacity of the broadcast channel used for streaming events.
#[serde(default = "default_stream_buffer")]
pub stream_buffer_size: usize,
/// Sampling temperature passed to each tool execution.
#[serde(default = "default_temperature")]
pub temperature: f64,
/// Per-call output token cap passed to each tool execution.
#[serde(default = "default_max_tokens")]
pub max_tokens: u32,
/// Retry failed tool executions with exponential backoff.
#[serde(default = "default_true")]
pub retry_on_failure: bool,
/// Number of retries after the first failed attempt.
#[serde(default = "default_max_retries")]
pub max_retries: u32,
}
// Serde default helpers for `ReasoningConfig`. These are referenced by name
// in the `#[serde(default = "...")]` attributes above and reused by the
// `Default` impl, so the two default sources stay in sync.

/// Default overall execution timeout: five minutes.
fn default_timeout() -> Duration {
    Duration::from_secs(300)
}

/// Default for the boolean feature flags that start enabled.
fn default_true() -> bool {
    true
}

/// Default cap on concurrent tool executions.
fn default_max_concurrent() -> usize {
    4
}

/// Default number of memory chunks requested per query.
fn default_memory_top_k() -> usize {
    10
}

/// Default minimum relevance score for memory chunks.
fn default_memory_min_score() -> f32 {
    0.5
}

/// Default broadcast-channel capacity for streaming events.
fn default_stream_buffer() -> usize {
    32
}

/// Default LLM sampling temperature.
fn default_temperature() -> f64 {
    0.7
}

/// Default per-call output token cap.
fn default_max_tokens() -> u32 {
    2048
}

/// Default number of retries after the first failed attempt.
fn default_max_retries() -> u32 {
    2
}
impl Default for ReasoningConfig {
fn default() -> Self {
Self {
default_profile: Profile::Balanced,
timeout: default_timeout(),
enable_parallel: true,
max_concurrent: default_max_concurrent(),
enable_memory: false,
memory_top_k: default_memory_top_k(),
memory_min_score: default_memory_min_score(),
enable_streaming: true,
stream_buffer_size: default_stream_buffer(),
temperature: default_temperature(),
max_tokens: default_max_tokens(),
retry_on_failure: true,
max_retries: default_max_retries(),
}
}
}
/// Result of one memory retrieval: the chunks found plus query metadata.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryContext {
/// Retrieved chunks, as returned by the provider.
pub chunks: Vec<MemoryChunk>,
/// The query string that produced these chunks.
pub query: String,
/// How long retrieval took, in milliseconds.
pub retrieval_time_ms: u64,
/// Whether the provider used its RAPTOR index for this query.
/// NOTE(review): set by the provider, never read in this file — confirm.
pub used_raptor: bool,
}
/// A single retrieved piece of text with its relevance score.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryChunk {
/// Unique id of this chunk.
pub id: Uuid,
/// Id of the document the chunk was extracted from.
pub doc_id: Uuid,
/// Chunk text (injected into step prompts by `build_step_input`).
pub text: String,
/// Relevance score; compared against `memory_min_score`.
pub score: f32,
/// Optional human-readable source label.
pub source: Option<String>,
}
/// Abstraction over the retrieval backend used to ground reasoning runs.
#[async_trait]
pub trait MemoryProvider: Send + Sync {
/// Retrieve up to `top_k` chunks scoring at least `min_score` for `query`.
async fn query(&self, query: &str, top_k: usize, min_score: f32) -> Result<MemoryContext>;
/// Persist a finished session (invoked best-effort after each run).
async fn store_session(&self, session: &ReasoningSession) -> Result<()>;
}
/// Output of a single ThinkTool execution.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ThinkToolResult {
/// Id of the tool that produced this result.
pub tool_id: String,
/// Free-form textual output; fed into later steps and the conclusion.
pub content: String,
/// Tool's self-reported confidence in [0, 1].
pub confidence: f64,
/// Execution time in milliseconds.
pub duration_ms: u64,
/// Token accounting for this call.
pub tokens: TokenUsage,
/// Optional machine-readable payload (e.g. gigathink "perspectives").
pub structured: Option<serde_json::Value>,
/// Auxiliary notes emitted by the tool.
pub notes: Vec<String>,
}
/// Token and cost accounting, accumulated across steps via `add`.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct TokenUsage {
/// Prompt-side tokens consumed.
pub input_tokens: u32,
/// Completion-side tokens produced.
pub output_tokens: u32,
/// Combined token count as reported by the tool.
pub total_tokens: u32,
/// Estimated cost in US dollars.
pub cost_usd: f64,
}
impl TokenUsage {
    /// Accumulate another usage record into this one.
    ///
    /// Uses saturating arithmetic for the `u32` counters: pathological token
    /// counts clamp at `u32::MAX` instead of panicking in debug builds or
    /// silently wrapping in release builds. Normal-range sums are unchanged.
    pub fn add(&mut self, other: &TokenUsage) {
        self.input_tokens = self.input_tokens.saturating_add(other.input_tokens);
        self.output_tokens = self.output_tokens.saturating_add(other.output_tokens);
        self.total_tokens = self.total_tokens.saturating_add(other.total_tokens);
        self.cost_usd += other.cost_usd;
    }
}
/// Executes individual ThinkTools (typically by calling an LLM backend).
#[async_trait]
pub trait ThinkToolExecutor: Send + Sync {
/// Run the tool named `tool_id` on `input` under the given context.
async fn execute(
&self,
tool_id: &str,
input: &str,
context: &ExecutionContext,
) -> Result<ThinkToolResult>;
/// Ids of the tools this executor can run.
fn available_tools(&self) -> Vec<&str>;
}
/// Per-step context handed to `ThinkToolExecutor::execute`.
#[derive(Debug, Clone)]
pub struct ExecutionContext {
/// Session this step belongs to.
pub session_id: Uuid,
/// Active reasoning profile.
pub profile: Profile,
/// Memory retrieved at the start of the run, if any.
pub memory: Option<MemoryContext>,
/// Results of earlier tools in the chain, keyed by tool id.
pub previous_outputs: HashMap<String, ThinkToolResult>,
/// Sampling temperature for the LLM call.
pub temperature: f64,
/// Output token cap for the LLM call.
pub max_tokens: u32,
}
/// Progress events emitted on the streaming channel during a run.
/// Serialized with an adjacent `type` tag in snake_case.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ReasoningEvent {
/// A run began (first event of every streaming session).
SessionStarted {
session_id: Uuid,
profile: Profile,
prompt: String,
},
/// Memory retrieval finished successfully.
MemoryQueried {
chunks_found: usize,
retrieval_time_ms: u64,
},
/// A ThinkTool step is about to execute.
StepStarted {
step_index: usize,
total_steps: usize,
tool_id: String,
},
/// A ThinkTool step finished.
StepCompleted {
step_index: usize,
tool_id: String,
confidence: f64,
duration_ms: u64,
},
/// Incremental output from a tool.
/// NOTE(review): never emitted in this file — presumably produced by
/// executors elsewhere; confirm.
PartialOutput { tool_id: String, delta: String },
/// Non-fatal problem (e.g. a failed memory query).
Warning { message: String },
/// Synthesis produced a final decision.
DecisionReached {
confidence: f64,
total_duration_ms: u64,
},
/// A fatal error occurred.
Error { message: String },
/// Final event: the run ended.
SessionCompleted { success: bool },
}
/// Caller-side handle for consuming the `ReasoningEvent`s of one session.
pub struct StreamHandle {
// Broadcast subscriber created before any event is sent, so events are
// buffered (up to channel capacity) until the caller drains them.
receiver: broadcast::Receiver<ReasoningEvent>,
// Session this handle is attached to.
session_id: Uuid,
}
impl StreamHandle {
pub async fn next(&mut self) -> Option<ReasoningEvent> {
loop {
match self.receiver.recv().await {
Ok(event) => return Some(event),
Err(broadcast::error::RecvError::Closed) => return None,
Err(broadcast::error::RecvError::Lagged(_)) => {
continue;
}
}
}
}
pub fn session_id(&self) -> Uuid {
self.session_id
}
}
/// What kind of work a `ReasoningStep` performed.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum StepKind {
/// Retrieval from the memory provider.
MemoryQuery,
/// One ThinkTool execution, identified by its id.
/// (Only this variant is constructed in this file.)
ThinkTool { tool_id: String },
/// Final combination of tool outputs into a conclusion.
Synthesis,
/// A validation pass.
Validation,
}
/// Record of a single executed step within a session.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReasoningStep {
/// Zero-based position in the tool chain.
pub index: usize,
/// What this step did.
pub kind: StepKind,
/// Full prompt the step received.
pub input: String,
/// Textual output the step produced.
pub output: String,
/// Confidence reported for this step, in [0, 1].
pub confidence: f64,
/// Execution time in milliseconds.
pub duration_ms: u64,
/// Token accounting for this step.
pub tokens: TokenUsage,
/// Whether the step succeeded.
pub success: bool,
/// Error message when `success` is false.
pub error: Option<String>,
}
/// Final, immutable outcome of a reasoning run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Decision {
/// Unique id of this decision.
pub id: Uuid,
/// Session that produced it.
pub session_id: Uuid,
/// Original user prompt.
pub prompt: String,
/// Profile the run executed under.
pub profile: Profile,
/// Synthesized conclusion text.
pub conclusion: String,
/// Mean confidence across all steps.
pub confidence: f64,
/// Every step executed, in order.
pub steps: Vec<ReasoningStep>,
/// Aggregated token usage for the whole run.
pub total_tokens: TokenUsage,
/// Wall-clock duration of the run in milliseconds.
pub total_duration_ms: u64,
/// Memory context used, if retrieval ran.
pub memory_context: Option<MemoryContext>,
/// Whether the run completed (set unconditionally by `complete`).
pub success: bool,
/// Key perspectives extracted during synthesis.
pub insights: Vec<String>,
/// Warnings and limitations surfaced during synthesis.
pub caveats: Vec<String>,
/// When the decision was finalized (UTC).
pub timestamp: chrono::DateTime<chrono::Utc>,
}
impl Decision {
    /// True when the averaged confidence reaches the profile's minimum bar.
    pub fn meets_threshold(&self) -> bool {
        self.confidence >= self.profile.min_confidence()
    }

    /// Compact one-line, human-readable summary of the run.
    pub fn summary(&self) -> String {
        let status = if self.success { "SUCCESS" } else { "FAILED" };
        format!(
            "[{}] {} (confidence: {:.0}%, {} steps, {}ms)",
            self.profile,
            status,
            self.confidence * 100.0,
            self.steps.len(),
            self.total_duration_ms
        )
    }
}
/// Mutable state of an in-progress reasoning run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReasoningSession {
/// Unique session id.
pub id: Uuid,
/// Profile this run executes under.
pub profile: Profile,
/// Original user prompt.
pub prompt: String,
/// Number of steps completed so far.
pub current_step: usize,
/// Steps completed so far, in execution order.
pub steps: Vec<ReasoningStep>,
/// Memory retrieved at the start of the run, if any.
pub memory_context: Option<MemoryContext>,
/// Running token total across all steps.
pub total_tokens: TokenUsage,
/// When the session was created (UTC).
pub started_at: chrono::DateTime<chrono::Utc>,
/// Set by `complete` once the run has finished.
pub completed: bool,
/// Final decision; `Some` only after `complete`.
pub decision: Option<Decision>,
}
impl ReasoningSession {
    /// Start a fresh, empty session for `prompt` under `profile`.
    pub fn new(prompt: &str, profile: Profile) -> Self {
        Self {
            id: Uuid::new_v4(),
            profile,
            prompt: prompt.to_owned(),
            current_step: 0,
            steps: Vec::new(),
            memory_context: None,
            total_tokens: TokenUsage::default(),
            started_at: chrono::Utc::now(),
            completed: false,
            decision: None,
        }
    }

    /// Record a finished step and fold its token usage into the total.
    pub fn add_step(&mut self, step: ReasoningStep) {
        self.total_tokens.add(&step.tokens);
        self.steps.push(step);
        self.current_step += 1;
    }

    /// Arithmetic mean of step confidences (0.0 while no steps exist).
    pub fn current_confidence(&self) -> f64 {
        let count = self.steps.len();
        if count == 0 {
            return 0.0;
        }
        let total: f64 = self.steps.iter().map(|s| s.confidence).sum();
        total / count as f64
    }

    /// Seal the session: build the final `Decision` and mark it complete.
    pub fn complete(&mut self, conclusion: String, insights: Vec<String>, caveats: Vec<String>) {
        let total_duration_ms = (chrono::Utc::now() - self.started_at).num_milliseconds() as u64;
        let decision = Decision {
            id: Uuid::new_v4(),
            session_id: self.id,
            prompt: self.prompt.clone(),
            profile: self.profile,
            conclusion,
            confidence: self.current_confidence(),
            steps: self.steps.clone(),
            total_tokens: self.total_tokens.clone(),
            total_duration_ms,
            memory_context: self.memory_context.clone(),
            // Completion always counts as success; low confidence is surfaced
            // as a caveat by the synthesizer, not a failure here.
            success: true,
            insights,
            caveats,
            timestamp: chrono::Utc::now(),
        };
        self.decision = Some(decision);
        self.completed = true;
    }
}
/// Fluent builder for `ReasoningLoop`; see the `with_*` methods.
pub struct ReasoningLoopBuilder {
// Configuration accumulated by the setters.
config: ReasoningConfig,
// Required — `build` fails without it.
executor: Option<Arc<dyn ThinkToolExecutor>>,
// Optional retrieval backend.
memory: Option<Arc<dyn MemoryProvider>>,
}
impl ReasoningLoopBuilder {
    /// Start from `ReasoningConfig::default()` with no executor or memory.
    pub fn new() -> Self {
        Self {
            config: ReasoningConfig::default(),
            executor: None,
            memory: None,
        }
    }

    /// Set the default reasoning profile.
    pub fn with_profile(mut self, profile: Profile) -> Self {
        self.config.default_profile = profile;
        self
    }

    /// Set the wall-clock timeout for a whole run.
    pub fn with_timeout(mut self, timeout: Duration) -> Self {
        self.config.timeout = timeout;
        self
    }

    /// Toggle parallel execution and set its concurrency cap.
    pub fn with_parallel(mut self, enabled: bool, max_concurrent: usize) -> Self {
        self.config.enable_parallel = enabled;
        self.config.max_concurrent = max_concurrent;
        self
    }

    /// Supply the ThinkTool executor (required by `build`).
    pub fn with_executor(mut self, executor: Arc<dyn ThinkToolExecutor>) -> Self {
        self.executor = Some(executor);
        self
    }

    /// Attach a memory provider; implicitly enables memory retrieval.
    pub fn with_memory(mut self, memory: Arc<dyn MemoryProvider>) -> Self {
        self.memory = Some(memory);
        self.config.enable_memory = true;
        self
    }

    /// Tune memory retrieval: chunk count and minimum relevance score.
    pub fn with_memory_config(mut self, top_k: usize, min_score: f32) -> Self {
        self.config.memory_top_k = top_k;
        self.config.memory_min_score = min_score;
        self
    }

    /// Toggle event streaming and set the broadcast channel capacity.
    pub fn with_streaming(mut self, enabled: bool, buffer_size: usize) -> Self {
        self.config.enable_streaming = enabled;
        self.config.stream_buffer_size = buffer_size;
        self
    }

    /// Set the LLM sampling parameters used for every tool call.
    pub fn with_llm_params(mut self, temperature: f64, max_tokens: u32) -> Self {
        self.config.temperature = temperature;
        self.config.max_tokens = max_tokens;
        self
    }

    /// Toggle retry-on-failure and set the retry count.
    pub fn with_retries(mut self, enabled: bool, max_retries: u32) -> Self {
        self.config.retry_on_failure = enabled;
        self.config.max_retries = max_retries;
        self
    }

    /// Replace the entire config in one call. Note this can undo earlier
    /// setters (e.g. re-disable `enable_memory` set by `with_memory`);
    /// last call wins.
    pub fn with_config(mut self, config: ReasoningConfig) -> Self {
        self.config = config;
        self
    }

    /// Finalize the builder into a `ReasoningLoop`.
    ///
    /// # Errors
    /// Returns `ReasoningError::Config` when no ThinkTool executor was
    /// supplied, or when `stream_buffer_size` is zero —
    /// `tokio::sync::broadcast::channel` panics on a zero capacity, so the
    /// misconfiguration is rejected here instead of panicking mid-run.
    pub fn build(self) -> Result<ReasoningLoop> {
        let executor = self
            .executor
            .ok_or_else(|| ReasoningError::Config("ThinkTool executor required".into()))?;
        if self.config.stream_buffer_size == 0 {
            return Err(ReasoningError::Config(
                "stream_buffer_size must be at least 1".into(),
            ));
        }
        Ok(ReasoningLoop {
            config: self.config,
            executor,
            memory: self.memory,
            active_sessions: Arc::new(RwLock::new(HashMap::new())),
            _event_sender: None,
        })
    }
}
impl Default for ReasoningLoopBuilder {
/// Equivalent to `ReasoningLoopBuilder::new()`.
fn default() -> Self {
Self::new()
}
}
/// Orchestrates memory retrieval, the profile's ThinkTool chain and decision
/// synthesis. Construct via `ReasoningLoop::builder()`.
pub struct ReasoningLoop {
// Tunables fixed at build time.
config: ReasoningConfig,
// Executes individual ThinkTools; required at build time.
executor: Arc<dyn ThinkToolExecutor>,
// Optional retrieval backend; only consulted when `config.enable_memory`.
memory: Option<Arc<dyn MemoryProvider>>,
// Sessions started via the streaming API, keyed by session id.
active_sessions: Arc<RwLock<HashMap<Uuid, ReasoningSession>>>,
// NOTE(review): never read or written in this file — confirm whether a
// shared event sender is still planned before removing.
_event_sender: Option<broadcast::Sender<ReasoningEvent>>,
}
impl ReasoningLoop {
/// Convenience constructor for the builder.
pub fn builder() -> ReasoningLoopBuilder {
ReasoningLoopBuilder::new()
}
/// Read-only view of the active configuration.
pub fn config(&self) -> &ReasoningConfig {
&self.config
}
pub async fn reason_stream(&self, prompt: &str) -> Result<(StreamHandle, Decision)> {
self.reason_stream_with_profile(prompt, self.config.default_profile)
.await
}
pub async fn reason_stream_with_profile(
&self,
prompt: &str,
profile: Profile,
) -> Result<(StreamHandle, Decision)> {
let (tx, rx) = broadcast::channel(self.config.stream_buffer_size);
let session = ReasoningSession::new(prompt, profile);
let session_id = session.id;
{
let mut sessions = self.active_sessions.write().await;
sessions.insert(session_id, session.clone());
}
let _ = tx.send(ReasoningEvent::SessionStarted {
session_id,
profile,
prompt: prompt.to_string(),
});
let decision = self.execute_loop(session, Some(&tx)).await?;
let _ = tx.send(ReasoningEvent::SessionCompleted {
success: decision.success,
});
{
let mut sessions = self.active_sessions.write().await;
sessions.remove(&session_id);
}
Ok((
StreamHandle {
receiver: rx,
session_id,
},
decision,
))
}
/// Non-streaming run using the default profile.
pub async fn reason(&self, prompt: &str) -> Result<Decision> {
self.reason_with_profile(prompt, self.config.default_profile)
.await
}
/// Non-streaming run with an explicit profile.
/// NOTE(review): unlike the streaming path, the session is never inserted
/// into `active_sessions`, so `get_session`/`cancel_session` cannot see
/// it — confirm this asymmetry is intended.
pub async fn reason_with_profile(&self, prompt: &str, profile: Profile) -> Result<Decision> {
let session = ReasoningSession::new(prompt, profile);
self.execute_loop(session, None).await
}
/// Core driver: optionally query memory, run the profile's ThinkTool chain
/// sequentially, then synthesize a `Decision`.
///
/// Progress events are sent on `event_tx` when provided; send results are
/// deliberately ignored everywhere (streaming is best-effort and must not
/// affect the run).
async fn execute_loop(
&self,
mut session: ReasoningSession,
event_tx: Option<&broadcast::Sender<ReasoningEvent>>,
) -> Result<Decision> {
let start = Instant::now();
let profile = session.profile;
// Phase 1: memory retrieval. Best-effort — a failed query only emits a
// Warning event and the run continues without context.
if self.config.enable_memory {
if let Some(ref memory) = self.memory {
let mem_start = Instant::now();
match memory
.query(
&session.prompt,
self.config.memory_top_k,
self.config.memory_min_score,
)
.await
{
Ok(context) => {
let retrieval_time = mem_start.elapsed().as_millis() as u64;
if let Some(tx) = event_tx {
let _ = tx.send(ReasoningEvent::MemoryQueried {
chunks_found: context.chunks.len(),
retrieval_time_ms: retrieval_time,
});
}
session.memory_context = Some(context);
}
Err(e) => {
if let Some(tx) = event_tx {
let _ = tx.send(ReasoningEvent::Warning {
message: format!("Memory query failed: {}", e),
});
}
}
}
}
}
// Phase 2: run the tool chain sequentially; each step sees all previous
// outputs via `previous_outputs`.
let tools = profile.thinktool_chain();
let total_steps = tools.len();
let mut previous_outputs: HashMap<String, ThinkToolResult> = HashMap::new();
for (step_idx, tool_id) in tools.iter().enumerate() {
// Timeout is only checked between steps — an in-flight tool call is not
// interrupted, so a run can overshoot `timeout` by one step's duration.
if start.elapsed() > self.config.timeout {
return Err(ReasoningError::Timeout(self.config.timeout));
}
if let Some(tx) = event_tx {
let _ = tx.send(ReasoningEvent::StepStarted {
step_index: step_idx,
total_steps,
tool_id: tool_id.to_string(),
});
}
let input = self.build_step_input(&session, &previous_outputs, step_idx);
let context = ExecutionContext {
session_id: session.id,
profile,
memory: session.memory_context.clone(),
previous_outputs: previous_outputs.clone(),
temperature: self.config.temperature,
max_tokens: self.config.max_tokens,
};
// A step that still fails after retries aborts the whole run via `?`.
let result = self.execute_with_retry(tool_id, &input, &context).await?;
let step = ReasoningStep {
index: step_idx,
kind: StepKind::ThinkTool {
tool_id: tool_id.to_string(),
},
input: input.clone(),
output: result.content.clone(),
confidence: result.confidence,
duration_ms: result.duration_ms,
tokens: result.tokens.clone(),
success: true,
error: None,
};
if let Some(tx) = event_tx {
let _ = tx.send(ReasoningEvent::StepCompleted {
step_index: step_idx,
tool_id: tool_id.to_string(),
confidence: result.confidence,
duration_ms: result.duration_ms,
});
}
// Duplicate tool ids (Paranoid runs "proofguard" twice) overwrite the
// earlier map entry; `session.steps` still records every execution.
previous_outputs.insert(tool_id.to_string(), result);
session.add_step(step);
}
// Phase 3: synthesize, seal the session and emit the decision event.
let (conclusion, insights, caveats) = self.synthesize_decision(&session, &previous_outputs);
session.complete(conclusion, insights, caveats);
let decision = session
.decision
.clone()
.expect("decision should be set after complete");
if let Some(tx) = event_tx {
let _ = tx.send(ReasoningEvent::DecisionReached {
confidence: decision.confidence,
total_duration_ms: decision.total_duration_ms,
});
}
// Best-effort persistence: a failed store does not fail the run.
if self.config.enable_memory {
if let Some(ref memory) = self.memory {
let _ = memory.store_session(&session).await;
}
}
Ok(decision)
}
fn build_step_input(
&self,
session: &ReasoningSession,
previous_outputs: &HashMap<String, ThinkToolResult>,
step_idx: usize,
) -> String {
let mut input = session.prompt.clone();
if let Some(ref memory) = session.memory_context {
if !memory.chunks.is_empty() {
input.push_str("\n\n--- RELEVANT CONTEXT ---\n");
for chunk in memory.chunks.iter().take(3) {
input.push_str(&format!("- {}\n", chunk.text));
}
}
}
if step_idx > 0 {
input.push_str("\n\n--- PREVIOUS ANALYSIS ---\n");
for (tool_id, result) in previous_outputs {
let content = if result.content.len() > 500 {
format!("{}...", &result.content[..500])
} else {
result.content.clone()
};
input.push_str(&format!(
"[{}] (confidence: {:.0}%)\n{}\n\n",
tool_id,
result.confidence * 100.0,
content
));
}
}
input
}
async fn execute_with_retry(
&self,
tool_id: &str,
input: &str,
context: &ExecutionContext,
) -> Result<ThinkToolResult> {
let mut last_error = None;
for attempt in 0..=self.config.max_retries {
match self.executor.execute(tool_id, input, context).await {
Ok(result) => return Ok(result),
Err(e) => {
if !self.config.retry_on_failure || attempt == self.config.max_retries {
return Err(e);
}
last_error = Some(e);
let delay = Duration::from_millis(100 * 2u64.pow(attempt));
tokio::time::sleep(delay).await;
}
}
}
Err(
last_error.unwrap_or_else(|| ReasoningError::ThinkToolFailed {
tool: tool_id.to_string(),
message: "Unknown error".into(),
}),
)
}
fn synthesize_decision(
&self,
session: &ReasoningSession,
outputs: &HashMap<String, ThinkToolResult>,
) -> (String, Vec<String>, Vec<String>) {
let conclusion = outputs
.values()
.last()
.map(|r| r.content.clone())
.unwrap_or_else(|| "No conclusion reached".to_string());
let mut insights = Vec::new();
if let Some(gt) = outputs.get("gigathink") {
if let Some(structured) = >.structured {
if let Some(perspectives) = structured.get("perspectives") {
if let Some(arr) = perspectives.as_array() {
for p in arr.iter().take(3) {
if let Some(s) = p.as_str() {
insights.push(s.to_string());
}
}
}
}
}
}
let mut caveats = Vec::new();
if let Some(bh) = outputs.get("brutalhonesty") {
if bh.content.to_lowercase().contains("caveat")
|| bh.content.to_lowercase().contains("limitation")
{
caveats.push("See BrutalHonesty analysis for detailed limitations".to_string());
}
}
let confidence = session.current_confidence();
if confidence < session.profile.min_confidence() {
caveats.push(format!(
"Confidence ({:.0}%) below target ({:.0}%)",
confidence * 100.0,
session.profile.min_confidence() * 100.0
));
}
(conclusion, insights, caveats)
}
/// Snapshot of a currently-registered session, if any.
/// Only sessions started via the streaming API are registered.
pub async fn get_session(&self, session_id: Uuid) -> Option<ReasoningSession> {
let sessions = self.active_sessions.read().await;
sessions.get(&session_id).cloned()
}
/// Deregister a session.
///
/// NOTE(review): this only removes the bookkeeping entry — it does not
/// interrupt an in-flight `execute_loop`, and `ReasoningError::Cancelled`
/// is never produced here. Confirm the intended cancellation semantics.
pub async fn cancel_session(&self, session_id: Uuid) -> Result<()> {
let mut sessions = self.active_sessions.write().await;
if sessions.remove(&session_id).is_some() {
Ok(())
} else {
Err(ReasoningError::Config(format!(
"Session {} not found",
session_id
)))
}
}
/// Number of currently-registered (streaming) sessions.
pub async fn active_session_count(&self) -> usize {
let sessions = self.active_sessions.read().await;
sessions.len()
}
}
#[cfg(test)]
mod tests {
use super::*;
// Chain lengths per profile (Paranoid counts proofguard twice).
#[test]
fn test_profile_chains() {
assert_eq!(Profile::Quick.thinktool_chain().len(), 2);
assert_eq!(Profile::Balanced.thinktool_chain().len(), 4);
assert_eq!(Profile::Deep.thinktool_chain().len(), 5);
assert_eq!(Profile::Paranoid.thinktool_chain().len(), 6);
}
// Thresholds rise monotonically with profile rigor.
#[test]
fn test_profile_confidence() {
assert_eq!(Profile::Quick.min_confidence(), 0.70);
assert_eq!(Profile::Balanced.min_confidence(), 0.80);
assert_eq!(Profile::Deep.min_confidence(), 0.85);
assert_eq!(Profile::Paranoid.min_confidence(), 0.95);
}
// Parsing is case-insensitive and accepts one-letter shorthands.
#[test]
fn test_profile_from_str() {
assert_eq!(Profile::parse_profile("quick"), Some(Profile::Quick));
assert_eq!(Profile::parse_profile("Q"), Some(Profile::Quick));
assert_eq!(Profile::parse_profile("balanced"), Some(Profile::Balanced));
assert_eq!(Profile::parse_profile("PARANOID"), Some(Profile::Paranoid));
assert_eq!(Profile::parse_profile("invalid"), None);
}
#[test]
fn test_config_defaults() {
let config = ReasoningConfig::default();
assert_eq!(config.default_profile, Profile::Balanced);
assert!(config.enable_parallel);
assert_eq!(config.max_concurrent, 4);
assert!(!config.enable_memory);
}
// A fresh session starts empty and incomplete.
#[test]
fn test_session_creation() {
let session = ReasoningSession::new("Test prompt", Profile::Balanced);
assert!(!session.completed);
assert_eq!(session.current_step, 0);
assert!(session.steps.is_empty());
}
// Session confidence is the arithmetic mean of step confidences.
#[test]
fn test_session_confidence() {
let mut session = ReasoningSession::new("Test", Profile::Balanced);
assert_eq!(session.current_confidence(), 0.0);
session.add_step(ReasoningStep {
index: 0,
kind: StepKind::ThinkTool {
tool_id: "gigathink".into(),
},
input: "test".into(),
output: "output".into(),
confidence: 0.8,
duration_ms: 100,
tokens: TokenUsage::default(),
success: true,
error: None,
});
session.add_step(ReasoningStep {
index: 1,
kind: StepKind::ThinkTool {
tool_id: "laserlogic".into(),
},
input: "test".into(),
output: "output".into(),
confidence: 0.9,
duration_ms: 100,
tokens: TokenUsage::default(),
success: true,
error: None,
});
// (0.8 + 0.9) / 2 == 0.85, compared with a float tolerance.
assert!((session.current_confidence() - 0.85).abs() < 1e-9);
}
// Token accumulation sums each field independently.
#[test]
fn test_token_usage_add() {
let mut total = TokenUsage {
input_tokens: 100,
output_tokens: 50,
total_tokens: 150,
cost_usd: 0.001,
};
let step = TokenUsage {
input_tokens: 200,
output_tokens: 100,
total_tokens: 300,
cost_usd: 0.002,
};
total.add(&step);
assert_eq!(total.input_tokens, 300);
assert_eq!(total.output_tokens, 150);
assert_eq!(total.total_tokens, 450);
assert!((total.cost_usd - 0.003).abs() < 0.0001);
}
// Threshold check compares confidence against the profile minimum (0.80).
#[test]
fn test_decision_meets_threshold() {
let decision = Decision {
id: Uuid::new_v4(),
session_id: Uuid::new_v4(),
prompt: "test".into(),
profile: Profile::Balanced,
conclusion: "test conclusion".into(),
confidence: 0.85,
steps: vec![],
total_tokens: TokenUsage::default(),
total_duration_ms: 1000,
memory_context: None,
success: true,
insights: vec![],
caveats: vec![],
timestamp: chrono::Utc::now(),
};
assert!(decision.meets_threshold());
let low_confidence = Decision {
confidence: 0.75,
..decision.clone()
};
assert!(!low_confidence.meets_threshold()); }
}