use anyhow::Result;
use async_trait::async_trait;
use serde::{Deserialize, Serialize};
/// Runtime configuration for the reasoning orchestration layer.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OrchestrationConfig {
    /// Backend used when a request does not name a provider explicitly.
    pub default_provider: LlmProvider,
    /// Upper bound on tokens generated per completion.
    pub max_tokens: usize,
    /// Sampling temperature forwarded to the provider.
    pub temperature: f32,
    /// Whether responses should be streamed incrementally.
    pub streaming: bool,
    /// Request timeout, in seconds.
    pub timeout_secs: u64,
}
impl Default for OrchestrationConfig {
fn default() -> Self {
Self {
default_provider: LlmProvider::OpenAI,
max_tokens: 4096,
temperature: 0.7,
streaming: true,
timeout_secs: 120,
}
}
}
/// Supported LLM backends.
///
/// Serialized in lowercase via `rename_all` (e.g. `"openai"`, `"deepseek"`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum LlmProvider {
    OpenAI,
    Anthropic,
    Ollama,
    DeepSeek,
    Groq,
    /// User-supplied / self-hosted backend.
    Custom,
}
/// An agent capable of LLM-backed reasoning over a prompt.
///
/// Implementors must be `Send + Sync` so agents can be shared across
/// async tasks and threads.
#[async_trait]
pub trait ReasoningAgent: Send + Sync {
    /// Produces a complete reasoning response for `prompt`, optionally
    /// grounded in `context` (documents, history, thinktool profile).
    async fn reason(
        &self,
        prompt: &str,
        context: Option<&ReasoningContext>,
    ) -> Result<ReasoningResponse>;
    /// Streaming variant of [`reason`](ReasoningAgent::reason); yields
    /// the response incrementally instead of as a single value.
    async fn reason_stream(
        &self,
        prompt: &str,
        context: Option<&ReasoningContext>,
    ) -> Result<ReasoningStream>;
    /// Identifies which backend this agent talks to.
    fn provider(&self) -> LlmProvider;
}
/// Supplemental grounding supplied alongside a prompt.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ReasoningContext {
    /// Retrieved or user-provided documents to ground the answer on.
    pub documents: Vec<String>,
    /// Optional named thinktool profile to apply.
    pub thinktool_profile: Option<String>,
    /// Prior reasoning steps from earlier turns.
    pub history: Vec<ReasoningStep>,
    /// Free-form metadata; values are arbitrary JSON.
    pub metadata: std::collections::HashMap<String, serde_json::Value>,
}
/// A single recorded tool invocation within a reasoning trace.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReasoningStep {
    /// Name of the tool that was invoked.
    pub tool: String,
    /// Input passed to the tool.
    pub input: String,
    /// Output the tool produced.
    pub output: String,
    /// Confidence score for this step — presumably in [0, 1]; not
    /// enforced here (TODO confirm against producers).
    pub confidence: f32,
    /// When the step was executed (UTC).
    pub timestamp: chrono::DateTime<chrono::Utc>,
}
/// Final result of a reasoning run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReasoningResponse {
    /// The generated answer text.
    pub content: String,
    /// Identifier of the model that produced the answer.
    pub model: String,
    /// Backend that served the request.
    pub provider: LlmProvider,
    /// Token accounting for the request.
    pub tokens_used: TokenUsage,
    /// Trace of tool invocations performed while reasoning.
    pub reasoning_steps: Vec<ReasoningStep>,
    /// Overall confidence score — presumably in [0, 1]; not enforced
    /// here (TODO confirm against producers).
    pub confidence: f32,
}
/// Token accounting for a single LLM request.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct TokenUsage {
    /// Tokens consumed by the prompt.
    pub prompt_tokens: usize,
    /// Tokens produced in the completion.
    pub completion_tokens: usize,
    /// Total tokens billed — presumably prompt + completion; the
    /// invariant is not enforced by this type (TODO confirm).
    pub total_tokens: usize,
}
/// Handle for a streamed reasoning response.
///
/// NOTE(review): currently a placeholder — the `()` field reserves the
/// type name while the real stream implementation is pending.
pub struct ReasoningStream {
    _inner: (),
}
/// Fluent builder for [`ReasoningPipeline`].
pub struct PipelineBuilder {
    /// Orchestration settings; defaults via `OrchestrationConfig::default()`.
    config: OrchestrationConfig,
    /// Names of thinktools to enable, in registration order.
    tools: Vec<String>,
    /// Whether retrieval-augmented generation is enabled.
    rag_enabled: bool,
}
impl PipelineBuilder {
    /// Creates a builder with default configuration, no tools, and RAG
    /// disabled.
    pub fn new() -> Self {
        Self {
            config: OrchestrationConfig::default(),
            tools: Vec::new(),
            rag_enabled: false,
        }
    }

    /// Replaces the entire orchestration configuration.
    #[must_use]
    pub fn with_config(mut self, config: OrchestrationConfig) -> Self {
        self.config = config;
        self
    }

    /// Registers a thinktool by name.
    ///
    /// Generalized to accept anything convertible to `String`, so both
    /// `&str` and owned `String` callers work without an extra borrow
    /// or clone at the call site (backward-compatible with `&str`).
    #[must_use]
    pub fn with_thinktool(mut self, tool: impl Into<String>) -> Self {
        self.tools.push(tool.into());
        self
    }

    /// Enables retrieval-augmented generation for the pipeline.
    #[must_use]
    pub fn with_rag(mut self) -> Self {
        self.rag_enabled = true;
        self
    }

    /// Finalizes the builder into a [`ReasoningPipeline`].
    ///
    /// # Errors
    /// Currently infallible; returns `Result` so validation can be added
    /// later without breaking callers.
    pub fn build(self) -> Result<ReasoningPipeline> {
        Ok(ReasoningPipeline {
            config: self.config,
            tools: self.tools,
            rag_enabled: self.rag_enabled,
        })
    }
}
impl Default for PipelineBuilder {
fn default() -> Self {
Self::new()
}
}
/// An executable reasoning pipeline assembled by [`PipelineBuilder`].
pub struct ReasoningPipeline {
    /// Orchestration settings captured at build time.
    config: OrchestrationConfig,
    /// Enabled thinktool names, in registration order.
    tools: Vec<String>,
    /// Whether retrieval-augmented generation is enabled.
    rag_enabled: bool,
}
impl ReasoningPipeline {
    /// Executes the reasoning pipeline over `input`.
    ///
    /// NOTE(review): this is currently a stub — it logs the run and
    /// returns a canned response (model `"gpt-4"`, confidence 0.85)
    /// without contacting a provider or running the configured tools.
    pub async fn execute(&self, input: &str) -> Result<ReasoningResponse> {
        // LlmProvider is Copy, so this reads the configured backend cheaply.
        let provider = self.config.default_provider;
        tracing::info!(
            provider = ?provider,
            tools = ?self.tools,
            rag = self.rag_enabled,
            "Executing reasoning pipeline"
        );
        let content = format!("Pipeline executed for: {}", input);
        Ok(ReasoningResponse {
            content,
            model: String::from("gpt-4"),
            provider,
            tokens_used: TokenUsage::default(),
            reasoning_steps: Vec::new(),
            confidence: 0.85,
        })
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Default config must match the documented defaults — all fields,
    /// not just provider and token budget.
    #[test]
    fn test_config_default() {
        let config = OrchestrationConfig::default();
        assert_eq!(config.max_tokens, 4096);
        assert_eq!(config.default_provider, LlmProvider::OpenAI);
        assert!(config.streaming);
        assert_eq!(config.timeout_secs, 120);
        // Exact float comparison is safe: 0.7f32 is a fixed literal,
        // copied verbatim from Default — no arithmetic involved.
        assert!((config.temperature - 0.7).abs() < f32::EPSILON);
    }

    /// Builder must record tools in registration order and set the RAG flag.
    #[test]
    fn test_pipeline_builder() {
        let pipeline = PipelineBuilder::new()
            .with_thinktool("GigaThink")
            .with_thinktool("LaserLogic")
            .with_rag()
            .build()
            .unwrap();
        assert!(pipeline.rag_enabled);
        // Check content and order, not just length.
        assert_eq!(pipeline.tools, vec!["GigaThink", "LaserLogic"]);
    }

    /// `Default` must produce the same empty state as `new()`.
    #[test]
    fn test_builder_default_matches_new() {
        let built = PipelineBuilder::default().build().unwrap();
        assert!(built.tools.is_empty());
        assert!(!built.rag_enabled);
        assert_eq!(built.config.max_tokens, 4096);
    }
}