// File: agent_core/controller/session/config.rs

1// Session configuration types
2
3use super::compactor::{LLMCompactorConfig, ToolCompaction};
4
/// Identifies which LLM backend a session talks to.
///
/// Derives `Copy`, `Eq`, and `Hash` in addition to the usual set so the
/// provider can be passed by value, compared exhaustively, and used as a
/// map/set key — all free for a fieldless enum.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum LLMProvider {
    /// Anthropic (Claude family of models).
    Anthropic,
    /// OpenAI (GPT family of models).
    OpenAI,
}
11
/// Configuration for conversation compaction.
///
/// Consumed by the threshold-based strategy ([`CompactorType::Threshold`]);
/// the LLM-based strategy uses `LLMCompactorConfig` instead.
#[derive(Debug, Clone)]
pub struct CompactionConfig {
    /// Context utilization threshold (0.0-1.0) that triggers compaction.
    /// For example, 0.75 means compact when 75% of context is used.
    /// NOTE(review): nothing in this file validates the range — values
    /// outside 0.0-1.0 are stored as-is; confirm the compactor clamps them.
    pub threshold: f64,
    /// Number of recent turns to preserve during compaction.
    pub keep_recent_turns: usize,
    /// Strategy for handling old tool results.
    pub tool_compaction: ToolCompaction,
}
23
24impl Default for CompactionConfig {
25    fn default() -> Self {
26        Self {
27            threshold: 0.75,
28            keep_recent_turns: 5,
29            tool_compaction: ToolCompaction::Summarize,
30        }
31    }
32}
33
/// Type of compaction strategy to use.
///
/// Selected per-session via [`LLMSessionConfig`]'s `compaction` field;
/// `None` there disables compaction entirely.
#[derive(Debug, Clone)]
pub enum CompactorType {
    /// Simple threshold-based compaction that summarizes/redacts old tool results.
    Threshold(CompactionConfig),
    /// LLM-based conversation summarization that uses an LLM to create intelligent summaries.
    LLM(LLMCompactorConfig),
}
42
43impl Default for CompactorType {
44    fn default() -> Self {
45        CompactorType::Threshold(CompactionConfig::default())
46    }
47}
48
/// Configuration for creating an LLM session.
///
/// Construct via [`LLMSessionConfig::anthropic`] or
/// [`LLMSessionConfig::openai`], then refine with the `with_*` builders.
#[derive(Debug, Clone)]
pub struct LLMSessionConfig {
    /// The LLM provider to use
    pub provider: LLMProvider,
    /// API key for the provider
    pub api_key: String,
    /// Model to use (e.g., "claude-3-sonnet-20240229", "gpt-4")
    pub model: String,
    /// Default maximum tokens for responses
    pub max_tokens: Option<u32>,
    /// Default system prompt
    pub system_prompt: Option<String>,
    /// Default temperature
    pub temperature: Option<f32>,
    /// Enable streaming responses
    pub streaming: bool,
    /// Model's context window size (for compaction decisions).
    /// NOTE(review): signed `i32` admits negative values and caps at ~2.1B
    /// tokens — consider whether `u32`/`usize` was intended; changing it
    /// now would break callers, so it is only flagged here.
    pub context_limit: i32,
    /// Compaction configuration (None to disable compaction)
    pub compaction: Option<CompactorType>,
}
71
72impl LLMSessionConfig {
73    /// Creates a new Anthropic session config
74    pub fn anthropic(api_key: impl Into<String>, model: impl Into<String>) -> Self {
75        Self {
76            provider: LLMProvider::Anthropic,
77            api_key: api_key.into(),
78            model: model.into(),
79            max_tokens: Some(4096),
80            system_prompt: None,
81            temperature: None,
82            streaming: true,
83            context_limit: 200_000, // Claude default
84            compaction: Some(CompactorType::default()),
85        }
86    }
87
88    /// Creates a new OpenAI session config
89    pub fn openai(api_key: impl Into<String>, model: impl Into<String>) -> Self {
90        Self {
91            provider: LLMProvider::OpenAI,
92            api_key: api_key.into(),
93            model: model.into(),
94            max_tokens: Some(4096),
95            system_prompt: None,
96            temperature: None,
97            streaming: false, // OpenAI streaming not yet implemented
98            context_limit: 128_000, // GPT-4 default
99            compaction: Some(CompactorType::default()),
100        }
101    }
102
103    /// Enable or disable streaming
104    pub fn with_streaming(mut self, streaming: bool) -> Self {
105        self.streaming = streaming;
106        self
107    }
108
109    /// Sets the default max tokens
110    pub fn with_max_tokens(mut self, max_tokens: u32) -> Self {
111        self.max_tokens = Some(max_tokens);
112        self
113    }
114
115    /// Sets the default system prompt
116    pub fn with_system_prompt(mut self, prompt: impl Into<String>) -> Self {
117        self.system_prompt = Some(prompt.into());
118        self
119    }
120
121    /// Sets the default temperature
122    pub fn with_temperature(mut self, temperature: f32) -> Self {
123        self.temperature = Some(temperature);
124        self
125    }
126
127    /// Sets the model's context window size
128    pub fn with_context_limit(mut self, context_limit: i32) -> Self {
129        self.context_limit = context_limit;
130        self
131    }
132
133    /// Enables threshold compaction with custom configuration
134    pub fn with_threshold_compaction(mut self, config: CompactionConfig) -> Self {
135        self.compaction = Some(CompactorType::Threshold(config));
136        self
137    }
138
139    /// Enables LLM-based compaction with custom configuration
140    pub fn with_llm_compaction(mut self, config: LLMCompactorConfig) -> Self {
141        self.compaction = Some(CompactorType::LLM(config));
142        self
143    }
144
145    /// Enables compaction with the specified compactor type
146    pub fn with_compaction(mut self, compactor_type: CompactorType) -> Self {
147        self.compaction = Some(compactor_type);
148        self
149    }
150
151    /// Disables compaction
152    pub fn without_compaction(mut self) -> Self {
153        self.compaction = None;
154        self
155    }
156}
157
#[cfg(test)]
mod tests {
    use super::*;

    /// Builder methods must override the Anthropic constructor defaults.
    #[test]
    fn test_anthropic_config() {
        let cfg = LLMSessionConfig::anthropic("test-key", "claude-3-sonnet")
            .with_system_prompt("You are helpful.")
            .with_max_tokens(2048);

        assert_eq!(cfg.provider, LLMProvider::Anthropic);
        assert_eq!(cfg.model, "claude-3-sonnet");
        assert_eq!(cfg.api_key, "test-key");
        assert_eq!(cfg.system_prompt.as_deref(), Some("You are helpful."));
        assert_eq!(cfg.max_tokens, Some(2048));
    }

    /// The OpenAI constructor records the provider and model verbatim.
    #[test]
    fn test_openai_config() {
        let cfg = LLMSessionConfig::openai("test-key", "gpt-4");

        assert_eq!(cfg.model, "gpt-4");
        assert_eq!(cfg.provider, LLMProvider::OpenAI);
    }
}