Skip to main content

noether_engine/llm/
mod.rs

1pub mod anthropic;
2pub mod cli_provider;
3pub mod mistral;
4pub mod openai;
5pub mod vertex;
6
7// Back-compat re-export: phase 5 shipped with a `claude_cli` module
8// that has since been generalised into `cli_provider`. Keep the old
9// name working so any downstream import still compiles until the
10// broader consolidation lands (see docs/research/llm-here.md).
11#[deprecated(note = "use crate::llm::cli_provider instead")]
12pub use cli_provider as claude_cli;
13
14use serde::{Deserialize, Serialize};
15
/// Errors surfaced by any [`LlmProvider`] implementation.
#[derive(Debug, thiserror::Error)]
pub enum LlmError {
    // Provider-side failure (API error payload, auth, quota, ...).
    #[error("LLM provider error: {0}")]
    Provider(String),
    // Transport-level failure before a usable response body was obtained.
    #[error("HTTP error: {0}")]
    Http(String),
    // Got a response, but could not extract the completion text from it.
    #[error("response parse error: {0}")]
    Parse(String),
}
25
26#[derive(Debug, Clone, Serialize, Deserialize)]
27pub enum Role {
28    System,
29    User,
30    Assistant,
31}
32
/// A single turn in a chat conversation sent to an [`LlmProvider`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Message {
    /// Who authored this turn.
    pub role: Role,
    /// Plain-text body of the turn.
    pub content: String,
}
38
39impl Message {
40    pub fn system(content: impl Into<String>) -> Self {
41        Self {
42            role: Role::System,
43            content: content.into(),
44        }
45    }
46
47    pub fn user(content: impl Into<String>) -> Self {
48        Self {
49            role: Role::User,
50            content: content.into(),
51        }
52    }
53
54    pub fn assistant(content: impl Into<String>) -> Self {
55        Self {
56            role: Role::Assistant,
57            content: content.into(),
58        }
59    }
60}
61
/// Per-request completion settings passed to [`LlmProvider::complete`].
#[derive(Debug, Clone)]
pub struct LlmConfig {
    /// Provider-specific model identifier (defaulted from `VERTEX_AI_MODEL`
    /// in the `Default` impl below).
    pub model: String,
    /// Upper bound on tokens generated for the completion.
    pub max_tokens: u32,
    /// Sampling temperature; lower values give more deterministic output.
    pub temperature: f32,
}
68
69impl Default for LlmConfig {
70    fn default() -> Self {
71        Self {
72            // mistral-small-2503: fastest + cheapest on europe-west4 ($0.05/1K calls).
73            // Override with VERTEX_AI_MODEL=gemini-2.5-flash or =mistral-medium-3, etc.
74            model: std::env::var("VERTEX_AI_MODEL").unwrap_or_else(|_| "mistral-small-2503".into()),
75            max_tokens: 8192,
76            temperature: 0.2,
77        }
78    }
79}
80
/// Trait for LLM text completion.
///
/// `Send + Sync` so implementations can be shared across threads
/// (e.g. behind an `Arc<dyn LlmProvider>`).
pub trait LlmProvider: Send + Sync {
    /// Produces a completion for `messages` using `config`.
    ///
    /// # Errors
    /// Returns an [`LlmError`] on provider, transport, or parse failure.
    fn complete(&self, messages: &[Message], config: &LlmConfig) -> Result<String, LlmError>;
}
85
/// Mock LLM provider for testing.
/// Returns the pre-configured response regardless of input.
pub struct MockLlmProvider {
    // Canned text handed back verbatim from `complete`.
    response: String,
}

impl MockLlmProvider {
    /// Creates a mock that always answers with `response`.
    pub fn new(response: impl Into<String>) -> Self {
        let response = response.into();
        Self { response }
    }
}
99
100impl LlmProvider for MockLlmProvider {
101    fn complete(&self, _messages: &[Message], _config: &LlmConfig) -> Result<String, LlmError> {
102        Ok(self.response.clone())
103    }
104}
105
/// Mock LLM provider that returns responses from a queue.
/// When the queue is exhausted, returns the fallback response.
/// Useful for testing multi-step flows like synthesis (compose → codegen → recompose).
pub struct SequenceMockLlmProvider {
    // FIFO of scripted replies; Mutex gives interior mutability behind &self.
    responses: std::sync::Mutex<std::collections::VecDeque<String>>,
    // Returned on every call once the queue runs dry.
    fallback: String,
}

impl SequenceMockLlmProvider {
    /// Creates a provider that replays `responses` in order, then answers
    /// every subsequent call with `fallback`.
    pub fn new(responses: Vec<impl Into<String>>, fallback: impl Into<String>) -> Self {
        let mut queue = std::collections::VecDeque::with_capacity(responses.len());
        for response in responses {
            queue.push_back(response.into());
        }
        Self {
            responses: std::sync::Mutex::new(queue),
            fallback: fallback.into(),
        }
    }
}
122
123impl LlmProvider for SequenceMockLlmProvider {
124    fn complete(&self, _messages: &[Message], _config: &LlmConfig) -> Result<String, LlmError> {
125        let mut queue = self.responses.lock().unwrap();
126        Ok(queue.pop_front().unwrap_or_else(|| self.fallback.clone()))
127    }
128}
129
#[cfg(test)]
mod tests {
    use super::*;

    // The fixed-response mock must echo its canned string for any input.
    #[test]
    fn mock_returns_configured_response() {
        let provider = MockLlmProvider::new("hello world");
        let messages = [Message::user("test")];
        let config = LlmConfig::default();
        let reply = provider.complete(&messages, &config).unwrap();
        assert_eq!(reply, "hello world");
    }

    // Each convenience constructor must tag the message with its role.
    #[test]
    fn message_constructors() {
        assert!(matches!(Message::system("sys").role, Role::System));
        assert!(matches!(Message::user("usr").role, Role::User));
        assert!(matches!(Message::assistant("ast").role, Role::Assistant));
    }
}