use serde::{Deserialize, Serialize};
use std::fmt;

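/// A provider-agnostic completion request: prompt, optional system message,
/// token budget, sampling temperature, and stop sequences.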
#[derive(Debug, Clone, Serialize)]
pub struct LlmRequest {
    pub prompt: String,
    pub system: Option<String>,
    pub max_tokens: u32,
    pub temperature: f64,
    pub stop_sequences: Vec<String>,
}

impl LlmRequest {
    #[must_use]
    pub fn new(prompt: impl Into<String>) -> Self {
        Self {
            prompt: prompt.into(),
            system: None,
            max_tokens: 1024,
            temperature: 0.7,
            stop_sequences: Vec::new(),
        }
    }

    #[must_use]
    pub fn with_system(mut self, system: impl Into<String>) -> Self {
        self.system = Some(system.into());
        self
    }

    #[must_use]
    pub fn with_max_tokens(mut self, max_tokens: u32) -> Self {
        self.max_tokens = max_tokens;
        self
    }

    #[must_use]
    pub fn with_temperature(mut self, temperature: f64) -> Self {
        self.temperature = temperature;
        self
    }

    #[must_use]
    pub fn with_stop_sequence(mut self, stop: impl Into<String>) -> Self {
        self.stop_sequences.push(stop.into());
        self
    }
}

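/// A completed response from a provider: the generated content, the model that
/// produced it, token usage, and why generation stopped.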
#[derive(Debug, Clone, Deserialize)]
pub struct LlmResponse {
    pub content: String,
    pub model: String,
    pub usage: TokenUsage,
    pub finish_reason: FinishReason,
}

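/// Token counts reported by the provider for a single request/response pair.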
#[derive(Debug, Clone, Default, Deserialize)]
pub struct TokenUsage {
    pub prompt_tokens: u32,
    pub completion_tokens: u32,
    pub total_tokens: u32,
}

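/// Why the provider stopped generating, deserialized from snake_case values.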
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum FinishReason {
    Stop,
    MaxTokens,
    StopSequence,
    ContentFilter,
}

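/// Error returned by an LLM provider: a kind, a human-readable message, and a
/// flag indicating whether the call is worth retrying.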
#[derive(Debug, Clone)]
pub struct LlmError {
    pub kind: LlmErrorKind,
    pub message: String,
    pub retryable: bool,
}

impl fmt::Display for LlmError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:?}: {}", self.kind, self.message)
    }
}

impl std::error::Error for LlmError {}

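/// Broad classification of provider failures.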
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LlmErrorKind {
    Authentication,
    RateLimit,
    InvalidRequest,
    ModelNotFound,
    Network,
    ProviderError,
    ParseError,
    Timeout,
}

impl LlmError {
    #[must_use]
    pub fn auth(message: impl Into<String>) -> Self {
        Self {
            kind: LlmErrorKind::Authentication,
            message: message.into(),
            retryable: false,
        }
    }

    #[must_use]
    pub fn rate_limit(message: impl Into<String>) -> Self {
        Self {
            kind: LlmErrorKind::RateLimit,
            message: message.into(),
            retryable: true,
        }
    }

    #[must_use]
    pub fn network(message: impl Into<String>) -> Self {
        Self {
            kind: LlmErrorKind::Network,
            message: message.into(),
            retryable: true,
        }
    }

    #[must_use]
    pub fn parse(message: impl Into<String>) -> Self {
        Self {
            kind: LlmErrorKind::ParseError,
            message: message.into(),
            retryable: false,
        }
    }

    #[must_use]
    pub fn provider(message: impl Into<String>) -> Self {
        Self {
            kind: LlmErrorKind::ProviderError,
            message: message.into(),
            retryable: false,
        }
    }
}

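/// Synchronous completion interface implemented by each LLM backend.
/// Deprecated in favor of the trait-based backends named in the attribute below.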
#[deprecated(
    since = "0.2.0",
    note = "Use converge_core::traits::{ChatBackend, EmbedBackend, LlmBackend} instead. See BOUNDARY.md for migration."
)]
pub trait LlmProvider: Send + Sync {
    fn name(&self) -> &str;

    fn model(&self) -> &str;

    fn complete(&self, request: &LlmRequest) -> Result<LlmResponse, LlmError>;

    fn provenance(&self, request_id: &str) -> String {
        format!("{}:{}", self.model(), request_id)
    }
}

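/// A scripted response (or error) for `MockProvider` to return during tests.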
#[derive(Debug, Clone)]
pub struct MockResponse {
    pub content: String,
    pub confidence: f64,
    pub success: bool,
    pub error: Option<LlmError>,
}

impl MockResponse {
    #[must_use]
    pub fn success(content: impl Into<String>, confidence: f64) -> Self {
        Self {
            content: content.into(),
            confidence,
            success: true,
            error: None,
        }
    }

    #[must_use]
    pub fn failure(error: LlmError) -> Self {
        Self {
            content: String::new(),
            confidence: 0.0,
            success: false,
            error: Some(error),
        }
    }
}

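/// In-memory test provider that pops scripted `MockResponse`s in order and
/// counts calls; `constant` pre-fills 100 identical successful responses.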
pub struct MockProvider {
    model: String,
    responses: std::sync::Mutex<Vec<MockResponse>>,
    call_count: std::sync::atomic::AtomicUsize,
}

impl MockProvider {
    #[must_use]
    pub fn new(responses: Vec<MockResponse>) -> Self {
        Self {
            model: "mock-model".into(),
            responses: std::sync::Mutex::new(responses),
            call_count: std::sync::atomic::AtomicUsize::new(0),
        }
    }

    #[must_use]
    pub fn constant(content: impl Into<String>, confidence: f64) -> Self {
        let content = content.into();
        let responses = (0..100)
            .map(|_| MockResponse::success(content.clone(), confidence))
            .collect();
        Self::new(responses)
    }

    #[must_use]
    pub fn call_count(&self) -> usize {
        self.call_count.load(std::sync::atomic::Ordering::SeqCst)
    }
}

impl LlmProvider for MockProvider {
    fn name(&self) -> &str {
        "mock"
    }

    fn model(&self) -> &str {
        &self.model
    }

    fn complete(&self, _request: &LlmRequest) -> Result<LlmResponse, LlmError> {
        self.call_count
            .fetch_add(1, std::sync::atomic::Ordering::SeqCst);

        let mut responses = self.responses.lock().expect("MockProvider mutex poisoned");

        if responses.is_empty() {
            return Err(LlmError::provider("MockProvider: no more responses"));
        }

        let response = responses.remove(0);

        if let Some(error) = response.error {
            return Err(error);
        }

        Ok(LlmResponse {
            content: response.content,
            model: self.model.clone(),
            usage: TokenUsage {
                prompt_tokens: 10,
                completion_tokens: 20,
                total_tokens: 30,
            },
            finish_reason: FinishReason::Stop,
        })
    }
}

use crate::agent::Agent;
use crate::context::{Context, ContextKey, ProposedFact};
use crate::effect::AgentEffect;
use crate::validation::encode_proposal;
use std::sync::Arc;

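/// Configuration for an `LlmAgent`: prompts, prompt format, the context key it
/// writes to, the keys it reads from, and request parameters.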
#[derive(Clone)]
pub struct LlmAgentConfig {
    pub system_prompt: String,
    pub prompt_template: String,
    pub prompt_format: crate::prompt::PromptFormat,
    pub target_key: ContextKey,
    pub dependencies: Vec<ContextKey>,
    pub default_confidence: f64,
    pub max_tokens: u32,
    pub temperature: f64,
    pub requirements: Option<crate::model_selection::AgentRequirements>,
}

impl Default for LlmAgentConfig {
    fn default() -> Self {
        Self {
            system_prompt: String::new(),
            prompt_template: "{context}".into(),
            prompt_format: crate::prompt::PromptFormat::Edn,
            target_key: ContextKey::Hypotheses,
            dependencies: vec![ContextKey::Seeds],
            default_confidence: 0.7,
            max_tokens: 1024,
            temperature: 0.7,
            requirements: None,
        }
    }
}

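/// Turns a raw `LlmResponse` into zero or more `ProposedFact`s for a target key.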
pub trait ResponseParser: Send + Sync {
    fn parse(&self, response: &LlmResponse, target_key: ContextKey) -> Vec<ProposedFact>;
}

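/// Parser that emits the whole trimmed response as a single proposed fact.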
pub struct SimpleParser {
    pub id_prefix: String,
    pub confidence: f64,
}

impl Default for SimpleParser {
    fn default() -> Self {
        Self {
            id_prefix: "llm".into(),
            confidence: 0.7,
        }
    }
}

impl ResponseParser for SimpleParser {
    fn parse(&self, response: &LlmResponse, target_key: ContextKey) -> Vec<ProposedFact> {
        let content = response.content.trim();
        if content.is_empty() {
            return Vec::new();
        }

        let id = format!("{}-{}", self.id_prefix, uuid_v4_simple());

        vec![ProposedFact {
            key: target_key,
            id,
            content: content.to_string(),
            confidence: self.confidence,
            provenance: response.model.clone(),
        }]
    }
}

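/// Parser that splits the response on a delimiter and emits one proposed fact
/// per non-empty segment.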
pub struct MultiLineParser {
    pub id_prefix: String,
    pub delimiter: String,
    pub confidence: f64,
}

impl MultiLineParser {
    #[must_use]
    pub fn newline(id_prefix: impl Into<String>, confidence: f64) -> Self {
        Self {
            id_prefix: id_prefix.into(),
            delimiter: "\n".into(),
            confidence,
        }
    }
}

impl ResponseParser for MultiLineParser {
    fn parse(&self, response: &LlmResponse, target_key: ContextKey) -> Vec<ProposedFact> {
        response
            .content
            .split(&self.delimiter)
            .map(str::trim)
            .filter(|s| !s.is_empty())
            .enumerate()
            .map(|(i, content)| ProposedFact {
                key: target_key,
                id: format!("{}-{}", self.id_prefix, i),
                content: content.to_string(),
                confidence: self.confidence,
                provenance: response.model.clone(),
            })
            .collect()
    }
}

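/// An agent that builds a prompt from context, calls an `LlmProvider`, and
/// parses the response into proposed facts for its target key.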
pub struct LlmAgent {
    name: String,
    provider: Arc<dyn LlmProvider>,
    config: LlmAgentConfig,
    parser: Arc<dyn ResponseParser>,
    full_dependencies: Vec<ContextKey>,
}

impl LlmAgent {
    pub fn new(
        name: impl Into<String>,
        provider: Arc<dyn LlmProvider>,
        config: LlmAgentConfig,
    ) -> Self {
        let name_str = name.into();

        let mut full_dependencies = config.dependencies.clone();
        if !full_dependencies.contains(&config.target_key) {
            full_dependencies.push(config.target_key);
        }

        // Default parser prefixes fact ids with the agent name and uses the
        // configured default confidence.
        let parser: Arc<dyn ResponseParser> = Arc::new(SimpleParser {
            id_prefix: name_str.clone(),
            confidence: config.default_confidence,
        });

        Self {
            name: name_str,
            provider,
            config,
            parser,
            full_dependencies,
        }
    }

    #[must_use]
    pub fn with_parser(mut self, parser: Arc<dyn ResponseParser>) -> Self {
        self.parser = parser;
        self
    }

    fn build_prompt(&self, ctx: &Context) -> String {
        use std::fmt::Write;

        if matches!(self.config.prompt_format, crate::prompt::PromptFormat::Edn) {
            let prompt_ctx =
                crate::prompt::PromptContext::from_context(ctx, &self.config.dependencies);
            let output_contract =
                crate::prompt::OutputContract::new("proposed-fact", self.config.target_key);

            let objective = if self.config.prompt_template == "{context}" {
                format!("analyze-{:?}", self.config.target_key).to_lowercase()
            } else {
                self.config
                    .prompt_template
                    .replace("{context}", "")
                    .trim()
                    .to_string()
            };

            let agent_prompt = crate::prompt::AgentPrompt::new(
                crate::prompt::AgentRole::Proposer,
                objective,
                prompt_ctx,
                output_contract,
            )
            .with_constraint(crate::prompt::Constraint::NoHallucinate)
            .with_constraint(crate::prompt::Constraint::NoInvent);

            return agent_prompt.serialize(self.config.prompt_format);
        }

        let mut context_str = String::new();

        for &key in &self.config.dependencies {
            let facts = ctx.get(key);
            if !facts.is_empty() {
                let _ = writeln!(context_str, "\n## {key:?}");
                for fact in facts {
                    let _ = writeln!(context_str, "- {}: {}", fact.id, fact.content);
                }
            }
        }

        self.config
            .prompt_template
            .replace("{context}", &context_str)
    }
}

impl Agent for LlmAgent {
    fn name(&self) -> &str {
        &self.name
    }

    fn dependencies(&self) -> &[ContextKey] {
        &self.full_dependencies
    }

    fn accepts(&self, ctx: &Context) -> bool {
        let has_input = self.config.dependencies.iter().any(|k| ctx.has(*k));
        if !has_input {
            return false;
        }

        let my_prefix = format!("{}-", self.name);
        let already_contributed = ctx
            .get(self.config.target_key)
            .iter()
            .any(|f| f.id.starts_with(&my_prefix));

        !already_contributed
    }

    fn execute(&self, ctx: &Context) -> AgentEffect {
        let prompt = self.build_prompt(ctx);

        let request = LlmRequest::new(prompt)
            .with_system(self.config.system_prompt.clone())
            .with_max_tokens(self.config.max_tokens)
            .with_temperature(self.config.temperature);

        match self.provider.complete(&request) {
            Ok(response) => {
                let proposals = self.parser.parse(&response, self.config.target_key);

                let facts: Vec<_> = proposals.iter().map(encode_proposal).collect();

                AgentEffect::with_facts(facts)
            }
            Err(e) => {
                tracing::error!(agent = %self.name, error = %e, "LLM call failed");
                AgentEffect::empty()
            }
        }
    }
}

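/// Cheap unique-ish identifier derived from the current timestamp; despite the
/// name, this is not a real UUIDv4.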
fn uuid_v4_simple() -> String {
    use std::time::{SystemTime, UNIX_EPOCH};
    let nanos = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|d| d.as_nanos())
        .unwrap_or(0);
    format!("{:x}", nanos % 0xFFFF_FFFF)
}

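/// Functional role an LLM plays in the pipeline, used by `LlmRouter` and
/// `ModelConfig` to pick a provider or model.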
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum LlmRole {
    WebResearch,
    FastAnalysis,
    DeepAnalysis,
    Verification,
    Creative,
    Synthesis,
    Code,
    Summarization,
    Custom(&'static str),
}

impl fmt::Display for LlmRole {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::WebResearch => write!(f, "web-research"),
            Self::FastAnalysis => write!(f, "fast-analysis"),
            Self::DeepAnalysis => write!(f, "deep-analysis"),
            Self::Verification => write!(f, "verification"),
            Self::Creative => write!(f, "creative"),
            Self::Synthesis => write!(f, "synthesis"),
            Self::Code => write!(f, "code"),
            Self::Summarization => write!(f, "summarization"),
            Self::Custom(name) => write!(f, "custom:{name}"),
        }
    }
}

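/// Maps `LlmRole`s to providers, with an optional default used for any role
/// that has no explicit provider.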
pub struct LlmRouter {
    providers: std::collections::HashMap<LlmRole, Arc<dyn LlmProvider>>,
    default: Option<Arc<dyn LlmProvider>>,
}

impl Default for LlmRouter {
    fn default() -> Self {
        Self::new()
    }
}

impl LlmRouter {
    #[must_use]
    pub fn new() -> Self {
        Self {
            providers: std::collections::HashMap::new(),
            default: None,
        }
    }

    #[must_use]
    pub fn with_provider(mut self, role: LlmRole, provider: Arc<dyn LlmProvider>) -> Self {
        self.providers.insert(role, provider);
        self
    }

    #[must_use]
    pub fn with_default(mut self, provider: Arc<dyn LlmProvider>) -> Self {
        self.default = Some(provider);
        self
    }

    #[must_use]
    pub fn get(&self, role: LlmRole) -> Option<Arc<dyn LlmProvider>> {
        self.providers
            .get(&role)
            .cloned()
            .or_else(|| self.default.clone())
    }

    #[must_use]
    pub fn has_provider(&self, role: LlmRole) -> bool {
        self.providers.contains_key(&role) || self.default.is_some()
    }

    #[must_use]
    pub fn roles(&self) -> Vec<LlmRole> {
        self.providers.keys().copied().collect()
    }

    pub fn complete(&self, role: LlmRole, request: &LlmRequest) -> Result<LlmResponse, LlmError> {
        let provider = self.get(role).ok_or_else(|| {
            LlmError::provider(format!("No provider configured for role: {role}"))
        })?;
        provider.complete(request)
    }
}

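/// Declarative mapping from roles to (provider, model id) pairs, with presets
/// for high-quality, fast, and Anthropic-only setups.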
#[derive(Debug, Clone, Default)]
pub struct ModelConfig {
    models: std::collections::HashMap<LlmRole, (String, String)>,
}

impl ModelConfig {
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }

    #[must_use]
    pub fn model(
        mut self,
        role: LlmRole,
        provider: impl Into<String>,
        model_id: impl Into<String>,
    ) -> Self {
        self.models.insert(role, (provider.into(), model_id.into()));
        self
    }

    #[must_use]
    pub fn get(&self, role: LlmRole) -> Option<(&str, &str)> {
        self.models
            .get(&role)
            .map(|(p, m)| (p.as_str(), m.as_str()))
    }

    #[must_use]
    pub fn has(&self, role: LlmRole) -> bool {
        self.models.contains_key(&role)
    }

    #[must_use]
    pub fn high_quality() -> Self {
        Self::new()
            .model(LlmRole::WebResearch, "perplexity", "sonar-pro")
            .model(LlmRole::FastAnalysis, "google", "gemini-2.0-flash")
            .model(LlmRole::DeepAnalysis, "anthropic", "claude-opus-4")
            .model(LlmRole::Verification, "openai", "gpt-4.5")
            .model(LlmRole::Creative, "anthropic", "claude-opus-4")
            .model(LlmRole::Synthesis, "anthropic", "claude-opus-4")
            .model(LlmRole::Code, "anthropic", "claude-sonnet-4")
            .model(LlmRole::Summarization, "google", "gemini-2.0-flash")
    }

    #[must_use]
    pub fn fast() -> Self {
        Self::new()
            .model(LlmRole::WebResearch, "perplexity", "sonar")
            .model(LlmRole::FastAnalysis, "google", "gemini-2.0-flash")
            .model(LlmRole::DeepAnalysis, "google", "gemini-2.0-flash")
            .model(LlmRole::Verification, "anthropic", "claude-haiku-3.5")
            .model(LlmRole::Creative, "anthropic", "claude-sonnet-4")
            .model(LlmRole::Synthesis, "anthropic", "claude-sonnet-4")
            .model(LlmRole::Code, "anthropic", "claude-sonnet-4")
            .model(LlmRole::Summarization, "google", "gemini-2.0-flash")
    }

    #[must_use]
    pub fn anthropic_only() -> Self {
        Self::new()
            .model(LlmRole::WebResearch, "anthropic", "claude-sonnet-4")
            .model(LlmRole::FastAnalysis, "anthropic", "claude-haiku-3.5")
            .model(LlmRole::DeepAnalysis, "anthropic", "claude-opus-4")
            .model(LlmRole::Verification, "anthropic", "claude-sonnet-4")
            .model(LlmRole::Creative, "anthropic", "claude-opus-4")
            .model(LlmRole::Synthesis, "anthropic", "claude-opus-4")
            .model(LlmRole::Code, "anthropic", "claude-sonnet-4")
            .model(LlmRole::Summarization, "anthropic", "claude-haiku-3.5")
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn mock_provider_returns_responses_in_order() {
        let provider = MockProvider::new(vec![
            MockResponse::success("First response", 0.8),
            MockResponse::success("Second response", 0.9),
        ]);

        let request = LlmRequest::new("test");

        let r1 = provider.complete(&request).unwrap();
        assert_eq!(r1.content, "First response");

        let r2 = provider.complete(&request).unwrap();
        assert_eq!(r2.content, "Second response");

        assert_eq!(provider.call_count(), 2);
    }

    #[test]
    fn mock_provider_can_return_errors() {
        let provider = MockProvider::new(vec![MockResponse::failure(LlmError::rate_limit(
            "Too many requests",
        ))]);

        let request = LlmRequest::new("test");
        let result = provider.complete(&request);

        assert!(result.is_err());
        let err = result.unwrap_err();
        assert_eq!(err.kind, LlmErrorKind::RateLimit);
        assert!(err.retryable);
    }

    #[test]
    fn constant_mock_returns_same_response() {
        let provider = MockProvider::constant("Always this", 0.75);

        let request = LlmRequest::new("test");

        for _ in 0..5 {
            let response = provider.complete(&request).unwrap();
            assert_eq!(response.content, "Always this");
        }
    }

    #[test]
    fn request_builder_works() {
        let request = LlmRequest::new("Hello")
            .with_system("You are helpful")
            .with_max_tokens(500)
            .with_temperature(0.5)
            .with_stop_sequence("\n");

        assert_eq!(request.prompt, "Hello");
        assert_eq!(request.system, Some("You are helpful".into()));
        assert_eq!(request.max_tokens, 500);
        assert!((request.temperature - 0.5).abs() < 0.001);
        assert_eq!(request.stop_sequences, vec!["\n"]);
    }

    #[test]
    fn provenance_string_includes_model() {
        let provider = MockProvider::constant("test", 0.8);
        let provenance = provider.provenance("req-123");
        assert_eq!(provenance, "mock-model:req-123");
    }

    #[test]
    fn llm_agent_emits_proposals() {
        use crate::context::Fact;
        use crate::engine::Engine;
        use crate::validation::ValidationAgent;

        let provider = Arc::new(MockProvider::constant(
            "The Nordic market shows strong growth potential",
            0.8,
        ));

        let config = LlmAgentConfig {
            system_prompt: "You are a market analyst".into(),
            prompt_template: "Analyze this market: {context}".into(),
            target_key: ContextKey::Hypotheses,
            dependencies: vec![ContextKey::Seeds],
            default_confidence: 0.75,
            ..Default::default()
        };

        let mut engine = Engine::new();

        let mut ctx = Context::new();
        let _ = ctx.add_fact(Fact {
            key: ContextKey::Seeds,
            id: "market".into(),
            content: "Nordic B2B SaaS".into(),
        });

        engine.register(LlmAgent::new("MarketAnalyst", provider.clone(), config));
        engine.register(ValidationAgent::with_defaults());

        let result = engine.run(ctx).expect("should converge");

        assert!(provider.call_count() > 0);

        let hypotheses = result.context.get(ContextKey::Hypotheses);
        assert_eq!(hypotheses.len(), 1);
        assert!(hypotheses[0].content.contains("Nordic"));
    }

    #[test]
    fn llm_agent_handles_provider_errors() {
        use crate::context::Fact;
        use crate::engine::Engine;

        let provider = Arc::new(MockProvider::new(vec![MockResponse::failure(
            LlmError::rate_limit("Too many requests"),
        )]));

        let config = LlmAgentConfig {
            dependencies: vec![ContextKey::Seeds],
            ..Default::default()
        };

        let mut engine = Engine::new();

        let mut ctx = Context::new();
        let _ = ctx.add_fact(Fact {
            key: ContextKey::Seeds,
            id: "test".into(),
            content: "test".into(),
        });

        engine.register(LlmAgent::new("FailingAgent", provider, config));

        let result = engine.run(ctx).expect("should converge despite error");
        assert!(result.converged);

        assert!(result.context.get(ContextKey::Proposals).is_empty());
    }

    #[test]
    fn multi_line_parser_splits_response() {
        let parser = MultiLineParser::newline("item", 0.7);

        let response = LlmResponse {
            content: "First insight\nSecond insight\n\nThird insight".into(),
            model: "test-model".into(),
            usage: TokenUsage::default(),
            finish_reason: FinishReason::Stop,
        };

        let proposals = parser.parse(&response, ContextKey::Signals);

        assert_eq!(proposals.len(), 3);
        assert_eq!(proposals[0].id, "item-0");
        assert_eq!(proposals[0].content, "First insight");
        assert_eq!(proposals[1].content, "Second insight");
        assert_eq!(proposals[2].content, "Third insight");
    }

    #[test]
    fn llm_agent_builds_prompt_from_context() {
        use crate::context::Fact;
        use crate::prompt::PromptFormat;

        let provider = Arc::new(MockProvider::constant("response", 0.8));

        let config = LlmAgentConfig {
            prompt_template: "Context:\n{context}\n\nAnalyze this.".into(),
            dependencies: vec![ContextKey::Seeds, ContextKey::Signals],
            prompt_format: PromptFormat::Plain,
            ..Default::default()
        };

        let agent = LlmAgent::new("TestAgent", provider, config);

        let mut ctx = Context::new();
        let _ = ctx.add_fact(Fact {
            key: ContextKey::Seeds,
            id: "seed-1".into(),
            content: "Market info".into(),
        });
        let _ = ctx.add_fact(Fact {
            key: ContextKey::Signals,
            id: "signal-1".into(),
            content: "Growth trend".into(),
        });

        let prompt = agent.build_prompt(&ctx);

        assert!(prompt.contains("Market info"));
        assert!(prompt.contains("Growth trend"));
        assert!(prompt.contains("Analyze this"));
    }

    #[test]
    fn router_routes_by_role() {
        let gemini = Arc::new(MockProvider::new(vec![MockResponse::success(
            "Gemini response",
            0.85,
        )]));
        let claude = Arc::new(MockProvider::new(vec![MockResponse::success(
            "Claude response",
            0.90,
        )]));

        let router = LlmRouter::new()
            .with_provider(LlmRole::FastAnalysis, gemini)
            .with_provider(LlmRole::Synthesis, claude);

        let request = LlmRequest::new("test");

        let fast_response = router.complete(LlmRole::FastAnalysis, &request).unwrap();
        assert_eq!(fast_response.content, "Gemini response");

        let synth_response = router.complete(LlmRole::Synthesis, &request).unwrap();
        assert_eq!(synth_response.content, "Claude response");
    }

    #[test]
    fn router_falls_back_to_default() {
        let default = Arc::new(MockProvider::constant("Default response", 0.75));

        let router = LlmRouter::new().with_default(default);

        let request = LlmRequest::new("test");

        let response = router.complete(LlmRole::WebResearch, &request).unwrap();
        assert_eq!(response.content, "Default response");

        let response = router.complete(LlmRole::Code, &request).unwrap();
        assert_eq!(response.content, "Default response");
    }

    #[test]
    fn router_returns_error_when_no_provider() {
        let router = LlmRouter::new();
        let request = LlmRequest::new("test");
        let result = router.complete(LlmRole::WebResearch, &request);

        assert!(result.is_err());
        let err = result.unwrap_err();
        assert!(err.message.contains("No provider configured"));
    }

    #[test]
    fn model_config_stores_choices() {
        let config = ModelConfig::new()
            .model(LlmRole::WebResearch, "perplexity", "sonar-pro")
            .model(LlmRole::DeepAnalysis, "anthropic", "claude-opus-4");

        assert_eq!(
            config.get(LlmRole::WebResearch),
            Some(("perplexity", "sonar-pro"))
        );
        assert_eq!(
            config.get(LlmRole::DeepAnalysis),
            Some(("anthropic", "claude-opus-4"))
        );
        assert_eq!(config.get(LlmRole::Code), None);
    }

    #[test]
    fn model_config_presets_cover_all_standard_roles() {
        let high_quality = ModelConfig::high_quality();
        let fast = ModelConfig::fast();
        let anthropic = ModelConfig::anthropic_only();

        for role in [
            LlmRole::WebResearch,
            LlmRole::FastAnalysis,
            LlmRole::DeepAnalysis,
            LlmRole::Verification,
            LlmRole::Creative,
            LlmRole::Synthesis,
            LlmRole::Code,
            LlmRole::Summarization,
        ] {
            assert!(high_quality.has(role), "high_quality missing {role}");
            assert!(fast.has(role), "fast missing {role}");
            assert!(anthropic.has(role), "anthropic missing {role}");
        }
    }

    #[test]
    fn high_quality_uses_different_families_for_verification() {
        let config = ModelConfig::high_quality();

        let (deep_provider, _) = config.get(LlmRole::DeepAnalysis).unwrap();
        let (verify_provider, _) = config.get(LlmRole::Verification).unwrap();

        assert_ne!(
            deep_provider, verify_provider,
            "Verification should use different model family than deep analysis"
        );
    }

    #[test]
    fn llm_role_display() {
        assert_eq!(format!("{}", LlmRole::WebResearch), "web-research");
        assert_eq!(format!("{}", LlmRole::DeepAnalysis), "deep-analysis");
        assert_eq!(format!("{}", LlmRole::Custom("my-role")), "custom:my-role");
    }
}