1use converge_traits::llm::LlmResponse;
19use converge_core::prompt::{
20 AgentPrompt, AgentRole, Constraint, OutputContract, PromptContext, PromptFormat,
21};
22use converge_core::{ContextKey, ProposedFact};
23
/// Wraps a provider-agnostic `AgentPrompt` and renders it into the text
/// framing each LLM provider expects (Claude XML tags, OpenAI JSON hints,
/// or plain EDN).
pub struct ProviderPromptBuilder {
    // The canonical prompt; always serialized as EDN before provider framing.
    base: AgentPrompt,
    // Optional response-format hint (e.g. "xml") that selects which
    // `<instructions>` block `build_for_claude` appends.
    output_format_hint: Option<String>,
}
32
33impl ProviderPromptBuilder {
34 #[must_use]
36 pub fn new(base: AgentPrompt) -> Self {
37 Self {
38 base,
39 output_format_hint: None,
40 }
41 }
42
43 #[must_use]
48 pub fn with_output_format(mut self, format: impl Into<String>) -> Self {
49 self.output_format_hint = Some(format.into());
50 self
51 }
52
53 #[must_use]
62 pub fn build_for_claude(&self) -> String {
63 let edn_prompt = self.base.serialize(PromptFormat::Edn);
64
65 let mut prompt = String::from("<prompt>\n");
67 prompt.push_str(&edn_prompt);
68 prompt.push_str("\n</prompt>\n\n");
69
70 if let Some(ref format) = self.output_format_hint {
72 if format.as_str() == "xml" {
73 prompt.push_str("<instructions>\n");
74 prompt.push_str("Respond in XML format with the following structure:\n");
75 prompt.push_str("<response>\n");
76 prompt.push_str(" <proposals>\n");
77 prompt.push_str(
78 " <proposal id=\"...\" confidence=\"0.0-1.0\">content</proposal>\n",
79 );
80 prompt.push_str(" </proposals>\n");
81 prompt.push_str("</response>\n");
82 prompt.push_str("</instructions>");
83 } else {
84 prompt.push_str("<instructions>Respond in ");
86 prompt.push_str(format);
87 prompt.push_str(" format.</instructions>");
88 }
89 } else {
90 prompt.push_str("<instructions>\n");
92 prompt.push_str("Respond with proposed facts in a structured format.\n");
93 prompt.push_str("Each proposal should include: id, content, confidence (0.0-1.0).\n");
94 prompt.push_str("</instructions>");
95 }
96
97 prompt
98 }
99
100 #[must_use]
107 pub fn build_edn_only(&self) -> String {
108 self.base.serialize(PromptFormat::Edn)
109 }
110
111 #[must_use]
117 pub fn build_for_openai(&self) -> String {
118 let edn_prompt = self.base.serialize(PromptFormat::Edn);
119
120 let mut prompt = String::from("Prompt (EDN format):\n");
121 prompt.push_str(&edn_prompt);
122 prompt.push_str("\n\n");
123
124 prompt.push_str("Respond with a JSON object containing an array of proposals:\n");
126 prompt.push_str("{\n");
127 prompt.push_str(" \"proposals\": [\n");
128 prompt.push_str(" {\"id\": \"...\", \"content\": \"...\", \"confidence\": 0.0-1.0}\n");
129 prompt.push_str(" ]\n");
130 prompt.push_str("}\n");
131
132 prompt
133 }
134
135 #[must_use]
137 pub fn build_generic(&self) -> String {
138 self.base.serialize(PromptFormat::Edn)
139 }
140}
141
/// Stateless namespace for parsing structured LLM responses (Claude XML,
/// OpenAI JSON, or free text) into `ProposedFact` values.
pub struct StructuredResponseParser;
146
147impl StructuredResponseParser {
148 #[must_use]
159 pub fn parse_claude_xml(
160 response: &LlmResponse,
161 target_key: ContextKey,
162 model: &str,
163 ) -> Vec<ProposedFact> {
164 let content = &response.content;
165
166 let mut proposals = Vec::new();
168 let mut in_proposal = false;
169 let mut current_id = String::new();
170 let mut current_confidence = 0.7; let mut current_content = String::new();
172
173 let lines: Vec<&str> = content.lines().collect();
174 for line in lines {
175 let line = line.trim();
176
177 if line.starts_with("<proposal") {
179 in_proposal = true;
180 if let Some(id_start) = line.find("id=\"") {
182 let id_end = line[id_start + 4..].find('"').unwrap_or(0);
183 current_id = line[id_start + 4..id_start + 4 + id_end].to_string();
184 }
185 if let Some(conf_start) = line.find("confidence=\"") {
186 let conf_end = line[conf_start + 12..].find('"').unwrap_or(0);
187 if let Ok(conf) =
188 line[conf_start + 12..conf_start + 12 + conf_end].parse::<f64>()
189 {
190 current_confidence = conf;
191 }
192 }
193 if let Some(content_start) = line.find('>') {
195 if let Some(content_end) = line.find("</proposal>") {
196 current_content = line[content_start + 1..content_end].trim().to_string();
197 }
198 }
199 } else if in_proposal
200 && !line.starts_with("</proposal>")
201 && !line.starts_with("<proposal")
202 {
203 if !current_content.is_empty() {
205 current_content.push(' ');
206 }
207 current_content.push_str(line);
208 }
209
210 if line.contains("</proposal>") {
211 if !current_id.is_empty() && !current_content.is_empty() {
212 proposals.push(ProposedFact {
213 key: target_key,
214 id: current_id.clone(),
215 content: current_content.clone(),
216 confidence: current_confidence,
217 provenance: format!("{}:{}", model, response.model),
218 });
219 }
220 in_proposal = false;
221 current_id.clear();
222 current_content.clear();
223 current_confidence = 0.7;
224 }
225 }
226
227 proposals
228 }
229
230 pub fn parse_openai_json(
245 response: &LlmResponse,
246 target_key: ContextKey,
247 model: &str,
248 ) -> Result<Vec<ProposedFact>, String> {
249 use serde_json::Value;
250
251 let json: Value = serde_json::from_str(&response.content)
252 .map_err(|e| format!("Failed to parse JSON: {e}"))?;
253
254 let mut proposals = Vec::new();
255
256 if let Some(proposals_array) = json.get("proposals").and_then(|v| v.as_array()) {
257 for proposal in proposals_array {
258 let id = proposal
259 .get("id")
260 .and_then(|v| v.as_str())
261 .ok_or_else(|| "Missing or invalid 'id' field".to_string())?
262 .to_string();
263
264 let content = proposal
265 .get("content")
266 .and_then(|v| v.as_str())
267 .ok_or_else(|| "Missing or invalid 'content' field".to_string())?
268 .to_string();
269
270 let confidence = proposal
271 .get("confidence")
272 .and_then(serde_json::Value::as_f64)
273 .unwrap_or(0.7);
274
275 proposals.push(ProposedFact {
276 key: target_key,
277 id,
278 content,
279 confidence,
280 provenance: format!("{}:{}", model, response.model),
281 });
282 }
283 } else {
284 if let (Some(id), Some(content)) = (
286 json.get("id").and_then(|v| v.as_str()),
287 json.get("content").and_then(|v| v.as_str()),
288 ) {
289 let confidence = json
290 .get("confidence")
291 .and_then(serde_json::Value::as_f64)
292 .unwrap_or(0.7);
293 proposals.push(ProposedFact {
294 key: target_key,
295 id: id.to_string(),
296 content: content.to_string(),
297 confidence,
298 provenance: format!("{}:{}", model, response.model),
299 });
300 } else {
301 return Err("No proposals found in JSON response".to_string());
302 }
303 }
304
305 Ok(proposals)
306 }
307
308 #[must_use]
310 pub fn parse_generic(
311 response: &LlmResponse,
312 target_key: ContextKey,
313 model: &str,
314 ) -> Vec<ProposedFact> {
315 use std::time::{SystemTime, UNIX_EPOCH};
316
317 let id = SystemTime::now().duration_since(UNIX_EPOCH).map_or_else(
319 |_| "proposal-0".to_string(),
320 |d| format!("proposal-{:x}", d.as_nanos() % 0xFFFF_FFFF),
321 );
322
323 vec![ProposedFact {
325 key: target_key,
326 id,
327 content: response.content.clone(),
328 confidence: 0.7,
329 provenance: format!("{}:{}", model, response.model),
330 }]
331 }
332}
333
334pub fn build_claude_prompt(
336 role: AgentRole,
337 objective: impl Into<String>,
338 context: PromptContext,
339 output_contract: OutputContract,
340 constraints: impl IntoIterator<Item = Constraint>,
341) -> String {
342 let base =
343 AgentPrompt::new(role, objective, context, output_contract).with_constraints(constraints);
344
345 ProviderPromptBuilder::new(base)
346 .with_output_format("xml")
347 .build_for_claude()
348}
349
350pub fn build_openai_prompt(
352 role: AgentRole,
353 objective: impl Into<String>,
354 context: PromptContext,
355 output_contract: OutputContract,
356 constraints: impl IntoIterator<Item = Constraint>,
357) -> String {
358 let base =
359 AgentPrompt::new(role, objective, context, output_contract).with_constraints(constraints);
360
361 ProviderPromptBuilder::new(base).build_for_openai()
362}
363
#[cfg(test)]
// float_cmp allowed: the confidence values asserted here are exact literals
// round-tripped through parsing, not the result of arithmetic.
#[allow(clippy::float_cmp)] mod tests {
    use super::*;
    use converge_core::Fact;

    // Claude prompt must wrap the EDN payload in <prompt> tags and append
    // XML-format instructions when the "xml" hint is set.
    #[test]
    fn test_claude_prompt_building() {
        let mut ctx = PromptContext::new();
        ctx.add_facts(
            ContextKey::Signals,
            vec![Fact {
                key: ContextKey::Signals,
                id: "s1".to_string(),
                content: "Test signal".to_string(),
            }],
        );

        let prompt = build_claude_prompt(
            AgentRole::Proposer,
            "test-objective",
            ctx,
            OutputContract::new("proposed-fact", ContextKey::Competitors),
            vec![Constraint::NoInvent, Constraint::NoHallucinate],
        );

        assert!(prompt.contains("<prompt>"));
        // ":r :proposer" is the EDN encoding of the role — confirms the base
        // prompt was serialized as EDN.
        assert!(prompt.contains(":r :proposer"));
        assert!(prompt.contains("<instructions>"));
        assert!(prompt.contains("XML format"));
    }

    // OpenAI prompt must label the EDN payload and include the JSON
    // response-shape example.
    #[test]
    fn test_openai_prompt_building() {
        let ctx = PromptContext::new();

        let prompt = build_openai_prompt(
            AgentRole::Proposer,
            "test-objective",
            ctx,
            OutputContract::new("proposed-fact", ContextKey::Strategies),
            vec![Constraint::NoInvent],
        );

        assert!(prompt.contains("EDN format"));
        assert!(prompt.contains("JSON"));
        assert!(prompt.contains("proposals"));
    }

    // Happy path: two well-formed single-line <proposal> elements parse into
    // two ProposedFacts with their ids and confidences preserved.
    #[test]
    fn test_claude_xml_parsing() {
        let xml_response = r#"
<response>
 <proposals>
 <proposal id="p1" confidence="0.85">Test content 1</proposal>
 <proposal id="p2" confidence="0.90">Test content 2</proposal>
 </proposals>
</response>
"#;

        let response = LlmResponse {
            content: xml_response.to_string(),
            model: "claude-sonnet-4-6".to_string(),
            usage: converge_traits::llm::TokenUsage {
                prompt_tokens: 100,
                completion_tokens: 50,
                total_tokens: 150,
            },
            finish_reason: converge_traits::llm::FinishReason::Stop,
        };

        let proposals = StructuredResponseParser::parse_claude_xml(
            &response,
            ContextKey::Competitors,
            "anthropic",
        );

        assert_eq!(proposals.len(), 2);
        assert_eq!(proposals[0].id, "p1");
        assert_eq!(proposals[0].confidence, 0.85);
        assert_eq!(proposals[1].id, "p2");
        assert_eq!(proposals[1].confidence, 0.90);
    }

    // Happy path: the {"proposals": [...]} shape parses each element with its
    // id and confidence intact.
    #[test]
    fn test_openai_json_parsing() {
        let json_response = r#"
{
 "proposals": [
 {"id": "p1", "content": "Test content 1", "confidence": 0.85},
 {"id": "p2", "content": "Test content 2", "confidence": 0.90}
 ]
}
"#;

        let response = LlmResponse {
            content: json_response.to_string(),
            model: "gpt-4".to_string(),
            usage: converge_traits::llm::TokenUsage {
                prompt_tokens: 100,
                completion_tokens: 50,
                total_tokens: 150,
            },
            finish_reason: converge_traits::llm::FinishReason::Stop,
        };

        let proposals = StructuredResponseParser::parse_openai_json(
            &response,
            ContextKey::Strategies,
            "openai",
        )
        .unwrap();

        assert_eq!(proposals.len(), 2);
        assert_eq!(proposals[0].id, "p1");
        assert_eq!(proposals[0].confidence, 0.85);
        assert_eq!(proposals[1].id, "p2");
        assert_eq!(proposals[1].confidence, 0.90);
    }
}