// systemprompt_agent/services/a2a_server/processing/ai_executor.rs

1use anyhow::Result;
2use futures::StreamExt;
3use std::sync::Arc;
4use tokio::sync::mpsc;
5
6use crate::models::a2a::Artifact;
7use crate::services::SkillService;
8use systemprompt_models::{
9    AiMessage, AiProvider, AiRequest, CallToolResult, MessageRole, RequestContext, StreamChunk,
10    ToolCall, ToolResultFormatter,
11};
12
13use super::message::StreamEvent;
14use crate::models::AgentRuntimeInfo;
15
16fn resolve_provider_config(
17    request_context: &RequestContext,
18    agent_runtime: &AgentRuntimeInfo,
19    ai_service: &dyn AiProvider,
20) -> (String, String, u32) {
21    if let Some(config) = request_context.tool_model_config() {
22        let provider = config
23            .provider
24            .as_deref()
25            .or(agent_runtime.provider.as_deref())
26            .unwrap_or_else(|| ai_service.default_provider())
27            .to_string();
28        let model = config
29            .model
30            .as_deref()
31            .or(agent_runtime.model.as_deref())
32            .unwrap_or_else(|| ai_service.default_model())
33            .to_string();
34        let max_tokens = config
35            .max_output_tokens
36            .or(agent_runtime.max_output_tokens)
37            .unwrap_or_else(|| ai_service.default_max_output_tokens());
38
39        tracing::debug!(
40            provider,
41            model,
42            max_output_tokens = max_tokens,
43            "Using tool_model_config"
44        );
45
46        return (provider, model, max_tokens);
47    }
48
49    let provider = agent_runtime
50        .provider
51        .as_deref()
52        .unwrap_or_else(|| ai_service.default_provider())
53        .to_string();
54    let model = agent_runtime
55        .model
56        .as_deref()
57        .unwrap_or_else(|| ai_service.default_model())
58        .to_string();
59    let max_tokens = agent_runtime
60        .max_output_tokens
61        .unwrap_or_else(|| ai_service.default_max_output_tokens());
62
63    (provider, model, max_tokens)
64}
65
/// Argument bundle for [`synthesize_tool_results_with_artifacts`].
///
/// Groups the many inputs of the synthesis step into a single struct so the
/// function signature stays manageable.
#[allow(missing_debug_implementations)]
pub struct SynthesizeToolResultsParams<'a> {
    /// AI backend used to generate the synthesis response.
    pub ai_service: Arc<dyn AiProvider>,
    /// Agent runtime configuration; supplies provider/model/max-token
    /// fallbacks when the request carries no override.
    pub agent_runtime: &'a AgentRuntimeInfo,
    /// Conversation history leading up to the tool execution.
    pub original_messages: Vec<AiMessage>,
    /// The assistant's reply that preceded the tool calls; appended to the
    /// conversation as an assistant turn before the synthesis prompt.
    pub initial_response: &'a str,
    /// Tool invocations that were executed.
    pub tool_calls: &'a [ToolCall],
    /// Results returned by the executed tools; formatted together with
    /// `tool_calls` for the synthesis prompt.
    pub tool_results: &'a [CallToolResult],
    /// Artifacts produced during tool execution; referenced (not repeated)
    /// in the synthesized message.
    pub artifacts: &'a [Artifact],
    /// Channel on which the synthesized text is streamed to the client.
    pub tx: mpsc::UnboundedSender<StreamEvent>,
    /// Per-request context; may carry a tool-specific model override.
    pub request_context: RequestContext,
    /// Skill service handle; currently unused by the synthesis path.
    pub skill_service: Arc<SkillService>,
}
79
80pub async fn synthesize_tool_results_with_artifacts(
81    params: SynthesizeToolResultsParams<'_>,
82) -> Result<String, ()> {
83    let SynthesizeToolResultsParams {
84        ai_service,
85        agent_runtime,
86        original_messages,
87        initial_response,
88        tool_calls,
89        tool_results,
90        artifacts,
91        tx,
92        request_context,
93        skill_service: _skill_service,
94    } = params;
95    let tool_results_context = ToolResultFormatter::format_for_synthesis(tool_calls, tool_results);
96    let artifact_references = build_artifact_references(artifacts);
97
98    let synthesis_prompt = build_synthesis_prompt(
99        tool_calls.len(),
100        &tool_results_context,
101        &artifact_references,
102    );
103
104    let mut synthesis_messages = original_messages;
105    synthesis_messages.push(AiMessage {
106        role: MessageRole::Assistant,
107        content: initial_response.to_string(),
108        parts: Vec::new(),
109    });
110    synthesis_messages.push(AiMessage {
111        role: MessageRole::User,
112        content: synthesis_prompt,
113        parts: Vec::new(),
114    });
115
116    tracing::info!(
117        tool_result_count = tool_results.len(),
118        "Calling AI to synthesize tool results"
119    );
120
121    let (provider, model, max_output_tokens) =
122        resolve_provider_config(&request_context, agent_runtime, ai_service.as_ref());
123
124    let synthesis_request = AiRequest::builder(
125        synthesis_messages,
126        &provider,
127        &model,
128        max_output_tokens,
129        request_context,
130    )
131    .build();
132
133    match ai_service.generate(&synthesis_request).await {
134        Ok(response) => {
135            let synthesized_text = response.content;
136
137            tracing::info!(text_len = synthesized_text.len(), "Synthesis complete");
138
139            if tx
140                .send(StreamEvent::Text(synthesized_text.clone()))
141                .is_err()
142            {
143                tracing::debug!("Stream receiver dropped during synthesis");
144            }
145
146            Ok(synthesized_text)
147        },
148        Err(e) => {
149            tracing::error!(error = %e, "Synthesis failed");
150            Err(())
151        },
152    }
153}
154
155pub async fn process_without_tools(
156    ai_service: Arc<dyn AiProvider>,
157    agent_runtime: &AgentRuntimeInfo,
158    ai_messages: Vec<AiMessage>,
159    tx: mpsc::UnboundedSender<StreamEvent>,
160    request_context: RequestContext,
161) -> Result<(String, Vec<ToolCall>, Vec<CallToolResult>), ()> {
162    let (provider, model, max_output_tokens) =
163        resolve_provider_config(&request_context, agent_runtime, ai_service.as_ref());
164
165    let generate_request = AiRequest::builder(
166        ai_messages,
167        &provider,
168        &model,
169        max_output_tokens,
170        request_context,
171    )
172    .build();
173
174    match ai_service.generate_stream(&generate_request).await {
175        Ok(mut stream) => {
176            let mut accumulated_text = String::new();
177            while let Some(chunk) = stream.next().await {
178                match chunk {
179                    Ok(StreamChunk::Text(text)) => {
180                        accumulated_text.push_str(&text);
181                        if tx.send(StreamEvent::Text(text)).is_err() {
182                            tracing::debug!("Stream receiver dropped during generation");
183                        }
184                    },
185                    Ok(StreamChunk::Usage { .. }) => {},
186                    Err(e) => {
187                        if tx.send(StreamEvent::Error(e.to_string())).is_err() {
188                            tracing::debug!("Stream receiver dropped while sending error");
189                        }
190                        return Err(());
191                    },
192                }
193            }
194            Ok((accumulated_text, Vec::new(), Vec::new()))
195        },
196        Err(e) => {
197            if tx.send(StreamEvent::Error(e.to_string())).is_err() {
198                tracing::debug!("Stream receiver dropped while sending error");
199            }
200            Err(())
201        },
202    }
203}
204
/// Renders the synthesis instruction sent to the AI after tool execution.
///
/// The prompt embeds the tool count, a formatted summary of the tool results,
/// and the artifact reference list, followed by fixed rules that steer the
/// model toward a short summary that references — rather than repeats —
/// artifact content.
fn build_synthesis_prompt(
    tool_count: usize,
    tool_results_context: &str,
    artifact_references: &str,
) -> String {
    // Named format arguments keep the long template readable.
    format!(
        r#"# Tool Execution Complete

You executed {count} tool(s). Now provide a BRIEF conversational response.

## Tool Results Summary

{results}

## Artifacts Created

{artifacts}

## CRITICAL RULES - READ CAREFULLY

1. **NEVER repeat artifact content** - The user sees artifacts separately. Your message should REFERENCE them, never duplicate their content.
2. **Maximum 100 words** - Be extremely concise. 2-3 sentences is ideal.
3. **Describe what was done, not what it contains** - Say "I've created a blog post about X" NOT "Here is the blog post: [full content]"
4. **Be conversational** - Natural, friendly summary. Not a report or transcript.
5. **Reference artifacts naturally** - Use format like "(see the artifact for the full content)"

## BAD EXAMPLE (DO NOT DO THIS)
"I've created your blog post. Here's the content:

[2000 words of article text]

Let me know if you'd like any changes."

## GOOD EXAMPLE
"Done! I've created a blog post exploring the Human-AI collaboration workflow. The article covers the key differences between automation and augmentation approaches, with practical steps for maintaining your authentic voice. Take a look at the artifact and let me know if you'd like any adjustments."

---

Provide your brief, conversational response now. Remember: the artifact has the content - your message is just the friendly summary."#,
        count = tool_count,
        results = tool_results_context,
        artifacts = artifact_references
    )
}
247
248fn build_artifact_references(artifacts: &[Artifact]) -> String {
249    if artifacts.is_empty() {
250        return "No artifacts were created.".to_string();
251    }
252
253    artifacts
254        .iter()
255        .map(|artifact| {
256            let artifact_type = &artifact.metadata.artifact_type;
257            let artifact_name = artifact
258                .title
259                .clone()
260                .unwrap_or_else(|| artifact.id.to_string());
261
262            format!(
263                "- **{}** ({}): Reference as '(see {} for details)'",
264                artifact_name, artifact_type, artifact_name
265            )
266        })
267        .collect::<Vec<_>>()
268        .join("\n")
269}