// distri_types/execution.rs

1use schemars::JsonSchema;
2use serde::{Deserialize, Serialize};
3
4use browsr_types::FileType;
5
6use crate::{Part, PlanStep, TaskStatus, ToolResponse};
7
8/// Execution strategy types
9#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
10pub enum ExecutionType {
11    Sequential,
12    Interleaved,
13    Retriable,
14    React,
15    Code,
16}
17
/// Execution result with detailed information.
///
/// Produced for a single plan step; `reason` is populated on rejection
/// or failure (see [`ExecutionStatus`]).
#[derive(Debug, Clone, JsonSchema, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct ExecutionResult {
    // Identifier of the plan step this result belongs to.
    pub step_id: String,
    // Output parts (text, tool calls/results, raw data, images, artifacts).
    pub parts: Vec<Part>,
    // Terminal status of the step execution.
    pub status: ExecutionStatus,
    pub reason: Option<String>, // for rejection or failure
    // Unix timestamp — units (seconds vs milliseconds) are not evident
    // here; confirm against the code that sets it.
    pub timestamp: i64,
}
28
29impl ExecutionResult {
30    pub fn is_success(&self) -> bool {
31        self.status == ExecutionStatus::Success || self.status == ExecutionStatus::InputRequired
32    }
33    pub fn is_failed(&self) -> bool {
34        self.status == ExecutionStatus::Failed
35    }
36    pub fn is_rejected(&self) -> bool {
37        self.status == ExecutionStatus::Rejected
38    }
39    pub fn is_input_required(&self) -> bool {
40        self.status == ExecutionStatus::InputRequired
41    }
42
43    pub fn as_observation(&self) -> String {
44        let mut txt = String::new();
45        if let Some(reason) = &self.reason {
46            txt.push_str(&reason);
47        }
48        let parts_txt = self
49            .parts
50            .iter()
51            .filter_map(|p| match p {
52                Part::Text(text) => Some(text.clone()),
53                Part::ToolCall(tool_call) => Some(format!(
54                    "Action: {} with {}",
55                    tool_call.tool_name,
56                    serde_json::to_string(&tool_call.input).unwrap_or_default()
57                )),
58                Part::Data(data) => serde_json::to_string(&data).ok(),
59                Part::ToolResult(tool_result) => serde_json::to_string(&tool_result.result()).ok(),
60                Part::Image(image) => match image {
61                    FileType::Url { url, .. } => Some(format!("[Image: {}]", url)),
62                    FileType::Bytes {
63                        name, mime_type, ..
64                    } => Some(format!(
65                        "[Image: {} ({})]",
66                        name.as_deref().unwrap_or("unnamed"),
67                        mime_type
68                    )),
69                },
70                Part::Artifact(artifact) => Some(format!(
71                    "[Artifact ID:{}\n You can use artifact tools to read the full content\n{}]",
72                    artifact.file_id,
73                    if let Some(stats) = &artifact.stats {
74                        format!(" ({})", stats.context_info())
75                    } else {
76                        String::new()
77                    }
78                )),
79            })
80            .collect::<Vec<_>>()
81            .join("\n");
82        if !parts_txt.is_empty() {
83            txt.push_str("\n");
84            txt.push_str(&parts_txt);
85        }
86        txt
87    }
88}
89
/// Terminal status of a single step execution.
///
/// Serialized in snake_case (e.g. `input_required`). Mapped onto the
/// coarser [`TaskStatus`] via the conversion impl below.
#[derive(Debug, Clone, JsonSchema, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum ExecutionStatus {
    Success,
    Failed,
    Rejected,
    // Execution paused, waiting on user input.
    InputRequired,
}
98
99impl Into<TaskStatus> for ExecutionStatus {
100    fn into(self) -> TaskStatus {
101        match self {
102            ExecutionStatus::Success => TaskStatus::Completed,
103            ExecutionStatus::Failed => TaskStatus::Failed,
104            ExecutionStatus::Rejected => TaskStatus::Canceled,
105            ExecutionStatus::InputRequired => TaskStatus::InputRequired,
106        }
107    }
108}
109
/// A tool invocation outcome that may have been skipped instead of run.
pub enum ToolResultWithSkip {
    // A tool that actually ran and produced a response.
    ToolResult(ToolResponse),
    // Skip tool call if it is external
    Skip {
        // Id of the tool call that was skipped.
        tool_call_id: String,
        // Human-readable explanation for the skip.
        reason: String,
    },
}
118
119pub fn from_tool_results(tool_results: Vec<ToolResultWithSkip>) -> Vec<Part> {
120    tool_results
121        .iter()
122        .filter_map(|result| match result {
123            ToolResultWithSkip::ToolResult(tool_result) => {
124                // Simply extract parts from the tool response
125                Some(tool_result.parts.clone())
126            }
127            _ => None,
128        })
129        .flatten()
130        .collect()
131}
132
/// Snapshot of token and context usage for an execution run.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ContextUsage {
    // Total tokens — presumably input + output; confirm at call sites.
    pub tokens: u32,
    pub input_tokens: u32,
    pub output_tokens: u32,
    // Iteration of the execution loop this snapshot was taken at.
    pub current_iteration: usize,
    // Size breakdown of the current context window contents.
    pub context_size: ContextSize,
}
141
/// Size breakdown of the context window, split by source
/// (messages, execution history, scratchpad) with char counts and
/// estimated token counts for each.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ContextSize {
    pub message_count: usize,
    pub message_chars: usize,
    pub message_estimated_tokens: usize,
    pub execution_history_count: usize,
    pub execution_history_chars: usize,
    pub execution_history_estimated_tokens: usize,
    pub scratchpad_chars: usize,
    pub scratchpad_estimated_tokens: usize,
    // Totals across all sources above.
    pub total_chars: usize,
    pub total_estimated_tokens: usize,
    /// Per-agent context size breakdown, keyed by agent id
    /// (presumably matching `AgentContextSize::agent_id` — confirm).
    pub agent_breakdown: std::collections::HashMap<String, AgentContextSize>,
}
157
/// Per-agent slice of the context size breakdown (see
/// `ContextSize::agent_breakdown`).
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct AgentContextSize {
    pub agent_id: String,
    pub task_count: usize,
    pub execution_history_count: usize,
    pub execution_history_chars: usize,
    pub execution_history_estimated_tokens: usize,
    pub scratchpad_chars: usize,
    pub scratchpad_estimated_tokens: usize,
}
168
/// Enriched execution history entry that includes context metadata
/// (thread / task / run identifiers) alongside the raw result.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExecutionHistoryEntry {
    pub thread_id: String, // Conversation context
    pub task_id: String,   // Individual user task/request
    pub run_id: String,    // Specific execution strand
    // The step result being recorded.
    pub execution_result: ExecutionResult,
    pub stored_at: i64, // When this was stored
}
178
/// Entry for scratchpad formatting.
///
/// `entry_type` is flattened, so its `type`/`data` fields serialize at
/// this struct's top level next to `timestamp` and `task_id`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScratchpadEntry {
    pub timestamp: i64,
    #[serde(flatten)]
    pub entry_type: ScratchpadEntryType,
    pub task_id: String,
    // `default` keeps old serialized entries (without this field) readable.
    #[serde(default)]
    pub parent_task_id: Option<String>,
    // Free-form discriminator — semantics not evident here; confirm at
    // the sites that populate it.
    pub entry_kind: Option<String>,
}
190
/// Type of scratchpad entry - only for Thought/Action/Observation tracking.
///
/// Adjacently tagged: serializes as `{"type": "...", "data": ...}`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case", tag = "type", content = "data")]
pub enum ScratchpadEntryType {
    #[serde(rename = "task")]
    Task(Vec<Part>),
    // Renamed: snake_case alone would serialize this as "plan_step".
    #[serde(rename = "plan")]
    PlanStep(PlanStep),
    #[serde(rename = "execution")]
    Execution(ExecutionHistoryEntry),
}
202
#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::json;

    /// Contrasts a large inline `Part::Data` observation with a compact
    /// `Part::Artifact` metadata observation, and asserts both stay small.
    ///
    /// NOTE(review): the size assertions below require `as_observation()`
    /// to truncate large data parts — they fail if it emits data verbatim.
    #[test]
    fn test_scratchpad_large_observation_issue() {
        println!("=== TESTING LARGE DATA OBSERVATION IN SCRATCHPAD ===");

        // Create a very large tool response observation (similar to search results)
        let large_data = json!({
            "results": (0..100).map(|i| json!({
                "id": i,
                "name": format!("Minister {}", i),
                "email": format!("minister{}@gov.sg", i),
                "portfolio": format!("Ministry of Complex Affairs {}", i),
                "biography": format!("Very long biography text that goes on and on for minister {} with lots of details about their career, education, achievements, and political history. This is intentionally verbose to demonstrate the issue with large content in scratchpad observations.", i),
            })).collect::<Vec<_>>()
        });

        println!(
            "Large data size: {} bytes",
            serde_json::to_string(&large_data).unwrap().len()
        );

        // Test 1: Direct Part::Data (BROKEN - causes scratchpad bloat)
        let execution_result_data = ExecutionResult {
            step_id: "test-step-1".to_string(),
            parts: vec![Part::Data(large_data.clone())],
            status: ExecutionStatus::Success,
            reason: None,
            timestamp: 1234567890,
        };

        let observation_data = execution_result_data.as_observation();
        println!(
            "🚨 BROKEN: Direct Part::Data observation size: {} chars",
            observation_data.len()
        );
        println!(
            "Preview (first 200 chars): {}",
            &observation_data.chars().take(200).collect::<String>()
        );

        // Test 2: File metadata (GOOD - concise)
        let file_metadata = crate::filesystem::FileMetadata {
            file_id: "large-search-results.json".to_string(),
            relative_path: "thread123/task456/large-search-results.json".to_string(),
            size: serde_json::to_string(&large_data).unwrap().len() as u64,
            content_type: Some("application/json".to_string()),
            original_filename: Some("search_results.json".to_string()),
            created_at: chrono::Utc::now(),
            updated_at: chrono::Utc::now(),
            checksum: Some("abc123".to_string()),
            stats: None,
            preview: Some("JSON search results with 100 minister entries".to_string()),
        };

        let execution_result_file = ExecutionResult {
            step_id: "test-step-2".to_string(),
            parts: vec![Part::Artifact(file_metadata)],
            status: ExecutionStatus::Success,
            reason: None,
            timestamp: 1234567890,
        };

        let observation_file = execution_result_file.as_observation();
        println!(
            "āœ… GOOD: File metadata observation size: {} chars",
            observation_file.len()
        );
        println!("Content: {}", observation_file);

        // Demonstrate the problem
        println!("\n=== SCRATCHPAD IMPACT ===");
        println!(
            "āŒ Direct approach adds {} chars to scratchpad (CAUSES LOOPS!)",
            observation_data.len()
        );
        println!(
            "āœ… File metadata adds only {} chars to scratchpad",
            observation_file.len()
        );
        println!(
            "šŸ’” Size reduction: {:.1}%",
            (1.0 - (observation_file.len() as f64 / observation_data.len() as f64)) * 100.0
        );

        // This test shows the fix is working - observations are now truncated
        assert!(observation_data.len() < 1000, "Large data is now truncated"); // Fixed expectation
        assert!(
            observation_file.len() < 300,
            "File metadata stays reasonably concise"
        ); // Updated for detailed format

        println!("\n🚨 CONCLUSION: as_observation() needs to truncate large Part::Data!");
    }

    /// Pins the truncation contract of `as_observation()`: large data
    /// parts clamp to < 600 chars, long text to ~1000 chars, and the
    /// output carries a "truncated"/"total chars" marker.
    #[test]
    fn test_observation_truncation_fix() {
        println!("=== TESTING OBSERVATION TRUNCATION FIX ===");

        // Test large data truncation
        let large_data = json!({
            "big_array": (0..200).map(|i| format!("item_{}", i)).collect::<Vec<_>>()
        });

        let execution_result = ExecutionResult {
            step_id: "test-truncation".to_string(),
            parts: vec![Part::Data(large_data)],
            status: ExecutionStatus::Success,
            reason: None,
            timestamp: 1234567890,
        };

        let observation = execution_result.as_observation();
        println!("Truncated observation size: {} chars", observation.len());
        println!("Content: {}", observation);

        // Should be truncated and include total char count
        assert!(
            observation.len() < 600,
            "Observation should be truncated to <600 chars"
        );
        assert!(
            observation.contains("truncated"),
            "Should indicate truncation"
        );
        assert!(
            observation.contains("total chars"),
            "Should show total char count"
        );

        // Test long text truncation
        let long_text = "This is a very long text. ".repeat(100);
        let text_result = ExecutionResult {
            step_id: "test-text-truncation".to_string(),
            parts: vec![Part::Text(long_text.clone())],
            status: ExecutionStatus::Success,
            reason: None,
            timestamp: 1234567890,
        };

        let text_observation = text_result.as_observation();
        println!("Text observation size: {} chars", text_observation.len());
        assert!(
            text_observation.len() < 1100,
            "Text should be truncated to ~1000 chars"
        );
        if long_text.len() > 1000 {
            assert!(
                text_observation.contains("truncated"),
                "Long text should be truncated"
            );
        }

        println!("āœ… Observation truncation is working!");
    }
}
361}