acton-ai 0.26.0

An agentic AI framework where each agent is an actor
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
//! Stream handling for LLM responses.
//!
//! This module provides the `StreamHandler` trait for custom actors that need
//! to handle streaming LLM responses with their own state management.
//!
//! # Example
//!
//! ```rust,ignore
//! use acton_ai::prelude::*;
//!
//! #[acton_actor]
//! struct TokenCollector {
//!     buffer: String,
//!     token_count: usize,
//! }
//!
//! impl StreamHandler for TokenCollector {
//!     fn on_token(&mut self, token: &str) {
//!         self.buffer.push_str(token);
//!         self.token_count += 1;
//!     }
//!
//!     fn on_end(&mut self, reason: StopReason) -> StreamAction {
//!         println!("Collected {} tokens: {}", self.token_count, self.buffer);
//!         StreamAction::Complete
//!     }
//! }
//! ```

use crate::messages::StopReason;
use crate::types::CorrelationId;

/// Action to take after processing a stream event.
///
/// Returned from `StreamHandler::on_end` to tell the runtime what should
/// happen once a stream has finished.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum StreamAction {
    /// Keep the handler alive and listening for further streams.
    ///
    /// This is the default, suited to handlers that service a sequence
    /// of LLM requests over their lifetime.
    #[default]
    Continue,

    /// Notify any waiters that this stream has finished.
    ///
    /// Choose this to wake code blocked on stream completion
    /// (e.g. via `collect().await`).
    Complete,

    /// Shut the actor down once this stream is done.
    ///
    /// Choose this for one-shot handlers that should be cleaned up
    /// after a single stream.
    Stop,
}

impl StreamAction {
    /// Returns true if the action is `Continue`.
    #[must_use]
    pub fn is_continue(&self) -> bool {
        // `StreamAction` is `Copy + PartialEq`, so direct comparison is fine.
        *self == Self::Continue
    }

    /// Returns true if the action is `Complete`.
    #[must_use]
    pub fn is_complete(&self) -> bool {
        *self == Self::Complete
    }

    /// Returns true if the action is `Stop`.
    #[must_use]
    pub fn is_stop(&self) -> bool {
        *self == Self::Stop
    }
}

/// Trait for handling streaming LLM responses.
///
/// Implement this on an actor to receive streamed tokens with full access
/// to the actor's own state. This is the Layer 3 API for users who need
/// custom stream processing backed by a stateful actor.
///
/// # Example
///
/// ```rust,ignore
/// #[acton_actor]
/// struct MyCollector {
///     buffer: String,
///     word_count: usize,
/// }
///
/// impl StreamHandler for MyCollector {
///     fn on_start(&mut self, correlation_id: &CorrelationId) {
///         println!("Stream {} starting", correlation_id);
///         self.buffer.clear();
///         self.word_count = 0;
///     }
///
///     fn on_token(&mut self, token: &str) {
///         self.buffer.push_str(token);
///         // Count words as they come in
///         self.word_count += token.split_whitespace().count();
///     }
///
///     fn on_end(&mut self, reason: StopReason) -> StreamAction {
///         println!("Stream ended: {:?}, {} words", reason, self.word_count);
///         StreamAction::Complete
///     }
/// }
/// ```
///
/// # Subscription
///
/// `ActonAI::spawn_stream_handler()` automatically subscribes the handler to
/// `LLMStreamStart`, `LLMStreamToken`, and `LLMStreamEnd` messages, so no
/// manual subscription is required.
pub trait StreamHandler: Send + 'static {
    /// Invoked when a new stream begins.
    ///
    /// The default implementation does nothing; override it to set up
    /// per-stream state.
    ///
    /// # Arguments
    ///
    /// * `correlation_id` - Identifies which request this stream belongs to.
    fn on_start(&mut self, _correlation_id: &CorrelationId) {}

    /// Invoked once per token, in the order tokens arrive from the LLM.
    ///
    /// This is the only method implementors are required to provide.
    ///
    /// # Arguments
    ///
    /// * `token` - The token text; may be a partial word, punctuation, or whitespace.
    fn on_token(&mut self, token: &str);

    /// Invoked when the stream finishes.
    ///
    /// The returned `StreamAction` decides what happens next. The default
    /// implementation keeps listening.
    ///
    /// # Arguments
    ///
    /// * `stop_reason` - Why the LLM stopped generating:
    ///   - `EndTurn`: Normal completion
    ///   - `MaxTokens`: Reached token limit
    ///   - `ToolUse`: Model wants to call tools
    ///   - `StopSequence`: Hit a stop sequence
    ///
    /// # Returns
    ///
    /// - `StreamAction::Continue`: Keep listening for more streams
    /// - `StreamAction::Complete`: Signal waiters that this stream is done
    /// - `StreamAction::Stop`: Stop the actor
    fn on_end(&mut self, _stop_reason: StopReason) -> StreamAction {
        // `StreamAction::default()` is `Continue` (see the derive on the enum).
        StreamAction::default()
    }
}

/// Record of a single tool invocation made during the conversation.
///
/// Captures the call's identity, the JSON arguments the model supplied,
/// and the outcome of executing the tool.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ExecutedToolCall {
    /// The unique ID of the tool call (assigned by the LLM)
    pub id: String,
    /// The name of the tool that was called
    pub name: String,
    /// The arguments passed to the tool (as JSON)
    pub arguments: serde_json::Value,
    /// The result of the tool execution (Ok = JSON result, Err = error message)
    pub result: Result<serde_json::Value, String>,
}

impl ExecutedToolCall {
    /// Builds a record for a tool call that completed successfully.
    #[must_use]
    pub fn success(
        id: impl Into<String>,
        name: impl Into<String>,
        arguments: serde_json::Value,
        result: serde_json::Value,
    ) -> Self {
        Self::build(id, name, arguments, Ok(result))
    }

    /// Builds a record for a tool call whose execution failed.
    #[must_use]
    pub fn error(
        id: impl Into<String>,
        name: impl Into<String>,
        arguments: serde_json::Value,
        error: impl Into<String>,
    ) -> Self {
        Self::build(id, name, arguments, Err(error.into()))
    }

    /// Shared constructor backing `success` and `error`.
    fn build(
        id: impl Into<String>,
        name: impl Into<String>,
        arguments: serde_json::Value,
        result: Result<serde_json::Value, String>,
    ) -> Self {
        Self {
            id: id.into(),
            name: name.into(),
            arguments,
            result,
        }
    }

    /// Returns true if the tool execution succeeded.
    #[must_use]
    pub fn is_success(&self) -> bool {
        self.result.is_ok()
    }

    /// Returns true if the tool execution failed.
    #[must_use]
    pub fn is_error(&self) -> bool {
        !self.is_success()
    }
}

/// Response collected from a completed stream.
///
/// Returned by `PromptBuilder::collect()` after the stream completes.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct CollectedResponse {
    /// The complete text generated by the LLM.
    pub text: String,

    /// Why the LLM stopped generating.
    pub stop_reason: StopReason,

    /// Number of tokens in the response.
    pub token_count: usize,

    /// Tool calls that were executed during the conversation (if any).
    ///
    /// This is populated when tools were used and contains all tool calls
    /// that were executed during the conversation loop.
    pub tool_calls: Vec<ExecutedToolCall>,
}

impl CollectedResponse {
    /// Creates a collected response with no tool calls.
    ///
    /// # Arguments
    ///
    /// * `text` - The complete generated text.
    /// * `stop_reason` - Why the LLM stopped generating.
    /// * `token_count` - Number of tokens in the response.
    #[must_use]
    pub fn new(text: String, stop_reason: StopReason, token_count: usize) -> Self {
        // Delegate to the full constructor so the field list lives in one place.
        Self::with_tool_calls(text, stop_reason, token_count, Vec::new())
    }

    /// Creates a collected response, including the tool calls executed
    /// during the conversation.
    #[must_use]
    pub fn with_tool_calls(
        text: String,
        stop_reason: StopReason,
        token_count: usize,
        tool_calls: Vec<ExecutedToolCall>,
    ) -> Self {
        Self {
            text,
            stop_reason,
            token_count,
            tool_calls,
        }
    }

    /// Returns true if any tools were called during the conversation.
    #[must_use]
    pub fn has_tool_calls(&self) -> bool {
        !self.tool_calls.is_empty()
    }

    /// Returns true if the response completed normally (`StopReason::EndTurn`).
    #[must_use]
    pub fn is_complete(&self) -> bool {
        matches!(self.stop_reason, StopReason::EndTurn)
    }

    /// Returns true if the response was truncated due to the token limit
    /// (`StopReason::MaxTokens`).
    #[must_use]
    pub fn is_truncated(&self) -> bool {
        matches!(self.stop_reason, StopReason::MaxTokens)
    }

    /// Returns true if the model wants to call tools (`StopReason::ToolUse`).
    #[must_use]
    pub fn needs_tool_call(&self) -> bool {
        matches!(self.stop_reason, StopReason::ToolUse)
    }
}

impl Default for CollectedResponse {
    /// An empty, normally-completed response: no text, zero tokens,
    /// `StopReason::EndTurn`, and no tool calls.
    fn default() -> Self {
        // Delegate to `new` instead of repeating the field list, so a new
        // field only needs to be accounted for in one constructor.
        Self::new(String::new(), StopReason::EndTurn, 0)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    /// Shorthand for building a tool-free `CollectedResponse`.
    fn resp(text: &str, reason: StopReason, tokens: usize) -> CollectedResponse {
        CollectedResponse::new(text.to_string(), reason, tokens)
    }

    #[test]
    fn stream_action_default_is_continue() {
        assert_eq!(StreamAction::default(), StreamAction::Continue);
    }

    #[test]
    fn stream_action_is_continue() {
        let cases = [
            (StreamAction::Continue, true),
            (StreamAction::Complete, false),
            (StreamAction::Stop, false),
        ];
        for (action, expected) in cases {
            assert_eq!(action.is_continue(), expected);
        }
    }

    #[test]
    fn stream_action_is_complete() {
        let cases = [
            (StreamAction::Continue, false),
            (StreamAction::Complete, true),
            (StreamAction::Stop, false),
        ];
        for (action, expected) in cases {
            assert_eq!(action.is_complete(), expected);
        }
    }

    #[test]
    fn stream_action_is_stop() {
        let cases = [
            (StreamAction::Continue, false),
            (StreamAction::Complete, false),
            (StreamAction::Stop, true),
        ];
        for (action, expected) in cases {
            assert_eq!(action.is_stop(), expected);
        }
    }

    #[test]
    fn collected_response_new() {
        let response = resp("Hello", StopReason::EndTurn, 1);

        assert_eq!(response.text, "Hello");
        assert_eq!(response.stop_reason, StopReason::EndTurn);
        assert_eq!(response.token_count, 1);
    }

    #[test]
    fn collected_response_is_complete() {
        assert!(resp("test", StopReason::EndTurn, 1).is_complete());
        assert!(!resp("test", StopReason::MaxTokens, 1).is_complete());
    }

    #[test]
    fn collected_response_is_truncated() {
        assert!(resp("test", StopReason::MaxTokens, 1).is_truncated());
        assert!(!resp("test", StopReason::EndTurn, 1).is_truncated());
    }

    #[test]
    fn collected_response_needs_tool_call() {
        assert!(resp("", StopReason::ToolUse, 0).needs_tool_call());
        assert!(!resp("test", StopReason::EndTurn, 1).needs_tool_call());
    }

    #[test]
    fn collected_response_default() {
        let response = CollectedResponse::default();

        assert!(response.text.is_empty());
        assert_eq!(response.stop_reason, StopReason::EndTurn);
        assert_eq!(response.token_count, 0);
        assert!(response.tool_calls.is_empty());
    }

    #[test]
    fn collected_response_new_has_empty_tool_calls() {
        let response = resp("test", StopReason::EndTurn, 1);
        assert!(response.tool_calls.is_empty());
        assert!(!response.has_tool_calls());
    }

    #[test]
    fn collected_response_with_tool_calls() {
        let tool_call = ExecutedToolCall::success(
            "tc_1",
            "calculator",
            serde_json::json!({"expr": "2+2"}),
            serde_json::json!({"result": 4}),
        );

        let response = CollectedResponse::with_tool_calls(
            "The result is 4".to_string(),
            StopReason::EndTurn,
            5,
            vec![tool_call],
        );

        assert!(response.has_tool_calls());
        assert_eq!(response.tool_calls.len(), 1);
        assert_eq!(response.tool_calls[0].name, "calculator");
    }

    #[test]
    fn executed_tool_call_success() {
        let call = ExecutedToolCall::success(
            "tc_1",
            "my_tool",
            serde_json::json!({"arg": "value"}),
            serde_json::json!({"result": "ok"}),
        );

        assert!(call.is_success());
        assert!(!call.is_error());
        assert_eq!(call.id, "tc_1");
        assert_eq!(call.name, "my_tool");
    }

    #[test]
    fn executed_tool_call_error() {
        let call = ExecutedToolCall::error(
            "tc_2",
            "failing_tool",
            serde_json::json!({}),
            "Tool execution failed",
        );

        assert!(!call.is_success());
        assert!(call.is_error());
        assert_eq!(call.result.unwrap_err(), "Tool execution failed");
    }
}