// ggen_ai/streaming.rs
//! LLM Streaming Support via rust-genai
//!
//! This module provides type aliases and utilities for working with LLM streaming responses.
//! The actual streaming implementation uses genai's native streaming capabilities.
//!
//! ## Usage
//!
//! For streaming responses, use `LlmClient::complete_stream()` which returns a stream of `LlmChunk`:
//!
//! ```rust,no_run
//! use ggen_ai::{GenAiClient, LlmClient, LlmConfig};
//! use futures::StreamExt;
//!
//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
//! let client = GenAiClient::new(LlmConfig::default())?;
//! let mut stream = client.complete_stream("Hello, world!").await?;
//!
//! while let Some(chunk) = stream.next().await {
//!     print!("{}", chunk.content);
//! }
//! # Ok(())
//! # }
//! ```
//!
//! The genai library natively supports streaming for all major providers:
//! - OpenAI (including tool calls and reasoning chunks)
//! - Anthropic Claude
//! - Google Gemini
//! - Ollama (local models)
//! - Groq, xAI/Grok, DeepSeek, Cohere
//!
//! No custom streaming implementation needed - genai handles everything!
33
34use serde::{Deserialize, Serialize};
35
36// Re-export the streaming chunk type from client module
37pub use crate::client::LlmChunk as StreamChunk;
38
/// Configuration for streaming behavior
///
/// Note: Most streaming configuration is handled by the provider's native API.
/// This config is primarily for application-level concerns like buffering.
///
/// Defaults (via [`Default`]): `buffer_size = 10`, `chunk_timeout = 30s`.
#[derive(Debug, Clone)]
pub struct StreamConfig {
    /// Buffer size hint for stream processing.
    /// A hint only — nothing in this module enforces or consumes it directly.
    pub buffer_size: usize,
    /// Timeout for receiving chunks (application-level).
    /// NOTE(review): not applied anywhere in this file; presumably enforced by
    /// callers wrapping the stream (e.g. via `tokio::time::timeout`) — confirm
    /// at call sites.
    pub chunk_timeout: std::time::Duration,
}
50
51impl Default for StreamConfig {
52 fn default() -> Self {
53 Self {
54 buffer_size: 10,
55 chunk_timeout: std::time::Duration::from_secs(30),
56 }
57 }
58}
59
/// Metadata for streaming chunks (deprecated - use LlmChunk directly)
#[deprecated(
    since = "0.2.0",
    note = "Use LlmChunk from client module instead. This will be removed in 0.3.0"
)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StreamMetadata {
    /// Name/identifier of the model that produced the stream.
    pub model: String,
    /// Token count for the response, when the provider reports one.
    pub tokens_used: Option<usize>,
    /// Provider-reported reason the stream ended, if any.
    /// NOTE(review): exact values ("stop", "length", …) depend on the
    /// provider — not determinable from this file.
    pub finish_reason: Option<String>,
}
71
#[cfg(test)]
mod tests {
    use super::*;

    /// The default config must carry the documented buffer size hint (10)
    /// and chunk timeout (30 seconds).
    #[test]
    fn test_stream_config_default() {
        let StreamConfig {
            buffer_size,
            chunk_timeout,
        } = StreamConfig::default();
        assert_eq!(buffer_size, 10);
        assert_eq!(chunk_timeout, std::time::Duration::from_secs(30));
    }
}