turbomcp_client/llm/mod.rs

//! Enhanced LLM Integration System
//!
//! This module provides an LLM integration system that builds on the existing
//! `SamplingHandler` foundation and adds:
//!
//! - **Provider abstraction**: Generic `LLMProvider` trait for multi-provider support
//! - **Token management**: Token counting and context-window management
//! - **Session management**: Conversation tracking with history and metadata
//! - **Streaming support**: Infrastructure for streaming responses
//! - **Smart routing**: Provider selection based on request characteristics
//! - **Registry system**: Centralized management of multiple LLM providers
//!
//! ## Architecture
//!
//! ```text
//! LLMRegistry
//!     ├── LLMProvider (OpenAI, Anthropic, Custom)
//!     ├── SessionManager
//!     │   ├── ConversationSession
//!     │   └── ContextStrategy
//!     ├── TokenCounter
//!     └── RequestRouter
//! ```
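//!
//! Of these components, the `RequestRouter` is the one most likely to need
//! per-application configuration. Below is a minimal sketch of wiring up
//! routing rules; `RoutingStrategy::FirstMatch`, `RouteRule::new`, the
//! predicate closure, and `estimated_tokens` are illustrative assumptions,
//! not the confirmed API:
//!
//! ```rust,ignore
//! use turbomcp_client::llm::{RequestRouter, RouteRule, RoutingStrategy};
//!
//! // Send long-context requests to Anthropic, everything else to OpenAI.
//! let router = RequestRouter::new(RoutingStrategy::FirstMatch)
//!     .with_rule(RouteRule::new(|req| req.estimated_tokens() > 32_000, "anthropic"))
//!     .with_rule(RouteRule::new(|_| true, "openai"));
//! ```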
//!
//! ## Usage
//!
//! ```rust,no_run
//! use turbomcp_client::llm::{LLMRegistry, OpenAIProvider, LLMProviderConfig};
//! use std::sync::Arc;
//!
//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
//! let mut registry = LLMRegistry::new();
//!
//! // Register providers
//! let openai = Arc::new(OpenAIProvider::new(LLMProviderConfig {
//!     api_key: std::env::var("OPENAI_API_KEY")?,
//!     model: "gpt-4".to_string(),
//!     ..Default::default()
//! })?);
//! registry.register_provider("openai", openai).await?;
//!
//! // Set as default provider
//! registry.set_default_provider("openai")?;
//!
//! // List available providers
//! let providers = registry.list_providers();
//! println!("Available providers: {:?}", providers);
//! # Ok(())
//! # }
//! ```
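//!
//! ## Sessions and Token Budgeting
//!
//! Conversation state lives in a `SessionManager`, and a `TokenCounter` can
//! help keep the accumulated history within a provider's context window. A
//! minimal sketch; the constructor arguments and method names shown
//! (`create_session`, `add_user_message`, `history`, `count`) are
//! illustrative assumptions, not the confirmed API:
//!
//! ```rust,ignore
//! use turbomcp_client::llm::{ContextStrategy, SessionConfig, SessionManager, TokenCounter};
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let mut sessions = SessionManager::new(SessionConfig::default());
//! let session_id = sessions.create_session(ContextStrategy::default())?;
//! sessions.add_user_message(&session_id, "Summarize the MCP spec.")?;
//!
//! // Check the token budget before sending the history to a provider.
//! let counter = TokenCounter::new("gpt-4");
//! let used = counter.count(&sessions.history(&session_id)?);
//! println!("Tokens in context: {used}");
//! # Ok(())
//! # }
//! ```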
//!
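//! ## Streaming
//!
//! Streaming lets callers render partial output as it arrives instead of
//! waiting for the complete response. A minimal sketch; `complete_streaming`,
//! `next_chunk`, and `delta` are illustrative assumptions, not the confirmed
//! API:
//!
//! ```rust,ignore
//! use turbomcp_client::llm::{LLMRegistry, LLMRequest};
//!
//! # async fn example(registry: &LLMRegistry, request: LLMRequest) -> Result<(), Box<dyn std::error::Error>> {
//! // Consume chunks as the provider produces them.
//! let mut stream = registry.complete_streaming(request).await?;
//! while let Some(chunk) = stream.next_chunk().await? {
//!     print!("{}", chunk.delta());
//! }
//! # Ok(())
//! # }
//! ```
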
pub mod core;
pub mod providers;
pub mod registry;
pub mod routing;
pub mod session;
pub mod streaming;
pub mod tokens;

// Re-export public API
pub use core::{
    LLMCapabilities, LLMError, LLMProvider, LLMProviderConfig, LLMRequest, LLMResponse, LLMResult,
    ModelInfo,
};

pub use providers::{AnthropicProvider, OllamaProvider, OpenAIProvider};

pub use session::{
    ContextStrategy, ConversationSession, SessionConfig, SessionManager, SessionMetadata,
};

pub use tokens::{ContextWindow, TokenCounter, TokenUsage};

pub use registry::{LLMRegistry, ProviderInfo, RegistryConfig};

pub use streaming::{StreamChunk, StreamingHandler, StreamingResponse};

pub use routing::{RequestRouter, RouteRule, RoutingStrategy};