// ai_lib_rust/lib.rs
1//! # ai-lib-rust
2//!
3//! 这是 AI-Protocol 规范的高性能 Rust 参考实现,提供统一的多厂商 AI 模型交互接口。
4//!
5//! Protocol Runtime for AI-Protocol - A high-performance Rust reference implementation
6//! that enables provider-agnostic AI model interactions.
7//!
8//! ## Overview
9//!
10//! This library implements the AI-Protocol specification as a runtime, where all logic
11//! is operators and all configuration is protocol. It provides a unified interface
12//! for interacting with AI models across different providers without hardcoding
13//! provider-specific logic.
14//!
15//! ## Core Philosophy
16//!
17//! - **Protocol-Driven**: All behavior is configured through protocol manifests, not code
18//! - **Provider-Agnostic**: Unified interface across OpenAI, Anthropic, Google, and others
19//! - **Streaming-First**: Native support for Server-Sent Events (SSE) streaming
20//! - **Type-Safe**: Strongly typed request/response handling with comprehensive error types
21//!
22//! ## Key Features
23//!
24//! - **Unified Client**: [`AiClient`] provides a single entry point for all AI interactions
25//! - **Protocol Loading**: Load and validate protocol manifests from local files or remote URLs
26//! - **Streaming Pipeline**: Configurable operator pipeline for response processing
//! - **Batching**: Efficient request batching with `batch::BatchCollector` (requires `batch` feature)
28//! - **Caching**: Response caching with pluggable backends via [`cache`] module
29//! - **Resilience**: Circuit breaker and rate limiting via [`resilience`] module
//! - **Content Safety**: Guardrails for content filtering via the `guardrails` module (requires `guardrails` feature)
//! - **Telemetry**: Optional feedback collection via the `telemetry` module (requires `telemetry` feature)
32//!
33//! ## Quick Start
34//!
35//! ```rust,no_run
36//! use ai_lib_rust::{AiClient, AiClientBuilder, Message, MessageRole};
37//!
38//! #[tokio::main]
39//! async fn main() -> ai_lib_rust::Result<()> {
40//! let client = AiClientBuilder::new()
41//! .with_protocol_path("protocols/openai.yaml")?
42//! .with_api_key("your-api-key")
43//! .build()?;
44//!
45//! let messages = vec![
46//! Message::user("Hello, how are you?"),
47//! ];
48//!
49//! // Streaming response
50//! let mut stream = client.chat_stream(&messages, None).await?;
51//! // Process stream events...
52//!
53//! Ok(())
54//! }
55//! ```
56//!
57//! ## Module Organization
58//!
59//! | Module | Description |
60//! |--------|-------------|
61//! | [`protocol`] | Protocol specification loading and validation |
62//! | [`client`] | AI client implementation and builders |
63//! | [`pipeline`] | Streaming response pipeline operators |
64//! | [`types`] | Core type definitions (messages, events, tools) |
//! | `batch` | Request batching and parallel execution (requires `batch` feature) |
//! | [`cache`] | Response caching with multiple backends |
//! | `embeddings` | Embedding generation and vector operations (requires `embeddings` feature) |
//! | [`resilience`] | Circuit breaker and rate limiting |
//! | `guardrails` | Content filtering and safety checks (requires `guardrails` feature) |
//! | `tokens` | Token counting and cost estimation (requires `tokens` feature) |
//! | `telemetry` | Optional feedback and telemetry collection (requires `telemetry` feature) |
72
73// Core modules (always available)
74pub mod cache;
75pub mod client;
76pub mod drivers;
77pub mod feedback;
78pub mod pipeline;
79pub mod plugins;
80pub mod protocol;
81pub mod registry;
82pub mod resilience;
83pub mod structured;
84pub mod transport;
85pub mod types;
86pub mod utils;
87
88// Capability-based modules (feature-gated)
89#[cfg(feature = "batch")]
90pub mod batch;
91#[cfg(feature = "computer_use")]
92pub mod computer_use;
93#[cfg(feature = "embeddings")]
94pub mod embeddings;
95#[cfg(feature = "guardrails")]
96pub mod guardrails;
97#[cfg(feature = "mcp")]
98pub mod mcp;
99#[cfg(feature = "multimodal")]
100pub mod multimodal;
101#[cfg(feature = "tokens")]
102pub mod tokens;
103#[cfg(feature = "telemetry")]
104pub mod telemetry;
105#[cfg(feature = "stt")]
106pub mod stt;
107#[cfg(feature = "tts")]
108pub mod tts;
109#[cfg(feature = "reranking")]
110pub mod rerank;
111
112// Infrastructure modules (feature-gated)
113#[cfg(feature = "routing_mvp")]
114pub mod routing;
115#[cfg(feature = "interceptors")]
116pub mod interceptors;
117
118// Re-export main types for convenience
119pub use client::CallStats;
120pub use client::CancelHandle;
121pub use client::ClientMetrics;
122pub use client::ChatBatchRequest;
123pub use client::EndpointExt;
124pub use client::{AiClient, AiClientBuilder};
125
126// Feedback types: always available from feedback module; full telemetry re-exports when feature is on
127pub use feedback::{FeedbackEvent, FeedbackSink};
128pub use types::{
129 events::StreamingEvent,
130 message::{Message, MessageRole},
131 tool::ToolCall,
132};
133
// Optional re-exports: routing / model-selection types, available only when
// the `routing_mvp` feature (and thus the `routing` module above) is enabled.
#[cfg(feature = "routing_mvp")]
pub use routing::{
    CustomModelManager, LoadBalancingStrategy, ModelArray, ModelCapabilities, ModelEndpoint,
    ModelInfo, ModelSelectionStrategy, PerformanceMetrics, PricingInfo, QualityTier, SpeedTier,
};
140
use futures::Stream;
use std::pin::Pin;

/// Result type alias for the library; all fallible APIs return [`Error`]
pub type Result<T> = std::result::Result<T, Error>;

/// A specialized Result for pipeline operations (same shape as [`Result`];
/// kept as a distinct name so operator signatures document their origin)
pub type PipeResult<T> = std::result::Result<T, Error>;

/// A unified pinned, boxed stream that emits `PipeResult<T>`; `Send` so the
/// stream can be moved across task boundaries
pub type BoxStream<'a, T> = Pin<Box<dyn Stream<Item = PipeResult<T>> + Send + 'a>>;
152
/// Error types and rich error context for the library
pub mod error;
/// Standard error code definitions (see [`StandardErrorCode`])
pub mod error_code;
pub use error::{Error, ErrorContext};
pub use error_code::StandardErrorCode;