// ai_lib_rust — crate root (lib.rs)
//! # ai-lib-rust
//!
//! A high-performance Rust reference implementation of the AI-Protocol
//! specification, providing a unified interface for interacting with AI
//! models across multiple providers.
//!
//! Protocol Runtime for AI-Protocol - A high-performance Rust reference implementation
//! that enables provider-agnostic AI model interactions.
//!
//! ## Overview
//!
//! This library implements the AI-Protocol specification as a runtime, where all logic
//! is operators and all configuration is protocol. It provides a unified interface
//! for interacting with AI models across different providers without hardcoding
//! provider-specific logic.
//!
//! ## Core Philosophy
//!
//! - **Protocol-Driven**: All behavior is configured through protocol manifests, not code
//! - **Provider-Agnostic**: Unified interface across OpenAI, Anthropic, Google, and others
//! - **Streaming-First**: Native support for Server-Sent Events (SSE) streaming
//! - **Type-Safe**: Strongly typed request/response handling with comprehensive error types
//!
//! ## Key Features
//!
//! - **Unified Client**: [`AiClient`] provides a single entry point for all AI interactions
//! - **Protocol Loading**: Load and validate protocol manifests from local files or remote URLs
//! - **Streaming Pipeline**: Configurable operator pipeline for response processing
//! - **Batching**: Efficient request batching with [`batch::BatchCollector`]
//! - **Caching**: Response caching with pluggable backends via [`cache`] module
//! - **Resilience**: Circuit breaker and rate limiting via [`resilience`] module
//! - **Content Safety**: Guardrails for content filtering via [`guardrails`] module
//! - **Telemetry**: Optional feedback collection via [`telemetry`] module
//!
//! ## Quick Start
//!
//! ```rust,no_run
//! use ai_lib_rust::{AiClient, AiClientBuilder, Message, MessageRole};
//!
//! #[tokio::main]
//! async fn main() -> ai_lib_rust::Result<()> {
//!     let client = AiClientBuilder::new()
//!         .with_protocol_path("protocols/openai.yaml")?
//!         .with_api_key("your-api-key")
//!         .build()?;
//!
//!     let messages = vec![
//!         Message::user("Hello, how are you?"),
//!     ];
//!
//!     // Streaming response
//!     let mut stream = client.chat_stream(&messages, None).await?;
//!     // Process stream events...
//!
//!     Ok(())
//! }
//! ```
//!
//! ## Module Organization
//!
//! | Module | Description |
//! |--------|-------------|
//! | [`protocol`] | Protocol specification loading and validation |
//! | [`client`] | AI client implementation and builders |
//! | [`pipeline`] | Streaming response pipeline operators |
//! | [`types`] | Core type definitions (messages, events, tools) |
//! | [`batch`] | Request batching and parallel execution |
//! | [`cache`] | Response caching with multiple backends |
//! | [`embeddings`] | Embedding generation and vector operations |
//! | [`resilience`] | Circuit breaker and rate limiting |
//! | [`guardrails`] | Content filtering and safety checks |
//! | [`tokens`] | Token counting and cost estimation |
//! | [`telemetry`] | Optional feedback and telemetry collection |
// Public module tree (always compiled). Descriptions mirror the
// "Module Organization" table in the crate-level docs above.
pub mod batch; // Request batching and parallel execution
pub mod cache; // Response caching with multiple backends
pub mod client; // AI client implementation and builders
pub mod embeddings; // Embedding generation and vector operations
pub mod guardrails; // Content filtering and safety checks
pub mod pipeline; // Streaming response pipeline operators
pub mod plugins; // Plugin support (not listed in the doc table — presumably extension points; verify)
pub mod protocol; // Protocol specification loading and validation
pub mod resilience; // Circuit breaker and rate limiting
pub mod telemetry; // Optional feedback and telemetry collection
pub mod tokens; // Token counting and cost estimation
pub mod transport; // Transport layer (not listed in the doc table — presumably HTTP/SSE plumbing; verify)
pub mod types; // Core type definitions (messages, events, tools)
pub mod utils; // Shared utilities

// Feature-gated modules: compiled only when the corresponding Cargo feature
// is enabled, so default builds stay lean.
#[cfg(feature = "routing_mvp")]
pub mod routing;

#[cfg(feature = "interceptors")]
pub mod interceptors;
93
// Re-export main types for convenience, so downstream code can write
// `ai_lib_rust::AiClient` instead of `ai_lib_rust::client::AiClient`.
pub use client::CallStats;
pub use client::CancelHandle;
pub use client::ChatBatchRequest;
pub use client::EndpointExt;
pub use client::{AiClient, AiClientBuilder};
pub use telemetry::{FeedbackEvent, FeedbackSink};
// Core message/event/tool types used in nearly every call to the client.
pub use types::{
    events::StreamingEvent,
    message::{Message, MessageRole},
    tool::ToolCall,
};

// Optional re-exports: available only when the `routing_mvp` feature is on,
// mirroring the feature gate on the `routing` module itself.
#[cfg(feature = "routing_mvp")]
pub use routing::{
    CustomModelManager, LoadBalancingStrategy, ModelArray, ModelCapabilities, ModelEndpoint,
    ModelInfo, ModelSelectionStrategy, PerformanceMetrics, PricingInfo, QualityTier, SpeedTier,
};
113
use futures::Stream;
use std::pin::Pin;

/// Result type alias for the library.
///
/// All fallible public APIs in this crate return this alias, with the
/// crate-wide [`Error`] as the error type.
pub type Result<T> = std::result::Result<T, Error>;

/// A specialized Result for pipeline operations.
///
/// Structurally identical to [`Result`] (same [`Error`]); kept as a distinct
/// alias so pipeline operator signatures document their context explicitly.
pub type PipeResult<T> = std::result::Result<T, Error>;

/// A unified pinned, boxed stream that emits `PipeResult<T>`.
///
/// The stream is `Send` so it can cross task boundaries, and pinned/boxed so
/// heterogeneous pipeline stages can be stored behind one concrete type.
pub type BoxStream<'a, T> = Pin<Box<dyn Stream<Item = PipeResult<T>> + Send + 'a>>;

/// Error type for the library.
// NOTE(review): this module is declared here rather than with the other
// `pub mod` declarations near the top of the file — consider moving it up
// for consistency.
pub mod error;
pub use error::{Error, ErrorContext};