//! saorsa-ai: Unified multi-provider LLM API.
//!
//! Provides a common interface for streaming completions, tool calling,
//! and authentication across multiple LLM providers.
//!
//! # Architecture Overview
//!
//! ```text
//! ┌─────────────────────────────────────────────────────────────┐
//! │                  Application / Agent Layer                  │
//! │   (Sends CompletionRequest, receives StreamEvent stream)    │
//! └─────────────────────────────────────────────────────────────┘
//!                               │
//!                               ▼
//! ┌─────────────────────────────────────────────────────────────┐
//! │                 ProviderRegistry (Factory)                  │
//! │      ProviderKind → ProviderConfig → Box<dyn Provider>      │
//! └─────────────────────────────────────────────────────────────┘
//!                               │
//!        ┌─────────────┬───────┴──────┬─────────────┐
//!        ▼             ▼              ▼             ▼
//! ┌──────────────┬──────────────┬──────────────┬──────────────┐
//! │  Anthropic   │    OpenAI    │    Gemini    │    Ollama    │
//! │   Provider   │   Provider   │   Provider   │   Provider   │
//! └──────────────┴──────────────┴──────────────┴──────────────┘
//!                               │
//!                               ▼
//! ┌─────────────────────────────────────────────────────────────┐
//! │        Streaming HTTP (reqwest, Server-Sent Events)         │
//! │   POST /v1/messages → stream of JSON events → StreamEvent   │
//! └─────────────────────────────────────────────────────────────┘
//!                               │
//!                               ▼
//! ┌─────────────────────────────────────────────────────────────┐
//! │            Message Protocol (vendor-agnostic types)         │
//! │     Message, ContentBlock, ToolDefinition, ContentDelta     │
//! └─────────────────────────────────────────────────────────────┘
//! ```
//!
//! ## Provider Abstraction
//!
//! All providers implement the `Provider` trait:
//!
//! - **`stream_completion`**: Returns `Pin<Box<dyn Stream<Item = Result<StreamEvent>>>>`
//! - **Unified event types**: `StreamEvent::{ContentDelta, ToolUse, Done, Error}`
//! - **Model metadata**: Context windows, tool support, vision capabilities
//!
//! ## Supported Providers
//!
//! - **Anthropic**: Claude models with streaming, tool use, vision
//! - **OpenAI**: GPT models with streaming, function calling, vision
//! - **Gemini**: Google Gemini with streaming and tool use
//! - **Ollama**: Local model hosting with OpenAI-compatible API
//! - **OpenAI-Compatible**: Generic adapter for compatible APIs (Groq, etc.)
//!
//! ## Key Types
//!
//! - `Provider`: Core trait for LLM completion providers
//! - `CompletionRequest`: Vendor-agnostic request (messages, tools, params)
//! - `StreamEvent`: Streaming events (content deltas, tool calls, completion)
//! - `Message`: Conversation message with role and content blocks
//! - `ToolDefinition`: JSON Schema-based tool specification

// Provider implementations and shared infrastructure (errors, message
// protocol, model metadata, token accounting, request/response types).
pub mod anthropic;
pub mod error;
pub mod gemini;
pub mod message;
pub mod models;
pub mod ollama;
pub mod openai;
pub mod openai_compat;
pub mod provider;
pub mod tokens;
pub mod types;

// Optional provider compiled only with the `mistralrs` feature
// (presumably in-process inference via the mistral.rs crate — see the
// module itself for details).
#[cfg(feature = "mistralrs")]
pub mod mistralrs;

// Flat re-exports of the public API so downstream crates can write
// `use saorsa_ai::{Provider, CompletionRequest, ...}` without naming
// individual submodules.
pub use anthropic::AnthropicProvider;
pub use error::{Result, SaorsaAiError};
pub use gemini::GeminiProvider;
pub use message::{ContentBlock, Message, Role, ToolDefinition};
#[cfg(feature = "mistralrs")]
pub use mistralrs::{MistralrsConfig, MistralrsProvider};
pub use models::{
    ModelInfo, all_models, get_context_window, lookup_by_provider_prefix, lookup_model,
    lookup_model_by_prefix, supports_tools, supports_vision,
};
pub use ollama::OllamaProvider;
pub use openai::OpenAiProvider;
pub use openai_compat::{OpenAiCompatBuilder, OpenAiCompatProvider};
pub use provider::{
    Provider, ProviderConfig, ProviderKind, ProviderRegistry, StreamingProvider, determine_provider,
};
pub use types::{
    CompletionRequest, CompletionResponse, ContentDelta, StopReason, StreamEvent, ThinkingConfig,
    Usage,
};