//! # llm-stack
//!
//! Provider-agnostic types and traits for interacting with large language models.
//!
//! This crate defines the shared vocabulary that every LLM provider implementation
//! speaks: messages, responses, tool calls, streaming events, usage tracking, and
//! errors. It intentionally contains **zero** provider-specific code — concrete
//! providers live in sibling crates and implement [`Provider`] (or its
//! object-safe counterpart [`DynProvider`]).
//!
//! # Architecture
//!
//! ```text
//!  ┌──────────────┐   ┌──────────────┐   ┌──────────────┐
//!  │  anthropic   │   │   openai     │   │  your-own    │
//!  │  (provider)  │   │  (provider)  │   │  (provider)  │
//!  └──────┬───────┘   └──────┬───────┘   └──────┬───────┘
//!         │                  │                  │
//!         └─────────┬────────┴──────────────────┘
//!                   │
//!            ┌──────▼──────┐
//!            │  llm-stack  │  ← you are here
//!            └─────────────┘
//! ```
//!
//! # Quick start
//!
//! ```rust,no_run
//! use llm_stack::{ChatMessage, ChatParams, Provider};
//!
//! # async fn example(provider: impl Provider) -> Result<(), llm_stack::LlmError> {
//! let params = ChatParams {
//!     messages: vec![ChatMessage::user("Explain ownership in Rust")],
//!     max_tokens: Some(1024),
//!     ..Default::default()
//! };
//!
//! let response = provider.generate(&params).await?;
//! # Ok(())
//! # }
//! ```
//!
//! # Modules
//!
//! | Module | Purpose |
//! |--------|---------|
//! | [`chat`] | Messages, content blocks, tool calls, and responses |
//! | [`context`] | Token-budgeted conversation history management |
//! | [`error`] | Unified [`LlmError`] across all providers |
//! | [`intercept`] | Unified interceptor system for LLM calls and tool executions |
//! | [`provider`] | The [`Provider`] trait and request parameters |
//! | [`stream`] | Server-sent event types and the [`ChatStream`] alias |
//! | [`structured`] | Typed LLM responses with schema validation (feature-gated) |
//! | [`tool`] | Tool execution engine with registry and approval hooks |
//! | [`registry`] | Dynamic provider instantiation from configuration |
//! | [`usage`] | Token counts and cost tracking |
58#![warn(missing_docs)]
59
60pub mod chat;
61pub mod context;
62pub mod error;
63pub mod intercept;
64pub mod provider;
65pub mod registry;
66pub mod stream;
67pub mod structured;
68pub mod tool;
69pub mod usage;
70
71pub mod mcp;
72
73#[cfg(any(test, feature = "test-utils"))]
74pub mod mock;
75
76#[cfg(any(test, feature = "test-utils"))]
77pub mod test_helpers;
78
79pub use chat::{
80    ChatMessage, ChatResponse, ChatRole, ContentBlock, ImageSource, StopReason, ToolCall,
81    ToolResult,
82};
83pub use error::LlmError;
84pub use provider::{
85    Capability, ChatParams, DynProvider, JsonSchema, Provider, ProviderMetadata, RetryPredicate,
86    ToolChoice, ToolDefinition, ToolRetryConfig,
87};
88pub use stream::{ChatStream, StreamEvent};
89pub use tool::{
90    FnToolHandler, LoopAction, LoopDepth, LoopDetectionConfig, NoCtxToolHandler, StopConditionFn,
91    StopContext, StopDecision, TerminationReason, ToolApproval, ToolError, ToolHandler,
92    ToolLoopConfig, ToolLoopEvent, ToolLoopResult, ToolOutput, ToolRegistry, tool_fn,
93    tool_fn_with_ctx, tool_loop_channel,
94};
95pub use usage::{Cost, ModelPricing, Usage, UsageTracker};
96
97pub use context::{ContextWindow, estimate_message_tokens, estimate_tokens};
98pub use registry::{ProviderConfig, ProviderFactory, ProviderRegistry};
99
100pub use mcp::{McpError, McpRegistryExt, McpService};
101
102#[cfg(feature = "schema")]
103pub use structured::{
104    GenerateObjectConfig, GenerateObjectResult, PartialObject, collect_stream_object,
105    generate_object, stream_object_async,
106};
107
108#[cfg(any(test, feature = "test-utils"))]
109pub use mock::{MockError, MockProvider};