// ai_sdk_core/lib.rs
1//! # AI SDK Core
2//!
3//! High-level, ergonomic APIs for building applications with large language models.
4//!
5//! This crate provides production-ready abstractions over the provider specification
6//! layer, offering builder-based APIs, automatic tool execution, structured output
7//! generation, and comprehensive error handling.
8//!
9//! ## Core Features
10//!
11//! - **Text Generation**: `generate_text()` and `stream_text()` for chat completion
12//! - **Tool Execution**: Automatic multi-step tool calling with custom functions
13//! - **Embeddings**: `embed()` and `embed_many()` for semantic vector generation
14//! - **Structured Output**: `generate_object()` for schema-validated JSON
15//! - **Middleware**: Extensible hooks for logging, caching, and custom behavior
16//! - **Multi-Provider**: Registry system for managing multiple provider configurations
17//!
18//! ## Example: Text Generation
19//!
20//! Generate text using a simple builder pattern with provider-agnostic configuration:
21//!
//! ```rust,ignore
//! use ai_sdk_core::generate_text;
//! use ai_sdk_openai::openai;
//!
//! let result = generate_text()
//!     .model(openai("gpt-4").api_key(api_key))
//!     .prompt("Explain the fundamentals of quantum computing")
//!     .temperature(0.7)
//!     .max_tokens(500)
//!     .execute()
//!     .await?;
//!
//! println!("Response: {}", result.text());
//! println!("Tokens used: {}", result.usage.total_tokens);
//! ```
37//!
38//! ## Example: Tool Calling
39//!
40//! Implement custom tools that the model can call during generation. The framework
41//! handles the execution loop automatically:
42//!
//! ```rust,ignore
//! use ai_sdk_core::{generate_text, Tool, ToolContext};
//! use ai_sdk_openai::openai;
//! use async_trait::async_trait;
//! use std::sync::Arc;
//!
//! struct WeatherTool;
//!
//! #[async_trait]
//! impl Tool for WeatherTool {
//!     fn name(&self) -> &str { "get_weather" }
//!
//!     fn description(&self) -> &str {
//!         "Retrieves current weather conditions for a specified location"
//!     }
//!
//!     fn input_schema(&self) -> serde_json::Value {
//!         serde_json::json!({
//!             "type": "object",
//!             "properties": {
//!                 "location": {
//!                     "type": "string",
//!                     "description": "City name or coordinates"
//!                 }
//!             },
//!             "required": ["location"]
//!         })
//!     }
//!
//!     async fn execute(&self, input: serde_json::Value, _ctx: &ToolContext)
//!         -> Result<serde_json::Value, ai_sdk_core::ToolError> {
//!         let location = input["location"].as_str().unwrap_or("unknown");
//!         Ok(serde_json::json!({
//!             "location": location,
//!             "temperature": 72,
//!             "conditions": "sunny"
//!         }))
//!     }
//! }
//!
//! let result = generate_text()
//!     .model(openai("gpt-4").api_key(api_key))
//!     .prompt("What's the weather like in Tokyo?")
//!     .tools(vec![Arc::new(WeatherTool)])
//!     .max_steps(5)
//!     .execute()
//!     .await?;
//! ```
91//!
92//! ## Example: Streaming
93//!
94//! Process responses incrementally as they arrive for real-time user feedback:
95//!
//! ```rust,ignore
//! use ai_sdk_core::{stream_text, TextStreamPart};
//! use ai_sdk_openai::openai;
//! use tokio_stream::StreamExt;
//!
//! let result = stream_text()
//!     .model(openai("gpt-4").api_key(api_key))
//!     .prompt("Write a creative short story about time travel")
//!     .temperature(0.9)
//!     .execute()
//!     .await?;
//!
//! let mut stream = result.into_stream();
//! while let Some(part) = stream.next().await {
//!     match part? {
//!         TextStreamPart::TextDelta(delta) => print!("{}", delta),
//!         TextStreamPart::FinishReason(reason) => {
//!             println!("\nFinished: {:?}", reason);
//!         }
//!         _ => {}
//!     }
//! }
//! ```
118
119#![warn(missing_docs)]
120#![warn(rustdoc::broken_intra_doc_links)]
121
122/// Internal module for embedding functionality
123#[path = "embed/mod.rs"]
124mod embeddings;
125/// Error definitions for the crate.
126pub mod error;
127// mod generate_text;
128mod retry;
129mod stop_condition;
130// mod stream_text;
131mod text;
132mod tool;
133
134/// Utility functions for media type detection, file download, and base64 encoding
135pub mod util;
136
137/// Generate structured objects with schema validation
138pub mod generate_object;
139
140/// Middleware system for customizing language model behavior
141pub mod middleware;
142
143/// Provider registry system for multi-provider management
144pub mod registry;
145
146// Re-export commonly used types from ai-sdk-provider
147pub use ai_sdk_provider::language_model::{
148 CallOptions, Content, FinishReason, LanguageModel, Message, ToolCallPart, ToolResultPart, Usage,
149};
150pub use ai_sdk_provider::{EmbeddingModel, EmbeddingUsage, JsonValue};
151
152// Re-export core functionality
153pub use embeddings::{
154 embed, embed_many, EmbedBuilder, EmbedManyBuilder, EmbedManyResult, EmbedResult,
155};
156pub use error::{EmbedError, Error, GenerateError, Result, ToolError};
157pub use retry::RetryPolicy;
158pub use stop_condition::{stop_after_steps, stop_on_finish, StopCondition};
159pub use text::{generate_text, GenerateTextBuilder, GenerateTextResult, StepResult};
160pub use text::{stream_text, StreamTextBuilder, StreamTextResult, TextStreamPart};
161pub use tool::{Tool, ToolContext, ToolExecutor, ToolOutput};