ai_sdk_openai/
lib.rs

//! # OpenAI Provider Implementation
//!
//! Production-ready OpenAI provider implementing the AI SDK provider specification.
//! This crate provides access to OpenAI's model portfolio: GPT language models,
//! DALL-E image generation, Whisper transcription, text embeddings, and
//! text-to-speech.
//!
//! ## Supported Models
//!
//! - **GPT Language Models** - GPT-4, GPT-4 Turbo, GPT-3.5 Turbo, o1 (reasoning models)
//! - **Text Embeddings** - text-embedding-3-small, text-embedding-3-large, text-embedding-ada-002
//! - **Image Generation** - DALL-E 2 and DALL-E 3 (quality and style controls apply to DALL-E 3)
//! - **Speech Synthesis** - TTS-1 (standard), TTS-1-HD (high definition)
//! - **Speech Transcription** - Whisper-1 with timestamps and translations
//!
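//! Each model family has a dedicated type exported from this crate. As a rough
//! sketch only (the `new(model_id, api_key)` constructors below are assumed to
//! mirror `OpenAIChatModel::new`; check each type's docs for the exact signature):
//!
//! ```rust,ignore
//! use ai_sdk_openai::{
//!     OpenAIEmbeddingModel, OpenAIImageModel, OpenAISpeechModel, OpenAITranscriptionModel,
//! };
//!
//! let api_key = std::env::var("OPENAI_API_KEY")?;
//!
//! // Assumed constructors, shown for orientation only.
//! let embeddings = OpenAIEmbeddingModel::new("text-embedding-3-small", api_key.clone());
//! let images = OpenAIImageModel::new("dall-e-3", api_key.clone());
//! let speech = OpenAISpeechModel::new("tts-1-hd", api_key.clone());
//! let transcribe = OpenAITranscriptionModel::new("whisper-1", api_key);
//! ```
//!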
//! ## Features
//!
//! - **Full Specification Compliance**: Implements all v3 provider traits
//! - **Streaming Support**: Server-sent events for real-time token streaming (sketch under Quick Start below)
//! - **Tool Calling**: Native function calling with parallel tool execution
//! - **Vision Support**: Multimodal inputs with image URLs and base64 data
//! - **Structured Output**: JSON mode and response format constraints
//! - **Error Handling**: Comprehensive error types with retry guidance (see Error Handling below)
//! - **Request Inspection**: Access to raw request/response bodies for debugging
//!
//! ## Quick Start
//!
//! ### Basic Text Generation
//!
//! ```rust,ignore
//! use ai_sdk_openai::OpenAIChatModel;
//! use ai_sdk_provider::LanguageModel;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let api_key = std::env::var("OPENAI_API_KEY")?;
//!     let model = OpenAIChatModel::new("gpt-4", api_key);
//!
//!     let response = model
//!         .generate("Explain photosynthesis in simple terms")
//!         .temperature(0.7)
//!         .max_tokens(200)
//!         .await?;
//!
//!     println!("{}", response.text());
//!     Ok(())
//! }
//! ```
//!
//! ### Using the Provider Factory
//!
//! ```rust,ignore
//! use ai_sdk_openai::OpenAIProvider;
//! use ai_sdk_provider::ProviderV3;
//!
//! let provider = OpenAIProvider::new("your-api-key");
//! let chat_model = provider.language_model("gpt-4");
//! let embedding_model = provider.embedding_model("text-embedding-3-small");
//! ```
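//!
//! ### Streaming (sketch)
//!
//! The crate advertises server-sent-event streaming, but the streaming API is not
//! shown in this overview. The sketch below assumes a `stream` builder method that
//! yields text deltas as a `futures::Stream`; treat the method and item names as
//! illustrative and consult the `LanguageModel` trait docs for the real surface.
//!
//! ```rust,ignore
//! use futures::StreamExt;
//!
//! // Hypothetical streaming call: `stream()` and `delta()` are assumed names.
//! let mut stream = model
//!     .generate("Write a haiku about Rust")
//!     .stream()
//!     .await?;
//!
//! while let Some(chunk) = stream.next().await {
//!     print!("{}", chunk?.delta());
//! }
//! ```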
//!
//! ## Configuration
//!
//! ### API Key
//!
//! Obtain your OpenAI API key from: <https://platform.openai.com/api-keys>
//!
//! Set it via environment variable:
//! ```bash
//! export OPENAI_API_KEY=sk-...
//! ```
//!
//! Or pass it directly to model constructors:
//! ```rust,ignore
//! let model = OpenAIChatModel::new("gpt-4", "sk-...");
//! ```
//!
//! ### Custom Base URL
//!
//! To target an OpenAI-compatible endpoint (for example a proxy or gateway),
//! override the base URL:
//! ```rust,ignore
//! use ai_sdk_openai::{OpenAIChatModel, OpenAIConfig};
//!
//! let config = OpenAIConfig::new("your-api-key")
//!     .base_url("https://your-proxy.com/v1");
//! let model = OpenAIChatModel::from_config("gpt-4", config);
//! ```
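//!
//! ## Error Handling
//!
//! Failures are reported through [`OpenAIError`]; the feature list above mentions
//! retry guidance, but the exact variants and helpers are documented on the type
//! itself. A minimal handling sketch (it assumes the error returned by `generate`
//! is, or converts into, `OpenAIError`):
//!
//! ```rust,ignore
//! use ai_sdk_openai::OpenAIError;
//!
//! match model.generate("Hello").await {
//!     Ok(response) => println!("{}", response.text()),
//!     Err(err) => {
//!         // Inspect OpenAIError's variants for retry guidance; see the type's docs.
//!         eprintln!("request failed: {err}");
//!     }
//! }
//! ```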
#![cfg_attr(docsrs, feature(doc_cfg))]
#![warn(missing_docs)]

mod api_types;
mod chat;
mod embedding;
mod error;
mod image;
pub mod model_detection;
mod multimodal;
mod openai_config;
mod provider;
pub mod responses;
mod speech;
mod transcription;

pub use chat::OpenAIChatModel;
pub use embedding::OpenAIEmbeddingModel;
pub use error::OpenAIError;
pub use image::OpenAIImageModel;
pub use multimodal::{convert_audio_part, convert_image_part, MultimodalError, OpenAIContentPart};
pub use openai_config::{OpenAIConfig, OpenAIUrlOptions};
pub use provider::OpenAIProvider;
pub use speech::OpenAISpeechModel;
pub use transcription::OpenAITranscriptionModel;