// turbine_llm/lib.rs
1//! # Turbine LLM
2//!
3//! A unified Rust interface for multiple LLM providers with growing model support.
4//!
5//! Turbine provides a simple, consistent API to interact with various Large Language Model
6//! providers including OpenAI, Anthropic, Google Gemini, and Groq. Switch between providers
7//! with minimal code changes.
8//!
9//! ## Features
10//!
11//! - **Unified API**: Single interface for multiple LLM providers
12//! - **Simple & Clean**: Minimal, straightforward code
13//! - **Text & JSON Output**: Support for both text and structured JSON responses
14//! - **Async/Await**: Built with Tokio for async operations
15//! - **Type-Safe**: Full Rust type safety with proper error handling
16//!
17//! ## Quick Start
18//!
19//! ### Simplified API (Recommended)
20//!
//! ```no_run
//! use turbine_llm::TurbineClient;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Create client with model string - automatically detects provider
//!     let client = TurbineClient::from_model("openai/gpt-4o-mini")?;
//!
//!     // Simple one-liner to send a message
//!     let response = client.send("What is Rust?").await?;
//!     println!("{}", response.content);
//!
//!     Ok(())
//! }
//! ```
36//!
37//! ### Traditional API
38//!
//! ```no_run
//! use turbine_llm::{TurbineClient, LLMRequest, Message, Provider};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Create a client for your chosen provider
//!     let client = TurbineClient::new(Provider::OpenAI)?;
//!
//!     // Build a request
//!     let request = LLMRequest::new("gpt-4o-mini")
//!         .with_system_prompt("You are a helpful assistant.")
//!         .with_message(Message::user("What is Rust?"))
//!         .with_max_tokens(100);
//!
//!     // Send the request
//!     let response = client.send_request(&request).await?;
//!     println!("{}", response.content);
//!
//!     Ok(())
//! }
//! ```
60//!
61//! ## Supported Providers
62//!
63//! Set the appropriate API key as an environment variable:
64//!
65//! - **OpenAI**: `OPENAI_API_KEY`
66//! - **Anthropic**: `ANTHROPIC_API_KEY`
67//! - **Gemini**: `GEMINI_API_KEY`
68//! - **Groq**: `GROQ_API_KEY`
69//!
70//! ## JSON Output
71//!
72//! Request structured JSON responses from any provider:
73//!
//! ```no_run
//! use turbine_llm::{TurbineClient, LLMRequest, Message, OutputFormat, Provider};
//!
//! # #[tokio::main]
//! # async fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let client = TurbineClient::new(Provider::Anthropic)?;
//!
//! let request = LLMRequest::new("claude-3-5-sonnet-20241022")
//!     .with_system_prompt("Return data as JSON.")
//!     .with_message(Message::user("Give me info about Paris"))
//!     .with_output_format(OutputFormat::Json);
//!
//! let response = client.send_request(&request).await?;
//! # Ok(())
//! # }
//! ```
90
/// Client entry point ([`TurbineClient`]) for talking to a provider.
pub mod client;
/// Crate error type and `Result` alias.
pub mod error;
/// Request/response data models ([`LLMRequest`], [`LLMResponse`], [`Message`], [`Usage`]).
pub mod models;
/// Per-provider backend implementations (OpenAI, Anthropic, Gemini, Groq).
pub mod providers;
/// Shared enums ([`Provider`], [`OutputFormat`]).
pub mod types;

// Re-export commonly used types for convenience
// so callers can `use turbine_llm::TurbineClient` instead of the full module path.
pub use client::TurbineClient;
pub use error::{Result, TurbineError};
pub use models::{LLMRequest, LLMResponse, Message, Usage};
pub use types::{OutputFormat, Provider};