cerebras_rs/
lib.rs

#![doc = include_str!("../README.md")]
#![warn(missing_docs)]
#![warn(rustdoc::missing_crate_level_docs)]
#![allow(unused_imports)]
#![allow(clippy::too_many_arguments)]

//! # Cerebras Rust SDK
//!
//! High-performance Rust SDK for the Cerebras Inference API, providing low-latency
//! AI model inference powered by Cerebras Wafer-Scale Engines and CS-3 systems.
//!
//! ## Features
//!
//! - **Async/await support** - Built on tokio for high-performance async operations
//! - **Streaming responses** - Real-time token streaming for chat and completions
//! - **Type-safe API** - Strongly typed requests and responses
//! - **Builder patterns** - Ergonomic API for constructing requests
//! - **Comprehensive error handling** - Detailed error types for all failure modes
//!
//! ## Quick Start
//!
//! ```rust,no_run
//! use cerebras_rs::{Client, ChatCompletionRequest, ModelIdentifier};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let client = Client::new(std::env::var("CEREBRAS_API_KEY")?);
//!
//!     let request = ChatCompletionRequest::builder(ModelIdentifier::Llama3Period18b)
//!         .user_message("Hello, how are you?")
//!         .build();
//!
//!     let response = client.chat_completion(request).await?;
//!     if let Some(choices) = &response.choices {
//!         if let Some(choice) = choices.first() {
//!             if let Some(message) = &choice.message {
//!                 println!("{}", message.content);
//!             }
//!         }
//!     }
//!
//!     Ok(())
//! }
//! ```
//!
//! You can also construct a client directly from an API key:
//!
//! ```rust,no_run
//! use cerebras_rs::Client;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let client = Client::new("your-api-key-here");
//!     // Now you can use client to make API calls
//!     Ok(())
//! }
//! ```
//!
//! ## Chat Completions
//!
//! ```rust,no_run
//! use cerebras_rs::{Client, ChatCompletionRequest, ModelIdentifier};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let client = Client::from_env()?;
//!
//!     let request = ChatCompletionRequest::builder(ModelIdentifier::Llama3Period18b)
//!         .system_message("You are a helpful assistant.")
//!         .user_message("What is the capital of France?")
//!         .max_tokens(100)
//!         .temperature(0.7)
//!         .build();
//!
//!     let response = client.chat_completion(request).await?;
//!     if let Some(choices) = &response.choices {
//!         if let Some(choice) = choices.first() {
//!             if let Some(message) = &choice.message {
//!                 println!("{}", message.content);
//!             }
//!         }
//!     }
//!
//!     Ok(())
//! }
//! ```
//!
//! ## Error Handling
//!
//! ```rust,no_run
//! use cerebras_rs::{Client, ChatCompletionRequest, ModelIdentifier, Error};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let client = Client::from_env()?;
//!     let request = ChatCompletionRequest::builder(ModelIdentifier::Llama3Period18b)
//!         .user_message("Hello")
//!         .build();
//!
//!     match client.chat_completion(request).await {
//!         Ok(_response) => println!("Success!"),
//!         Err(Error::RateLimit(retry_after)) => {
//!             println!("Rate limited, retry after {} seconds", retry_after);
//!         },
//!         Err(e) => eprintln!("Error: {}", e),
//!     }
//!
//!     Ok(())
//! }
//! ```
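//!
//! For transient failures such as rate limits, a simple retry loop can be layered
//! on top of these error types. A minimal sketch (the three-attempt policy is
//! illustrative, and it assumes `retry_after` is a number of seconds, as the
//! example above suggests):
//!
//! ```rust,no_run
//! use cerebras_rs::{Client, ChatCompletionRequest, ModelIdentifier, Error};
//! use std::time::Duration;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let client = Client::from_env()?;
//!
//!     for _ in 0..3 {
//!         // Build the request inside the loop so each attempt gets a fresh value.
//!         let request = ChatCompletionRequest::builder(ModelIdentifier::Llama3Period18b)
//!             .user_message("Hello")
//!             .build();
//!
//!         match client.chat_completion(request).await {
//!             Ok(_response) => break, // success: handle the response here
//!             Err(Error::RateLimit(retry_after)) => {
//!                 // Back off for the server-suggested number of seconds, then retry.
//!                 tokio::time::sleep(Duration::from_secs(retry_after)).await;
//!             }
//!             Err(e) => return Err(e.into()),
//!         }
//!     }
//!
//!     Ok(())
//! }
//! ```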
//!
//! ## Completions
//!
//! ```rust,no_run
//! use cerebras_rs::{Client, CompletionRequest, ModelIdentifier};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let client = Client::from_env()?;
//!
//!     let request = CompletionRequest::builder(ModelIdentifier::Llama3Period18b)
//!         .prompt("Once upon a time")
//!         .max_tokens(100)
//!         .temperature(0.7)
//!         .build();
//!
//!     let response = client.completion(request).await?;
//!     if let Some(choices) = &response.choices {
//!         if let Some(choice) = choices.first() {
//!             if let Some(text) = &choice.text {
//!                 println!("{}", text);
//!             }
//!         }
//!     }
//!
//!     Ok(())
//! }
//! ```
//!
//! ## Streaming
//!
//! ```rust,no_run
//! use cerebras_rs::{Client, ChatCompletionRequest, ModelIdentifier};
//! use futures_util::StreamExt;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let client = Client::from_env()?;
//!
//!     let request = ChatCompletionRequest::builder(ModelIdentifier::Llama3Period18b)
//!         .user_message("Tell me a story")
//!         .stream(true)
//!         .build();
//!
//!     let mut stream = client.chat_completion_stream(request).await?;
//!
//!     while let Some(chunk) = stream.next().await {
//!         match chunk {
//!             Ok(chunk) => {
//!                 if let Some(choices) = &chunk.choices {
//!                     if let Some(choice) = choices.first() {
//!                         if let Some(delta) = &choice.delta {
//!                             if let Some(content) = &delta.content {
//!                                 print!("{}", content);
//!                             }
//!                         }
//!                     }
//!                 }
//!             }
//!             Err(e) => eprintln!("Error: {}", e),
//!         }
//!     }
//!
//!     Ok(())
//! }
//! ```
//!
//! ## Function Calling
//!
//! ```rust,no_run
//! use cerebras_rs::{Client, ChatCompletionRequest, ModelIdentifier};
//! use cerebras_rs::models::{Tool, FunctionDefinition, tool::Type};
//! use std::collections::HashMap;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let client = Client::from_env()?;
//!
//!     // Define a weather function tool
//!     let weather_tool = Tool {
//!         r#type: Some(Type::Function),
//!         function: Some(FunctionDefinition {
//!             name: "get_weather".to_string(),
//!             description: Some("Get current weather".to_string()),
//!             parameters: Some(HashMap::new()), // Simplified for example
//!         }),
//!     };
//!
//!     let request = ChatCompletionRequest::builder(ModelIdentifier::Llama3Period18b)
//!         .user_message("What's the weather like in San Francisco?")
//!         .tool(weather_tool)
//!         .build();
//!
//!     let _response = client.chat_completion(request).await?;
//!     // Process response...
//!
//!     Ok(())
//! }
//! ```
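//!
//! The empty `parameters` map above can carry a JSON-Schema description of the
//! tool's arguments. A minimal sketch, assuming `parameters` is a map from
//! `String` to `serde_json::Value` (the keys shown are plain JSON Schema, not
//! SDK-specific):
//!
//! ```rust
//! use serde_json::json;
//! use std::collections::HashMap;
//!
//! let mut parameters: HashMap<String, serde_json::Value> = HashMap::new();
//! parameters.insert("type".to_string(), json!("object"));
//! parameters.insert(
//!     "properties".to_string(),
//!     json!({
//!         "location": { "type": "string", "description": "City name, e.g. San Francisco" }
//!     }),
//! );
//! parameters.insert("required".to_string(), json!(["location"]));
//! ```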
//!
//! ## Advanced: Custom Configuration
//!
//! ```rust,no_run
//! use cerebras_rs::{Client, Configuration, ChatCompletionRequest, ModelIdentifier};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Create a custom configuration
//!     let mut config = Configuration::new();
//!     config.bearer_access_token = Some("your-api-key".to_string());
//!     config.base_path = "https://custom-endpoint.example.com".to_string();
//!
//!     let client = Client::with_configuration(config);
//!
//!     let request = ChatCompletionRequest::builder(ModelIdentifier::Llama3Period18b)
//!         .user_message("Hello")
//!         .build();
//!
//!     let _response = client.chat_completion(request).await?;
//!     // Process response...
//!
//!     Ok(())
//! }
//! ```

extern crate serde;
extern crate serde_json;
extern crate serde_repr;
extern crate url;
extern crate reqwest;
extern crate uuid;
extern crate chrono;
extern crate base64;

extern crate tokio;
extern crate async_trait;

extern crate tokio_stream;
extern crate futures_util;
extern crate eventsource_stream;
extern crate pin_project_lite;

extern crate thiserror;
extern crate anyhow;

/// Generated API client endpoints.
pub mod apis;
/// Generated request and response models.
pub mod models;

// Re-export commonly used types at the crate root
pub use apis::configuration::{ApiKey, Configuration};
pub use apis::default_api as api;

// Re-export all models at the crate root for convenience
pub use models::*;

// Re-export the main client
mod client;
pub use client::Client;

/// Builder patterns for constructing API requests ergonomically.
pub mod builders;

/// Streaming support for real-time token delivery.
pub mod streaming;

// Error handling
mod error;
pub use error::{Error, Result};

// Prelude module for convenient imports
pub mod prelude {
    //! The prelude module provides convenient imports for common usage.
    //!
    //! # Example
    //! ```rust
    //! use cerebras_rs::prelude::*;
    //! ```

    pub use crate::{
        Client,
        Error,
        Result,
        ChatCompletionRequest,
        ChatMessage,
        CompletionRequest,
        ModelIdentifier,
    };

    pub use crate::streaming::{ChatCompletionStream, CompletionStream};

    pub use crate::builders::{ChatCompletionBuilder, CompletionBuilder};
}

// Version information
/// The version of the Cerebras Rust SDK
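///
/// # Example
/// ```rust
/// // The version string is baked in from Cargo.toml at compile time.
/// println!("cerebras_rs v{}", cerebras_rs::VERSION);
/// ```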
pub const VERSION: &str = env!("CARGO_PKG_VERSION");

/// The OpenAPI specification version this SDK was generated from
pub const OPENAPI_VERSION: &str = "1.0.0";