llm_kit_cerebras/lib.rs

//! Cerebras provider for the LLM Kit.
//!
//! This crate provides a provider implementation for Cerebras, offering high-speed
//! AI model inference powered by Cerebras Wafer-Scale Engines and CS-3 systems.
//!
//! # Features
//!
//! - **Chat Completions**: Full support for chat-based language models
//! - **Streaming**: Real-time streaming of model responses
//! - **Tool Calling**: Function calling capabilities for building agents
//! - **Structured Outputs**: JSON schema-based structured output generation
//! - **Reasoning Models**: Support for reasoning/thinking models
//!
//! # Examples
//!
//! ## Basic Usage with Client Builder (Recommended)
//!
//! ```no_run
//! use llm_kit_cerebras::CerebrasClient;
//!
//! // Create a provider using the client builder
//! let provider = CerebrasClient::new()
//!     .api_key("your-api-key")
//!     .build();
//!
//! let model = provider.chat_model("llama-3.3-70b");
//! ```
//!
//! ## Alternative: Using Settings Directly
//!
//! ```no_run
//! use llm_kit_cerebras::{CerebrasProvider, CerebrasProviderSettings};
//!
//! // Create a provider using settings
//! let provider = CerebrasProvider::new(
//!     CerebrasProviderSettings::new("https://api.cerebras.ai/v1")
//!         .with_api_key("your-api-key")
//! );
//!
//! let model = provider.chat_model("llama-3.3-70b");
//! ```
//!
//! ## Chained Usage
//!
//! ```no_run
//! use llm_kit_cerebras::CerebrasClient;
//!
//! let model = CerebrasClient::new()
//!     .api_key("your-api-key")
//!     .build()
//!     .chat_model("llama-3.3-70b");
//! ```
//!
//! ## Using Model Constants
//!
//! ```no_run
//! use llm_kit_cerebras::{CerebrasClient, chat::models};
//!
//! let provider = CerebrasClient::new()
//!     .api_key("your-api-key")
//!     .build();
//!
//! // Use predefined model constants
//! let model = provider.chat_model(models::LLAMA_3_3_70B);
//! ```
//!
//! ## Custom Headers
//!
//! ```no_run
//! use llm_kit_cerebras::CerebrasClient;
//!
//! let provider = CerebrasClient::new()
//!     .api_key("your-api-key")
//!     .header("X-Custom-Header", "value")
//!     .build();
//!
//! let model = provider.chat_model("llama-3.3-70b");
//! ```
//!
//! ## Environment Variable for API Key
//!
//! The provider will automatically read the API key from the `CEREBRAS_API_KEY`
//! environment variable if not provided explicitly:
//!
//! ```no_run
//! use llm_kit_cerebras::CerebrasClient;
//!
//! // API key will be read from CEREBRAS_API_KEY environment variable
//! let provider = CerebrasClient::new().build();
//!
//! let model = provider.chat_model("llama-3.3-70b");
//! ```
//!
//! # Available Models
//!
//! Cerebras offers several high-performance language models:
//!
//! ## Production Models
//! - `llama3.1-8b` - Llama 3.1 8B parameter model
//! - `llama-3.3-70b` - Llama 3.3 70B parameter model
//! - `gpt-oss-120b` - GPT-OSS 120B parameter model
//! - `qwen-3-32b` - Qwen 3 32B parameter model
//!
//! ## Preview Models
//! - `qwen-3-235b-a22b-instruct-2507` - Qwen 3 235B instruct model
//! - `qwen-3-235b-a22b-thinking-2507` - Qwen 3 235B thinking/reasoning model
//! - `zai-glm-4.6` - ZAI GLM 4.6 model
//!
//! For more information, see: <https://inference-docs.cerebras.ai/models/overview>
//!
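//! Any of the listed IDs can be passed to `chat_model` as a plain string. As a
//! minimal sketch (assuming preview IDs are accepted the same way as production
//! IDs), selecting the preview thinking/reasoning model looks like this:
//!
//! ```no_run
//! use llm_kit_cerebras::CerebrasClient;
//!
//! let provider = CerebrasClient::new()
//!     .api_key("your-api-key")
//!     .build();
//!
//! // Preview thinking/reasoning model, requested by its listed ID
//! let model = provider.chat_model("qwen-3-235b-a22b-thinking-2507");
//! ```
//!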
//! # Note
//!
//! Due to high demand in the early launch phase, context windows are temporarily
//! limited to 8192 tokens in the Free Tier.

/// Chat model types and identifiers
pub mod chat;

/// Client builder for creating Cerebras providers
pub mod client;

/// Error types for Cerebras operations
pub mod error;

/// Provider implementation and creation functions
pub mod provider;

/// Settings and configuration for Cerebras providers
pub mod settings;

// Re-export main types
pub use chat::CerebrasChatModelId;
pub use client::CerebrasClient;
pub use error::CerebrasErrorData;
pub use provider::CerebrasProvider;
pub use settings::CerebrasProviderSettings;

/// Creates a default Cerebras provider configured from environment variables.
///
/// The returned provider reads the API key from the
/// `CEREBRAS_API_KEY` environment variable.
///
/// # Examples
///
/// ```no_run
/// use llm_kit_cerebras::cerebras;
///
/// let model = cerebras().chat_model("llama-3.3-70b");
/// ```
pub fn cerebras() -> CerebrasProvider {
    CerebrasClient::new().build()
}