llm_kit_deepseek/lib.rs
//! DeepSeek provider implementation for the LLM Kit.
//!
//! This crate provides an LLM Kit provider for DeepSeek's chat and reasoning models,
//! supporting standard chat completions as well as R1-style advanced reasoning.
//!
//! # Features
//!
//! - Chat completions with `deepseek-chat`
//! - Advanced reasoning with `deepseek-reasoner` (R1)
//! - Streaming support
//! - Tool calling
//! - DeepSeek-specific metadata (prompt cache hit/miss tokens)
//!
//! # Examples
//!
//! ## Basic Usage with Client Builder (Recommended)
//!
//! ```no_run
//! use llm_kit_deepseek::DeepSeekClient;
//!
//! // Create a provider using the client builder
//! let provider = DeepSeekClient::new()
//!     .api_key("your-api-key")
//!     .build();
//!
//! let model = provider.chat_model("deepseek-chat");
//! ```
//!
//! ## Alternative: Direct Instantiation
//!
//! ```no_run
//! use llm_kit_deepseek::{DeepSeekProvider, DeepSeekProviderSettings};
//!
//! // Create a provider using settings
//! let provider = DeepSeekProvider::new(
//!     DeepSeekProviderSettings::new()
//!         .with_api_key("your-api-key"),
//! );
//!
//! let model = provider.chat_model("deepseek-chat");
//! ```
//!
//! ## Chained Usage
//!
//! ```no_run
//! use llm_kit_deepseek::DeepSeekClient;
//!
//! let model = DeepSeekClient::new()
//!     .api_key("your-api-key")
//!     .build()
//!     .chat_model("deepseek-reasoner");
//! ```
//!
//! ## Environment Variable
//!
//! ```no_run
//! use llm_kit_deepseek::DeepSeekClient;
//!
//! // API key will be read from the DEEPSEEK_API_KEY environment variable
//! let provider = DeepSeekClient::new()
//!     .load_api_key_from_env()
//!     .build();
//!
//! let model = provider.chat_model("deepseek-chat");
//! ```
//!
//! ## Text Generation
//!
//! ```no_run
//! use llm_kit_deepseek::DeepSeekClient;
//! use llm_kit_provider::language_model::call_options::LanguageModelCallOptions;
//! use llm_kit_provider::language_model::prompt::LanguageModelMessage;
//! use llm_kit_provider::language_model::content::LanguageModelContent;
//!
//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
//! let provider = DeepSeekClient::new()
//!     .api_key("your-api-key")
//!     .build();
//!
//! let model = provider.chat_model("deepseek-chat");
//!
//! let prompt = vec![LanguageModelMessage::user_text("Write a function to calculate factorial")];
//! let options = LanguageModelCallOptions::new(prompt).with_temperature(0.7);
//! let result = model.do_generate(options).await?;
//!
//! for content in &result.content {
//!     if let LanguageModelContent::Text(text) = content {
//!         println!("Response: {}", text.text);
//!     }
//! }
//! # Ok(())
//! # }
//! ```
//!
//! ## Reasoning Model
//!
//! ```no_run
//! use llm_kit_deepseek::DeepSeekClient;
//! use llm_kit_provider::language_model::call_options::LanguageModelCallOptions;
//! use llm_kit_provider::language_model::prompt::LanguageModelMessage;
//! use llm_kit_provider::language_model::content::LanguageModelContent;
//!
//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
//! let provider = DeepSeekClient::new()
//!     .api_key("your-api-key")
//!     .build();
//!
//! // Use the DeepSeek Reasoner (R1) model for advanced reasoning
//! let model = provider.chat_model("deepseek-reasoner");
//!
//! let prompt = vec![LanguageModelMessage::user_text("Solve this logic puzzle: ...")];
//! let options = LanguageModelCallOptions::new(prompt);
//! let result = model.do_generate(options).await?;
//!
//! // Access reasoning and text content
//! for content in &result.content {
//!     match content {
//!         LanguageModelContent::Reasoning(reasoning) => {
//!             println!("Reasoning: {}", reasoning.text);
//!         }
//!         LanguageModelContent::Text(text) => {
//!             println!("Answer: {}", text.text);
//!         }
//!         _ => {}
//!     }
//! }
//! # Ok(())
//! # }
//! ```
//!
//! ## Streaming
//!
//! ```no_run
//! use llm_kit_deepseek::DeepSeekClient;
//! use llm_kit_provider::language_model::call_options::LanguageModelCallOptions;
//! use llm_kit_provider::language_model::prompt::LanguageModelMessage;
//! use llm_kit_provider::language_model::stream_part::LanguageModelStreamPart;
//! use futures_util::StreamExt;
//!
//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
//! let provider = DeepSeekClient::new()
//!     .api_key("your-api-key")
//!     .build();
//!
//! let model = provider.chat_model("deepseek-chat");
//!
//! let prompt = vec![LanguageModelMessage::user_text("Tell me a story")];
//! let options = LanguageModelCallOptions::new(prompt);
//! let mut result = model.do_stream(options).await?;
//!
//! // Stream text deltas
//! while let Some(part) = result.stream.next().await {
//!     if let LanguageModelStreamPart::TextDelta(delta) = part {
//!         print!("{}", delta.delta);
//!     }
//! }
//! # Ok(())
//! # }
//! ```
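//!
//! With `deepseek-reasoner`, the stream interleaves reasoning deltas with answer text.
//! A minimal sketch, assuming the stream-part enum also exposes a `ReasoningDelta` variant
//! (that variant name and its `delta` field are assumptions; check `LanguageModelStreamPart`
//! for the exact shape):
//!
//! ```ignore
//! // Hypothetical: `ReasoningDelta` is assumed, not confirmed by this crate.
//! while let Some(part) = result.stream.next().await {
//!     match part {
//!         LanguageModelStreamPart::ReasoningDelta(r) => print!("[reasoning] {}", r.delta),
//!         LanguageModelStreamPart::TextDelta(t) => print!("{}", t.delta),
//!         _ => {}
//!     }
//! }
//! ```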
//!
//! ## DeepSeek-Specific Metadata
//!
//! DeepSeek provides prompt cache statistics in the metadata:
//!
//! ```no_run
//! use llm_kit_deepseek::DeepSeekClient;
//! use llm_kit_provider::language_model::call_options::LanguageModelCallOptions;
//! use llm_kit_provider::language_model::prompt::LanguageModelMessage;
//!
//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
//! let provider = DeepSeekClient::new()
//!     .api_key("your-api-key")
//!     .build();
//!
//! let model = provider.chat_model("deepseek-chat");
//!
//! let prompt = vec![LanguageModelMessage::user_text("Hello")];
//! let options = LanguageModelCallOptions::new(prompt);
//! let result = model.do_generate(options).await?;
//!
//! // Access DeepSeek-specific metadata
//! if let Some(provider_metadata) = &result.provider_metadata {
//!     if let Some(deepseek) = provider_metadata.get("deepseek") {
//!         println!("Prompt cache hit tokens: {:?}", deepseek.get("promptCacheHitTokens"));
//!         println!("Prompt cache miss tokens: {:?}", deepseek.get("promptCacheMissTokens"));
//!     }
//! }
//! # Ok(())
//! # }
//! ```
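//!
//! ## Tool Calling
//!
//! Tool calling is supported on the chat models. The exact tool and tool-call types live in
//! `llm_kit_provider`, so the sketch below is hypothetical: the `with_tools` builder method,
//! the `ToolCall` content variant, and its `tool_name`/`input` fields are assumed names,
//! not confirmed API.
//!
//! ```ignore
//! // Hypothetical sketch; verify the tool types against llm_kit_provider.
//! let options = LanguageModelCallOptions::new(prompt)
//!     .with_tools(vec![my_tool]); // `my_tool`: a function tool definition you construct
//! let result = model.do_generate(options).await?;
//!
//! for content in &result.content {
//!     if let LanguageModelContent::ToolCall(call) = content {
//!         println!("Tool requested: {} with input {}", call.tool_name, call.input);
//!     }
//! }
//! ```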

/// Chat completion implementation for DeepSeek models.
pub mod chat;

/// Client builder for creating DeepSeek providers.
pub mod client;

/// Error types for DeepSeek provider operations.
pub mod error;

/// Provider implementation and creation functions.
pub mod provider;

/// Settings and configuration for DeepSeek providers.
pub mod settings;

// Re-export main types from chat
pub use chat::{
    DeepSeekChatLanguageModel, DeepSeekChatModelId, DeepSeekMetadataExtractor,
    DeepSeekProviderOptions, DeepSeekUsage,
};

pub use client::DeepSeekClient;
pub use error::{DeepSeekError, DeepSeekErrorData};
pub use provider::DeepSeekProvider;
pub use settings::DeepSeekProviderSettings;